repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses 15 values) |
---|---|---|---|---|---|
vv1133/home_web
|
django/contrib/gis/tests/relatedapp/tests.py
|
58
|
14918
|
from __future__ import absolute_import
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, mysql, oracle, no_mysql, no_oracle, no_spatialite
from django.test import TestCase
from django.utils.unittest import skipUnless
if HAS_GEOS:
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` to select the location.
# TODO: Determine why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This runs the Extent aggregate in a single query.
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This runs the Union aggregate in a single query.
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
# It looks like PostGIS orders the points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# The first Parcel has an incorrect center point that is equal to the
# City's; it also has a second border that differs from the first, being
# a 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# A GeoQuerySet, a GeoValuesQuerySet, and a GeoValuesListQuerySet, respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertTrue(isinstance(d['point'], Geometry))
self.assertTrue(isinstance(t[1], Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of the ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertTrue('Aurora' in names)
self.assertTrue('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry))
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
no_author = Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share the same point, Collect doesn't
# consolidate -- that's why there are 4 points in the MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers a TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError would otherwise be swallowed if the QuerySet were
# evaluated, as list generation swallows TypeError in CPython.
sql = str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
|
bsd-3-clause
|
google/contentbox
|
third_party/modeltranslation/manager.py
|
5
|
13076
|
# -*- coding: utf-8 -*-
"""
The idea of MultilingualManager is taken from
django-linguo by Zach Mathew
https://github.com/zmathew/django-linguo
"""
from django.db import models
from django.db.models import FieldDoesNotExist
from django.db.models.fields.related import RelatedField, RelatedObject
from django.db.models.sql.where import Constraint
from django.utils.tree import Node
from modeltranslation import settings
from modeltranslation.fields import TranslationField
from modeltranslation.utils import (build_localized_fieldname, get_language,
auto_populate)
def get_translatable_fields_for_model(model):
from modeltranslation.translator import NotRegistered, translator
try:
return translator.get_options_for_model(model).get_field_names()
except NotRegistered:
return None
def rewrite_lookup_key(model, lookup_key):
pieces = lookup_key.split('__', 1)
original_key = pieces[0]
translatable_fields = get_translatable_fields_for_model(model)
if translatable_fields is not None:
# If we are doing a lookup on a translatable field,
# we want to rewrite it to the actual field name
# For example, we want to rewrite "name__startswith" to "name_fr__startswith"
if pieces[0] in translatable_fields:
pieces[0] = build_localized_fieldname(pieces[0], get_language())
if len(pieces) > 1:
# Check if we are doing a lookup to a related trans model
fields_to_trans_models = get_fields_to_translatable_models(model)
for field_to_trans, transmodel in fields_to_trans_models:
# Check ``original_key``, as pieces[0] may already have been rewritten.
if original_key == field_to_trans:
pieces[1] = rewrite_lookup_key(transmodel, pieces[1])
break
return '__'.join(pieces)
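# Illustrative sketch (not part of the original module): assuming a model
# News whose "title" field is registered for translation and an active
# language of "de", the rewriting behaves like:
#
#   rewrite_lookup_key(News, 'title__icontains')  # -> 'title_de__icontains'
#   rewrite_lookup_key(News, 'category__title')   # -> 'category__title_de'
#
# The second case recurses into the related translatable model via pieces[1].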
def rewrite_order_lookup_key(model, lookup_key):
if lookup_key.startswith('-'):
return '-' + rewrite_lookup_key(model, lookup_key[1:])
else:
return rewrite_lookup_key(model, lookup_key)
_F2TM_CACHE = {}
def get_fields_to_translatable_models(model):
if model not in _F2TM_CACHE:
results = []
for field_name in model._meta.get_all_field_names():
field_object, modelclass, direct, m2m = model._meta.get_field_by_name(field_name)
# Direct relationship
if direct and isinstance(field_object, RelatedField):
if get_translatable_fields_for_model(field_object.related.parent_model) is not None:
results.append((field_name, field_object.related.parent_model))
# Reverse relationship
if isinstance(field_object, RelatedObject):
if get_translatable_fields_for_model(field_object.model) is not None:
results.append((field_name, field_object.model))
_F2TM_CACHE[model] = results
return _F2TM_CACHE[model]
_C2F_CACHE = {}
def get_field_by_colum_name(model, col):
# First, try field with the column name
try:
field = model._meta.get_field(col)
if field.column == col:
return field
except FieldDoesNotExist:
pass
field = _C2F_CACHE.get((model, col), None)
if field:
return field
# D'oh, need to search through all of them.
for field in model._meta.fields:
if field.column == col:
_C2F_CACHE[(model, col)] = field
return field
assert False, "No field found for column %s" % col
class MultilingualQuerySet(models.query.QuerySet):
def __init__(self, *args, **kwargs):
super(MultilingualQuerySet, self).__init__(*args, **kwargs)
self._post_init()
def _post_init(self):
self._rewrite = True
self._populate = None
if self.model and (not self.query.order_by):
if self.model._meta.ordering:
# If we have default ordering specified on the model, set it now so that
# it can be rewritten. Otherwise sql.compiler will grab it directly from _meta
ordering = []
for key in self.model._meta.ordering:
ordering.append(rewrite_order_lookup_key(self.model, key))
self.query.add_ordering(*ordering)
# This method was not present in django-linguo
def _clone(self, klass=None, *args, **kwargs):
if klass is not None and not issubclass(klass, MultilingualQuerySet):
class NewClass(klass, MultilingualQuerySet):
pass
NewClass.__name__ = 'Multilingual%s' % klass.__name__
klass = NewClass
kwargs.setdefault('_rewrite', self._rewrite)
kwargs.setdefault('_populate', self._populate)
return super(MultilingualQuerySet, self)._clone(klass, *args, **kwargs)
# This method was not present in django-linguo
def rewrite(self, mode=True):
return self._clone(_rewrite=mode)
# This method was not present in django-linguo
def populate(self, mode='all'):
"""
Overrides the translation fields population mode for this query set.
"""
return self._clone(_populate=mode)
def _rewrite_applied_operations(self):
"""
Rewrite fields in already applied filters/ordering.
Useful when converting any QuerySet into MultilingualQuerySet.
"""
self._rewrite_where(self.query.where)
self._rewrite_where(self.query.having)
self._rewrite_order()
def _rewrite_where(self, q):
"""
Rewrite field names inside WHERE tree.
"""
if isinstance(q, tuple) and isinstance(q[0], Constraint):
c = q[0]
if c.field is None:
c.field = get_field_by_colum_name(self.model, c.col)
new_name = rewrite_lookup_key(self.model, c.field.name)
if c.field.name != new_name:
c.field = self.model._meta.get_field(new_name)
c.col = c.field.column
if isinstance(q, Node):
for child in q.children:
self._rewrite_where(child)
def _rewrite_order(self):
self.query.order_by = [rewrite_order_lookup_key(self.model, field_name)
for field_name in self.query.order_by]
# This method was not present in django-linguo
def _rewrite_q(self, q):
"""Rewrite field names inside Q call."""
if isinstance(q, tuple) and len(q) == 2:
return rewrite_lookup_key(self.model, q[0]), q[1]
if isinstance(q, Node):
q.children = list(map(self._rewrite_q, q.children))
return q
# This method was not present in django-linguo
def _rewrite_f(self, q):
"""
Rewrite field names inside F call.
"""
if isinstance(q, models.F):
q.name = rewrite_lookup_key(self.model, q.name)
return q
if isinstance(q, Node):
q.children = list(map(self._rewrite_f, q.children))
return q
def _filter_or_exclude(self, negate, *args, **kwargs):
if not self._rewrite:
return super(MultilingualQuerySet, self)._filter_or_exclude(negate, *args, **kwargs)
args = map(self._rewrite_q, args)
for key, val in kwargs.items():
new_key = rewrite_lookup_key(self.model, key)
del kwargs[key]
kwargs[new_key] = self._rewrite_f(val)
return super(MultilingualQuerySet, self)._filter_or_exclude(negate, *args, **kwargs)
def _get_original_fields(self):
return [f.attname for f in self.model._meta.fields if not isinstance(f, TranslationField)]
def order_by(self, *field_names):
"""
Change translatable field names in an ``order_by`` argument
to translation fields for the current language.
"""
if not self._rewrite:
return super(MultilingualQuerySet, self).order_by(*field_names)
new_args = []
for key in field_names:
new_args.append(rewrite_order_lookup_key(self.model, key))
return super(MultilingualQuerySet, self).order_by(*new_args)
def update(self, **kwargs):
if not self._rewrite:
return super(MultilingualQuerySet, self).update(**kwargs)
for key, val in kwargs.items():
new_key = rewrite_lookup_key(self.model, key)
del kwargs[key]
kwargs[new_key] = self._rewrite_f(val)
return super(MultilingualQuerySet, self).update(**kwargs)
update.alters_data = True
# This method was not present in django-linguo
@property
def _populate_mode(self):
# Populate can be set using a global setting or a manager method.
if self._populate is None:
return settings.AUTO_POPULATE
return self._populate
# This method was not present in django-linguo
def create(self, **kwargs):
"""
Allows the population mode to be overridden via the ``populate`` method.
"""
with auto_populate(self._populate_mode):
return super(MultilingualQuerySet, self).create(**kwargs)
# This method was not present in django-linguo
def get_or_create(self, **kwargs):
"""
Allows the population mode to be overridden via the ``populate`` method.
"""
with auto_populate(self._populate_mode):
return super(MultilingualQuerySet, self).get_or_create(**kwargs)
def _append_translated(self, fields):
"If translated field is encountered, add also all its translation fields."
fields = set(fields)
from modeltranslation.translator import translator
opts = translator.get_options_for_model(self.model)
for key, translated in opts.fields.items():
if key in fields:
fields = fields.union(f.name for f in translated)
return fields
# This method was not present in django-linguo
def defer(self, *fields):
fields = self._append_translated(fields)
return super(MultilingualQuerySet, self).defer(*fields)
# This method was not present in django-linguo
def only(self, *fields):
fields = self._append_translated(fields)
return super(MultilingualQuerySet, self).only(*fields)
# This method was not present in django-linguo
def raw_values(self, *fields):
return super(MultilingualQuerySet, self).values(*fields)
# This method was not present in django-linguo
def values(self, *fields):
if not self._rewrite:
return super(MultilingualQuerySet, self).values(*fields)
if not fields:
# Emulate original queryset behaviour: get all fields that are not translation fields
fields = self._get_original_fields()
new_args = []
for key in fields:
new_args.append(rewrite_lookup_key(self.model, key))
vqs = super(MultilingualQuerySet, self).values(*new_args)
vqs.field_names = list(fields)
return vqs
# This method was not present in django-linguo
def values_list(self, *fields, **kwargs):
if not self._rewrite:
return super(MultilingualQuerySet, self).values_list(*fields, **kwargs)
if not fields:
# Emulate original queryset behaviour: get all fields that are not translation fields
fields = self._get_original_fields()
new_args = []
for key in fields:
new_args.append(rewrite_lookup_key(self.model, key))
return super(MultilingualQuerySet, self).values_list(*new_args, **kwargs)
# This method was not present in django-linguo
def dates(self, field_name, *args, **kwargs):
if not self._rewrite:
return super(MultilingualQuerySet, self).dates(field_name, *args, **kwargs)
new_key = rewrite_lookup_key(self.model, field_name)
return super(MultilingualQuerySet, self).dates(new_key, *args, **kwargs)
class MultilingualManager(models.Manager):
use_for_related_fields = True
def rewrite(self, *args, **kwargs):
return self.get_queryset().rewrite(*args, **kwargs)
def populate(self, *args, **kwargs):
return self.get_queryset().populate(*args, **kwargs)
def raw_values(self, *args, **kwargs):
return self.get_queryset().raw_values(*args, **kwargs)
def get_queryset(self):
if hasattr(super(MultilingualManager, self), 'get_queryset'):
qs = super(MultilingualManager, self).get_queryset()
else: # Django 1.4 / 1.5 compat
qs = super(MultilingualManager, self).get_query_set()
if qs.__class__ == models.query.QuerySet:
qs.__class__ = MultilingualQuerySet
else:
class NewClass(qs.__class__, MultilingualQuerySet):
pass
NewClass.__name__ = 'Multilingual%s' % qs.__class__.__name__
qs.__class__ = NewClass
qs._post_init()
qs._rewrite_applied_operations()
return qs
get_query_set = get_queryset
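# Usage sketch (hypothetical model, not part of this module): attaching the
# manager is enough to get language-aware lookups on registered fields.
#
#   class News(models.Model):
#       title = models.CharField(max_length=255)
#       objects = MultilingualManager()
#
#   News.objects.filter(title='foo')   # rewritten to title_<lang>='foo'
#   News.objects.rewrite(False).all()  # bypasses any rewriting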
|
apache-2.0
|
payjp/payjp-python
|
payjp/test/helper.py
|
1
|
3831
|
import datetime
import json
import os
import random
import re
import string
import unittest
from mock import patch, Mock
from six import string_types
import payjp
NOW = datetime.datetime.now()
DUMMY_CARD = {
'number': '4242424242424242',
'exp_month': NOW.month,
'exp_year': NOW.year + 4
}
DUMMY_CHARGE = {
'amount': 100,
'currency': 'jpy',
'card': DUMMY_CARD
}
DUMMY_PLAN = {
'amount': 2000,
'interval': 'month',
'name': 'Amazing Gold Plan',
'currency': 'jpy',
'id': ('payjp-test-gold-' +
''.join(random.choice(string.ascii_lowercase) for x in range(10)))
}
DUMMY_TRANSFER = {
'amount': 400,
'currency': 'jpy',
'recipient': 'self'
}
class PayjpTestCase(unittest.TestCase):
RESTORE_ATTRIBUTES = ('api_version', 'api_key', 'max_retry', 'retry_initial_delay', 'retry_max_delay')
def setUp(self):
super(PayjpTestCase, self).setUp()
self._payjp_original_attributes = {}
for attr in self.RESTORE_ATTRIBUTES:
self._payjp_original_attributes[attr] = getattr(payjp, attr)
api_base = os.environ.get('PAYJP_API_BASE')
if api_base:
payjp.api_base = api_base
payjp.api_key = os.environ.get(
'PAYJP_API_KEY', 'sk_test_c62fade9d045b54cd76d7036')
def tearDown(self):
super(PayjpTestCase, self).tearDown()
for attr in self.RESTORE_ATTRIBUTES:
setattr(payjp, attr, self._payjp_original_attributes[attr])
# Python < 2.7 compatibility
def assertRaisesRegexp(self, exception, regexp, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except exception as err:
if regexp is None:
return True
if isinstance(regexp, string_types):
regexp = re.compile(regexp)
if not regexp.search(str(err)):
raise self.failureException('"%s" does not match "%s"' %
(regexp.pattern, str(err)))
else:
raise self.failureException(
'%s was not raised' % (exception.__name__,))
class PayjpUnitTestCase(PayjpTestCase):
REQUEST_LIBRARIES = ['requests']
def setUp(self):
super(PayjpUnitTestCase, self).setUp()
self.request_patchers = {}
self.request_mocks = {}
for lib in self.REQUEST_LIBRARIES:
patcher = patch("payjp.http_client.%s" % (lib,))
self.request_mocks[lib] = patcher.start()
self.request_patchers[lib] = patcher
def tearDown(self):
super(PayjpUnitTestCase, self).tearDown()
for patcher in self.request_patchers.values():
patcher.stop()
class PayjpApiTestCase(PayjpTestCase):
def setUp(self):
super(PayjpApiTestCase, self).setUp()
self.requestor_patcher = patch('payjp.api_requestor.APIRequestor')
self.requestor_class_mock = self.requestor_patcher.start()
self.requestor_mock = self.requestor_class_mock.return_value
def tearDown(self):
super(PayjpApiTestCase, self).tearDown()
self.requestor_patcher.stop()
def mock_response(self, res):
self.requestor_mock.request = Mock(return_value=(res, 'reskey'))
class MyResource(payjp.resource.APIResource):
pass
class MyListable(payjp.resource.ListableAPIResource):
pass
class MyCreatable(payjp.resource.CreateableAPIResource):
pass
class MyUpdateable(payjp.resource.UpdateableAPIResource):
pass
class MyDeletable(payjp.resource.DeletableAPIResource):
pass
class MyComposite(payjp.resource.ListableAPIResource,
payjp.resource.CreateableAPIResource,
payjp.resource.UpdateableAPIResource,
payjp.resource.DeletableAPIResource):
pass
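# Usage sketch (assumed test flow, not part of the original helper): a unit
# test can subclass PayjpApiTestCase and feed a canned response through
# mock_response before exercising one of the resource classes above.
#
#   class MyResourceTest(PayjpApiTestCase):
#       def test_retrieve(self):
#           self.mock_response({'id': 'res_1', 'object': 'myresource'})
#           res = MyResource.retrieve('res_1')
#           self.assertEqual('res_1', res.id)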
|
mit
|
millaguie/Vernam
|
vernam/message.py
|
1
|
4778
|
# -*- coding: utf-8 -*-
"""
Message module holds all methods to work with message files
"""
import sys
import os
import array
from struct import pack
from struct import unpack
import uuid
import hashlib
import keymanagement
import yaml
import ownbase32
from util import hashSum
L2RHEADER = bytearray([222, 210, 7, 163, 100])
R2LHEADER = bytearray([222, 210, 7, 163, 101])
def readMessage(keyPath, messagePath):
"""
This function reads a message (envelope) in the defined format, and
returns the data inside the file, the offset in the key file, and the
reading mode for the key.
It checks that the message key UUID matches the defined key UUID, and also
checks the consistency of the message via sha512.
Args:
* keyPath: path to the file used as key
* messagePath: path to the file used to read the message
Returns:
The offset in the key, the reading direction (L2R flag), and the data inside the envelope
"""
keyUUID = keymanagement.getCatalogUUID(keyPath)
with open(messagePath, "rb") as file:
header = bytearray(unpack(">iiiii", file.read(5*4)))
if header == L2RHEADER:
L2R = True
elif header == R2LHEADER:
L2R = False
else:
raise ValueError("File format unknown")
msgSize = unpack(">Q", file.read(8))[0]
offsetInKey = unpack(">Q", file.read(8))[0]
msgKeyUUID1, msgKeyUUID2 = unpack(">QQ", file.read(16))
msgKeyUUID = (msgKeyUUID1 << 64) | msgKeyUUID2
if keyUUID.int != msgKeyUUID:
raise ValueError("Bad Key UUID")
message = unpack(">{}s".format(msgSize), file.read(msgSize))[0]
msgFileHash = file.read()
if hashSum(message) != msgFileHash.encode("hex"):
raise ValueError("Failed to hash message ")
return offsetInKey, L2R, message
def writeHumanMessage(outputPath, message, seek):
"""
This function writes a message in the human friendly format.
Format of the message is as follows:
offset#message
Args:
* outputPath: path to the new message file
* message: message to write in the file
* seek: offset in key to decrypt message
Returns:
None
"""
with open(outputPath, "w") as f:
f.write("{}#{}".format(seek, ownbase32.ba2ob32string(message)))
def readHumanMessage(inputPath):
"""
This function reads a message in the human friendly format.
Function will return two elements, key offset and the
encrypted message.
Args:
* inputPath: path to the message file to read
Returns:
An array:
* Offset in the key
* Encrypted message
"""
with open(inputPath, "r") as f:
s = f.read()
s = s.split("#")
return int(s[0]), s[1]
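# Round-trip sketch (hypothetical path; assumes `ciphered` is a bytearray
# already suitable for ownbase32 encoding):
#
#   writeHumanMessage('/tmp/msg.txt', ciphered, 1024)
#   offset, encoded = readHumanMessage('/tmp/msg.txt')
#   # offset == 1024; encoded is the ownbase32 string after the '#'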
def writeMessage(keyPath, messagePath, ciphered, offsetInKey, l2r=True):
"""
This function writes a message in the defined format. The format of the message is as follows:
* Header, 20 bytes, as defined; two variants, one for R2L and one for L2R (R2L support is on the TODO list).
* Message size, an 8-byte (64-bit) integer
* Offset in the key, an 8-byte (64-bit) integer
* Key UUID used for the message, 16 bytes
* The message itself; its size is given by the message-size field
* Hash of the message (sha512)
Args:
* keyPath: path to the file used as key
* messagePath: path to the file used to store the message
* ciphered: ciphered data to write in the file (envelope)
* offsetInKey: need to jump to this byte in key to decrypt
* l2r: indicates whether the key will need to be read R2L (False) or L2R (True)
Returns:
None
"""
keyUUID = keymanagement.getCatalogUUID(keyPath)
msgSize = len(ciphered)
with open(messagePath, "wb") as file:
max_int64 = 0xFFFFFFFFFFFFFFFF
# Write file header right to left or left to right
if l2r is True:
file.write(pack(">iiiii", *L2RHEADER))
else:
file.write(pack(">iiiii", *R2LHEADER))
offsetInKey = offsetInKey + msgSize
# Write message size in bytes
file.write(pack(">Q", msgSize))
# Write offset in key to decrypt message
file.write(pack(">Q", offsetInKey))
# Write Key UUID for easy key management
file.write(pack('>QQ', (keyUUID.int >> 64) & max_int64,
keyUUID.int & max_int64))
# Write the message itself
ciphered = str(ciphered)
file.write(pack(">{}s".format(msgSize), ciphered))
# Get hash for the message
msgHash = hashSum(ciphered)
msgHashint = msgHash.decode("hex")
msgHashArray = bytearray(msgHashint)
hashSize = msgHashArray.count(msgHashArray)  # NOTE: always 1; appears to be dead code
print("WRITE -> offset: {}, L2R: {}".format(offsetInKey, l2r))
file.write(msgHashArray)
|
bsd-3-clause
|
dhruvagarwal/django
|
django/utils/_os.py
|
502
|
3581
|
from __future__ import unicode_literals
import os
import sys
import tempfile
from os.path import abspath, dirname, isabs, join, normcase, normpath, sep
from django.core.exceptions import SuspiciousFileOperation
from django.utils import six
from django.utils.encoding import force_text
if six.PY2:
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
# Under Python 2, define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII characters
# in it. This isn't necessary on Windows since the Windows version of abspath
# handles this correctly. It also handles drive letters differently than the
# pure Python implementation, so it's best not to replace it.
if six.PY3 or os.name == 'nt':
abspathu = abspath
else:
def abspathu(path):
"""
Version of os.path.abspath that uses the unicode representation
of the current working directory, thus avoiding a UnicodeDecodeError
in join when the cwd has non-ASCII characters.
"""
if not isabs(path):
path = join(os.getcwdu(), path)
return normpath(path)
def upath(path):
"""
Always return a unicode path.
"""
if six.PY2 and not isinstance(path, six.text_type):
return path.decode(fs_encoding)
return path
def npath(path):
"""
Always return a native path, that is unicode on Python 3 and bytestring on
Python 2.
"""
if six.PY2 and not isinstance(path, bytes):
return path.encode(fs_encoding)
return path
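# Illustrative behaviour (assuming Python 2 with a UTF-8 filesystem encoding):
#
#   upath(b'caf\xc3\xa9')  # -> u'caf\xe9' (bytes decoded to unicode)
#   npath(u'caf\xe9')      # -> b'caf\xc3\xa9' on PY2; returned unchanged on PY3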
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
base = force_text(base)
paths = [force_text(p) for p in paths]
final_path = abspathu(join(base, *paths))
base_path = abspathu(base)
# Ensure final_path starts with base_path (using normcase to ensure we
# don't false-negative on case insensitive operating systems like Windows),
# further, one of the following conditions must be true:
# a) The next character is the path separator (to prevent conditions like
# safe_join("/dir", "/../d"))
# b) The final path must be the same as the base path.
# c) The base path must be the most root path (meaning either "/" or "C:\\")
if (not normcase(final_path).startswith(normcase(base_path + sep)) and
normcase(final_path) != normcase(base_path) and
dirname(normcase(base_path)) != normcase(base_path)):
raise SuspiciousFileOperation(
'The joined path ({}) is located outside of the base path '
'component ({})'.format(final_path, base_path))
return final_path
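# Illustrative sketch (hypothetical paths): safe_join normalizes and then
# verifies containment, so path traversal outside the base raises.
#
#   safe_join('/var/www', 'media', 'img.png')  # -> '/var/www/media/img.png'
#   safe_join('/var/www', '../etc/passwd')     # raises SuspiciousFileOperation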
def symlinks_supported():
"""
A function to check whether creating symlinks is supported on the
host platform and/or whether they are allowed to be created (e.g.
on Windows it requires admin permissions).
"""
tmpdir = tempfile.mkdtemp()
original_path = os.path.join(tmpdir, 'original')
symlink_path = os.path.join(tmpdir, 'symlink')
os.makedirs(original_path)
try:
os.symlink(original_path, symlink_path)
supported = True
except (OSError, NotImplementedError, AttributeError):
supported = False
else:
os.remove(symlink_path)
finally:
os.rmdir(original_path)
os.rmdir(tmpdir)
return supported
|
bsd-3-clause
|
kalessin/scrapy
|
docs/utils/linkfix.py
|
141
|
1764
|
#!/usr/bin/python
"""
Linkfix - a companion to sphinx's linkcheck builder.
Uses linkcheck's output file to fix links in docs.
Originally created for this issue:
https://github.com/scrapy/scrapy/issues/606
Author: dufferzafar
"""
import re
# Used for remembering the file (and its contents)
# so we don't have to open the same file again.
_filename = None
_contents = None
# A regex that matches standard linkcheck output lines
line_re = re.compile(ur'(.*)\:\d+\:\s\[(.*)\]\s(?:(.*)\sto\s(.*)|(.*))')
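# A sample line this regex is expected to match (illustrative), following the
# "file:lineno: [errortype] old_url to new_url" shape of linkcheck output:
#
#   topics/index.rst:123: [redirected permanently] http://old/ to http://new/
#
# Groups: (1) filename, (2) error type, (3) old link, (4) new link.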
# Read lines from the linkcheck output file
try:
with open("build/linkcheck/output.txt") as out:
output_lines = out.readlines()
except IOError:
print("linkcheck output not found; please run linkcheck first.")
exit(1)
# For every line, fix the respective file
for line in output_lines:
match = re.match(line_re, line)
if match:
newfilename = match.group(1)
errortype = match.group(2)
# Broken links can't be fixed and
# I am not sure what to do with the local ones.
if errortype.lower() in ["broken", "local"]:
print("Not Fixed: " + line)
else:
# If this is a new file
if newfilename != _filename:
# Update the previous file
if _filename:
with open(_filename, "w") as _file:
_file.write(_contents)
_filename = newfilename
# Read the new file to memory
with open(_filename) as _file:
_contents = _file.read()
_contents = _contents.replace(match.group(3), match.group(4))
else:
# We don't understand what the current line means!
print("Not Understood: " + line)
|
bsd-3-clause
|
leejir/darkforce
|
juggle/codegen/deletenote.py
|
3
|
1137
|
# 2014-12-17
# build by qianqians
# deletenote
def deletenote(filestr):
genfilestr = []
count = 0
errornote = ""
for i in xrange(len(filestr)):
str = filestr[i]
while(1):
if count == 1:
indexafter = str.find("*/")
if indexafter != -1:
str = str[indexafter+2:]
count = 0
else:
break
index = str.find('//')
if index != -1:
str = str[0:index]
else:
indexbegin = str.find("/*")
if indexbegin != -1:
errornote = str
indexafter = str.find("*/")
if indexafter != -1:
str = str[0:indexbegin] + str[indexafter+2:]
else:
count = 1
break
if str is not "":
genfilestr.append(str)
break
if count == 1:
raise Exception("c/c++ coding error unpaired /* ", errornote)
return genfilestr
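# Illustrative sketch: stripping // and /* ... */ comments from a list of
# source lines (the function takes and returns a list of strings).
#
#   deletenote(['int a; // counter', '/* doc */ int b;', 'int c;'])
#   # -> ['int a; ', ' int b;', 'int c;']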
|
gpl-3.0
|
Ivoz/pip
|
pip/_vendor/requests/packages/urllib3/util.py
|
248
|
21407
|
# urllib3/util.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from base64 import b64encode
from binascii import hexlify, unhexlify
from collections import namedtuple
from hashlib import md5, sha1
from socket import error as SocketError, _GLOBAL_DEFAULT_TIMEOUT
import time
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
from .packages import six
from .exceptions import LocationParseError, SSLError, TimeoutStateError
_Default = object()
# The default timeout to use for socket connections. This is the attribute used
# by httplib to define the default timeout
def current_time():
"""
Retrieve the current time. This function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
"""
Utility object for storing timeout values.
Example usage:
.. code-block:: python
timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
pool.request(...) # Etc, etc
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response. Specifically, Python's DNS resolver does not obey the
timeout specified on the socket. Other factors that can affect total
request time include high CPU load, high swap, the program running at a
low priority level, or other behaviors. The observed running time for
urllib3 to return a response may be greater than the value passed to
`total`.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not ever trigger, even though the request will
take several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is used
for clear error messages
:return: the value
:raises ValueError: if the type is not an integer or a float, or if it
is a numeric value less than zero
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value passed
to this function.
:param timeout: The legacy timeout value
:type timeout: integer, float, sentinel default object, or None
:return: a Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: the elapsed time
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: the connect timeout
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: the value to use for the read timeout
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# in case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
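# Illustrative sketch of the connect/read budget interaction (values are
# arbitrary): with a total budget set, the effective read timeout shrinks
# by however long the connect phase took.
#
#   t = Timeout(connect=2.0, read=7.0, total=8.0).clone()
#   t.start_connect()
#   # ... suppose connecting took 3 seconds ...
#   # t.read_timeout -> max(0, min(8.0 - 3.0, 7.0)) == 5.0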
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
__slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example: ::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example: ::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementation does silly things to be optimal
# on CPython.
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError("Failed to parse: %s" % url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = 'gzip,deflate'
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(six.b(proxy_basic_auth)).decode('utf-8')
return headers
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if not sock: # Platform-specific: AppEngine
return False
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except SocketError:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
(So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
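# Illustrative behaviour:
#
#   resolve_cert_reqs(None)             # -> ssl.CERT_NONE
#   resolve_cert_reqs('CERT_REQUIRED')  # -> ssl.CERT_REQUIRED
#   resolve_cert_reqs('REQUIRED')       # -> ssl.CERT_REQUIRED (abbreviation)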
def resolve_ssl_version(candidate):
"""
like resolve_cert_reqs
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, rest = divmod(len(fingerprint), 2)
if rest or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
# We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
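# Illustrative sketch (hypothetical certificate bytes): the fingerprint's
# length selects md5 (16 bytes) or sha1 (20 bytes) before comparing digests.
#
#   der = b'...DER-encoded certificate...'
#   assert_fingerprint(der, sha1(der).hexdigest())  # passes silently
#   assert_fingerprint(der, '00' * 20)              # raises SSLError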
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
if hasattr(obj, 'fp'):
# Object is a container for another file-like object that gets released
# on exhaustion (e.g. HTTPResponse)
return obj.fp is None
return obj.closed
if SSLContext is not None: # Python 3.2+
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
"""
All arguments except `server_hostname` have the same meaning as for
:func:`ssl.wrap_socket`
:param server_hostname:
Hostname of the expected certificate
"""
context = SSLContext(ssl_version)
context.verify_mode = cert_reqs
# Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
context.options |= OP_NO_COMPRESSION
if ca_certs:
try:
context.load_verify_locations(ca_certs)
# Py32 raises IOError
# Py33 raises FileNotFoundError
except Exception as e: # Reraise as SSLError
raise SSLError(e)
if certfile:
# FIXME: This block needs a test.
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
else: # Python 3.1 and earlier
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs, cert_reqs=cert_reqs,
ssl_version=ssl_version)
|
mit
|
mohamed--abdel-maksoud/chromium.src
|
mojo/public/third_party/jinja2/environment.py
|
614
|
47244
|
# -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.nodes import EvalContext
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound, TemplateRuntimeError
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode
from jinja2._compat import imap, ifilter, string_types, iteritems, \
text_type, reraise, implements_iterator, implements_to_string, \
get_next, encode_filename, PY2, PYPY
from functools import reduce
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
"""Return a new spontaneous environment. A spontaneous environment is an
unnamed and inaccessible (in theory) environment that is used for
templates generated from a string and not from the file system.
"""
try:
env = _spontaneous_environments.get(args)
except TypeError:
return Environment(*args)
if env is not None:
return env
_spontaneous_environments[args] = env = Environment(*args)
env.shared = True
return env
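# Illustrative note: identical construction arguments share one cached,
# "shared" Environment instance (assuming the args are hashable), e.g.:
#
#   env1 = get_spontaneous_environment()  # created and cached on first use
#   env2 = get_spontaneous_environment()  # env2 is env1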
def create_cache(size):
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size)
def copy_cache(cache):
"""Create an empty copy of the given cache."""
if cache is None:
return None
elif type(cache) is dict:
return {}
return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
Returns a dict of instantiated environments.
"""
result = {}
for extension in extensions:
if isinstance(extension, string_types):
extension = import_string(extension)
result[extension.identifier] = extension(environment)
return result
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of Undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here are the possible initialization parameters:
`block_start_string`
The string marking the beginning of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the beginning of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the beginning of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`lstrip_blocks`
If this is set to ``True`` leading spaces and tabs are stripped
from the start of a line to a block. Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`keep_trailing_newline`
Preserve the trailing newline when rendering templates.
The default is ``False``, which causes a single newline,
if present, to be stripped from the end of the template.
.. versionadded:: 2.7
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on whether autoescape should
be enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``50`` which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (i.e. the file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
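A minimal usage sketch (the options shown are illustrative, not
required)::
    >>> env = Environment(trim_blocks=True, lstrip_blocks=True)
    >>> env.from_string('Hello {{ name }}!').render(name='World')
    u'Hello World!'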
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox. This flag alone controls the code
#: generation by the compiler.
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False,
loader=None,
cache_size=50,
auto_reload=True,
bytecode_cache=None):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the order (or the defaults!) of the existing
# arguments.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.lstrip_blocks = lstrip_blocks
self.newline_sequence = newline_sequence
self.keep_trailing_newline = keep_trailing_newline
# runtime information
self.undefined = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# load extensions
self.extensions = load_extensions(self, extensions)
_environment_sanity_check(self)
def add_extension(self, extension):
"""Adds an extension after the environment was created.
.. versionadded:: 2.5
"""
self.extensions.update(load_extensions(self, [extension]))
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in iteritems(attributes):
if not hasattr(self, key):
setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, lstrip_blocks=missing,
extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except of cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not
show through.
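A small usage sketch (the attribute chosen is illustrative; assumes
``env`` is an existing environment)::
    overlay_env = env.overlay(auto_reload=False)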
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in iteritems(args):
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in iteritems(self.extensions):
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
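# Illustrative contrast between the two lookups above (values are
# hypothetical): for d = {'items': 1}, env.getitem(d, 'items') returns 1
# because the subscript wins, while env.getattr(d, 'items') returns the
# bound dict.items method because the attribute wins.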
def call_filter(self, name, value, args=None, kwargs=None,
context=None, eval_ctx=None):
"""Invokes a filter on a value the same way the compiler does it.
.. versionadded:: 2.7
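A short sketch using a built-in filter::
    >>> Environment().call_filter('upper', 'foo')
    u'FOO'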
"""
func = self.filters.get(name)
if func is None:
raise TemplateRuntimeError('no filter named %r' % name)
args = [value] + list(args or ())
if getattr(func, 'contextfilter', False):
if context is None:
raise TemplateRuntimeError('Attempted to invoke context '
'filter without context')
args.insert(0, context)
elif getattr(func, 'evalcontextfilter', False):
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
elif getattr(func, 'environmentfilter', False):
args.insert(0, self)
return func(*args, **(kwargs or {}))
def call_test(self, name, value, args=None, kwargs=None):
"""Invokes a test on a value the same way the compiler does it.
.. versionadded:: 2.7
"""
func = self.tests.get(name)
if func is None:
raise TemplateRuntimeError('no test named %r' % name)
return func(value, *(args or ()), **(kwargs or {}))
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = text_type(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), text_type(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
def _generate(self, source, name, filename, defer_init=False):
"""Internal hook that can be overridden to hook a different generate
method in.
.. versionadded:: 2.5
"""
return generate(source, self, name, filename, defer_init=defer_init)
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, 'exec')
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
The `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
`defer_init` is used internally to aid the module code generator. It
causes the generated code to be importable without the global
environment variable being set.
.. versionadded:: 2.4
`defer_init` parameter added.
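A sketch of `raw` usage (the source string is illustrative)::
    py_source = env.compile('Hello {{ name }}', raw=True)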
"""
source_hint = None
try:
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = self._generate(source, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
By default the deflate zip algorithm is used; to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files. This flag does not do anything
on PyPy or Python 3, where pyc files are not picked up automatically
and don't give much benefit.
.. versionadded:: 2.4
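A sketch (the target path is illustrative)::
    env.compile_templates('/tmp/compiled_templates.zip')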
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
if not PY2 or PYPY:
from warnings import warn
warn(Warning('py_compile has no effect on pypy or Python 3'))
py_compile = False
else:
import imp, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError as e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = self._compile(code, encode_filename(filename))
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
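Two equivalent filtering sketches (the extension is illustrative)::
    env.list_templates(extensions=['html'])
    env.list_templates(filter_func=lambda name: name.endswith('.html'))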
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = ifilter(filter_func, x)
return x
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
reraise(exc_type, exc_value, tb)
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
method ask the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
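A sketch (the template name is illustrative)::
    tmpl = env.get_template('index.html')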
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, string_types):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
class Template(object):
"""The central template object. This class represents a compiled template
and is used to evaluate it.
Normally the template object is generated from an :class:`Environment` but
it also has a constructor that makes it possible to create a template
instance directly using the constructor. It takes the same arguments as
the environment constructor but it's not possible to specify a loader.
Every template object has a few methods and members that are guaranteed
to exist. However, it's important that a template object be
considered immutable; modifications on the object are not supported.
Template objects created from the constructor rather than an environment
do have an `environment` attribute that points to a temporary environment
that is probably shared with other templates created with the constructor
and compatible settings.
>>> template = Template('Hello {{ name }}!')
>>> template.render(name='John Doe')
u'Hello John Doe!'
>>> stream = template.stream(name='John Doe')
>>> stream.next()
u'Hello John Doe!'
>>> stream.next()
Traceback (most recent call last):
...
StopIteration
"""
def __new__(cls, source,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
lstrip_blocks=LSTRIP_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
keep_trailing_newline=KEEP_TRAILING_NEWLINE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False):
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
line_statement_prefix, line_comment_prefix, trim_blocks,
lstrip_blocks, newline_sequence, keep_trailing_newline,
frozenset(extensions), optimized, undefined, finalize, autoescape,
None, 0, False, None)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace['name']
t.filename = namespace['__file__']
t.blocks = namespace['blocks']
# render function and module
t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
namespace['environment'] = environment
namespace['__jinja_template__'] = t
return t
def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
is passed as is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals))
@property
def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> unicode(t.module)
u'23'
>>> t.module.foo()
u'42'
"""
if self._module is not None:
return self._module
self._module = rv = self.make_module()
return rv
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self):
"""The debug info mapping."""
return [tuple(imap(int, x.split('='))) for x in
self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.name)
return '<%s %s>' % (self.__class__.__name__, name)
@implements_to_string
class TemplateModule(object):
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally,
converting it into a unicode string or bytestring renders the contents.
"""
def __init__(self, template, context):
self._body_stream = list(template.root_render_func(context))
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self):
return Markup(concat(self._body_stream))
def __str__(self):
return concat(self._body_stream)
def __repr__(self):
if self.__name__ is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates the expression-like access
to the template with an expression it wraps.
"""
def __init__(self, template, undefined_to_none):
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
@implements_iterator
class TemplateStream(object):
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one unicode string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new unicode string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
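A buffering sketch (template name and size are illustrative)::
    stream = env.get_template('big.html').stream()
    stream.enable_buffering(size=10)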
"""
def __init__(self, gen):
self._gen = gen
self.disable_buffering()
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
Per default unicode strings are written, if you want to encode
before writing specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, string_types):
fp = open(fp, encoding is None and 'w' or 'wb')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self):
"""Disable the output buffering."""
self._next = get_next(self._gen)
self.buffered = False
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError('buffer size too small')
def generator(next):
buf = []
c_size = 0
push = buf.append
while 1:
try:
while c_size < size:
c = next()
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
self.buffered = True
self._next = get_next(generator(get_next(self._gen)))
def __iter__(self):
return self
def __next__(self):
return self._next()
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
|
bsd-3-clause
|
R4stl1n/allianceauth
|
allianceauth/services/modules/smf/views.py
|
5
|
5057
|
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.shortcuts import render, redirect
from allianceauth.services.forms import ServicePasswordForm
from .manager import SmfManager
from .models import SmfUser
from .tasks import SmfTasks
logger = logging.getLogger(__name__)
ACCESS_PERM = 'smf.access_smf'
@login_required
@permission_required(ACCESS_PERM)
def activate_smf(request):
logger.debug("activate_smf called by user %s" % request.user)
# Valid, now get the main character
character = request.user.profile.main_character
logger.debug("Adding smf user for user %s with main character %s" % (request.user, character))
result = SmfManager.add_user(SmfTasks.get_username(request.user), request.user.email, ['Member'],
character.character_id)
# an empty username means we failed
if result[0] != "":
SmfUser.objects.update_or_create(user=request.user, defaults={'username': result[0]})
logger.debug("Updated authserviceinfo for user %s with smf credentials. Updating groups." % request.user)
SmfTasks.update_groups.delay(request.user.pk)
logger.info("Successfully activated smf for user %s" % request.user)
messages.success(request, 'Activated SMF account.')
credentials = {
'username': result[0],
'password': result[1],
}
return render(request, 'services/service_credentials.html',
context={'credentials': credentials, 'service': 'SMF'})
else:
logger.error("Unsuccessful attempt to activate smf for user %s" % request.user)
messages.error(request, 'An error occurred while processing your SMF account.')
return redirect("services:services")
@login_required
@permission_required(ACCESS_PERM)
def deactivate_smf(request):
logger.debug("deactivate_smf called by user %s" % request.user)
result = SmfTasks.delete_user(request.user)
# a False result means we failed
if result:
logger.info("Successfully deactivated smf for user %s" % request.user)
messages.success(request, 'Deactivated SMF account.')
else:
logger.error("Unsuccessful attempt to activate smf for user %s" % request.user)
messages.error(request, 'An error occurred while processing your SMF account.')
return redirect("services:services")
@login_required
@permission_required(ACCESS_PERM)
def reset_smf_password(request):
logger.debug("reset_smf_password called by user %s" % request.user)
character = request.user.profile.main_character
if SmfTasks.has_account(request.user) and character is not None:
result = SmfManager.update_user_password(request.user.smf.username, character.character_id)
# an empty string means we failed
if result != "":
logger.info("Successfully reset smf password for user %s" % request.user)
messages.success(request, 'Reset SMF password.')
credentials = {
'username': request.user.smf.username,
'password': result,
}
return render(request, 'services/service_credentials.html',
context={'credentials': credentials, 'service': 'SMF'})
logger.error("Unsuccessful attempt to reset smf password for user %s" % request.user)
messages.error(request, 'An error occurred while processing your SMF account.')
return redirect("services:services")
@login_required
@permission_required(ACCESS_PERM)
def set_smf_password(request):
logger.debug("set_smf_password called by user %s" % request.user)
if request.method == 'POST':
logger.debug("Received POST request with form.")
form = ServicePasswordForm(request.POST)
logger.debug("Form is valid: %s" % form.is_valid())
character = request.user.profile.main_character
if form.is_valid() and SmfTasks.has_account(request.user) and character is not None:
password = form.cleaned_data['password']
logger.debug("Form contains password of length %s" % len(password))
result = SmfManager.update_user_password(request.user.smf.username, character.character_id,
password=password)
if result != "":
logger.info("Successfully set smf password for user %s" % request.user)
messages.success(request, 'Set SMF password.')
else:
logger.error("Failed to install custom smf password for user %s" % request.user)
messages.error(request, 'An error occurred while processing your SMF account.')
return redirect("services:services")
else:
logger.debug("Request is not type POST - providing empty form.")
form = ServicePasswordForm()
logger.debug("Rendering form for user %s" % request.user)
context = {'form': form, 'service': 'SMF'}
return render(request, 'services/service_password.html', context=context)
|
gpl-2.0
|
u9621071/kernel-uek-UEK3
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
11670
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
gpl-2.0
|
pmoulon/TheiaSfM
|
docs/make_docs.py
|
19
|
2872
|
#!/usr/bin/python
#
# Ceres Solver - A fast non-linear least squares minimizer
# Copyright 2013 Google Inc. All rights reserved.
# http://code.google.com/p/ceres-solver/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: [email protected] (Sameer Agarwal)
#
# Note: You will need Sphinx and Pygments installed for this to work.
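# Example invocation (paths are illustrative):
#   python make_docs.py /path/to/project /path/to/build [sphinx-build-path]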
import glob
import os
import sys
# Number of arguments
N = len(sys.argv)
if N < 3:
print "make_docs.py src_root destination_root"
sys.exit(1)
src_dir = sys.argv[1] + "/docs/source"
build_root = sys.argv[2]
cache_dir = build_root + "/doctrees"
html_dir = build_root + "/html"
# Called from Command Line
if N == 3:
sphinx_exe = "sphinx-build"
# Called from CMake (using the SPHINX_EXECUTABLE found)
elif N == 4:
sphinx_exe = sys.argv[3]
# Run Sphinx to build the documentation.
os.system("%s -b html -d %s %s %s" %(sphinx_exe, cache_dir, src_dir, html_dir))
input_pattern = """config=TeX-AMS-MML_HTMLorMML"></script>"""
output_pattern = """config=TeX-AMS_HTML">
MathJax.Hub.Config({
"HTML-CSS": {
availableFonts: ["TeX"]
}
});
</script>"""
# By default MathJax does not use TeX fonts. This simple search
# and replace fixes that.
for name in glob.glob("%s/*.html" % html_dir):
print "Postprocessing: ", name
fptr = open(name)
out = fptr.read().replace(input_pattern, output_pattern)
fptr.close()
fptr = open(name, "w")
fptr.write(out)
fptr.close()
|
bsd-3-clause
|
cloudify-cosmo/softlayer-python
|
SoftLayer/managers/sshkey.py
|
5
|
2631
|
"""
SoftLayer.sshkey
~~~~~~~~~~~~~~~~
SSH Key Manager/helpers
:license: MIT, see LICENSE for more details.
"""
from SoftLayer import utils
class SshKeyManager(utils.IdentifierMixin, object):
"""Manages account SSH keys.
:param SoftLayer.API.Client client: an API client instance
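Usage sketch (client construction not shown; key text and label are
illustrative)::
    mgr = SshKeyManager(client)
    new_key = mgr.add_key('ssh-rsa AAAA...', 'my-laptop')
    keys = mgr.list_keys(label='my-laptop')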
"""
def __init__(self, client):
self.client = client
self.sshkey = client['Security_Ssh_Key']
self.resolvers = [self._get_ids_from_label]
def add_key(self, key, label, notes=None):
"""Adds a new SSH key to the account.
:param string key: The SSH key to add
:param string label: The label for the key
:returns: A dictionary of the new key's information.
"""
order = {
'key': key,
'label': label,
'notes': notes,
}
return self.sshkey.createObject(order)
def delete_key(self, key_id):
"""Permanently deletes an SSH key from the account.
:param int key_id: The ID of the key to delete
"""
return self.sshkey.deleteObject(id=key_id)
def edit_key(self, key_id, label=None, notes=None):
"""Edits information about an SSH key.
:param int key_id: The ID of the key to edit
:param string label: The new label for the key
:param string notes: Notes to set or change on the key
:returns: A Boolean indicating success or failure
"""
data = {}
if label:
data['label'] = label
if notes:
data['notes'] = notes
return self.sshkey.editObject(data, id=key_id)
def get_key(self, key_id):
"""Returns full information about a single SSH key.
:param int key_id: The ID of the key to retrieve
:returns: A dictionary of information about the key
"""
return self.sshkey.getObject(id=key_id)
def list_keys(self, label=None):
"""Lists all SSH keys on the account.
:param string label: Filter list based on SSH key label
:returns: A list of dictionaries with information about each key
"""
_filter = utils.NestedDict({})
if label:
_filter['sshKeys']['label'] = utils.query_filter(label)
return self.client['Account'].getSshKeys(filter=_filter.to_dict())
def _get_ids_from_label(self, label):
"""Return sshkey IDs which match the given label."""
keys = self.list_keys()
results = []
for key in keys:
if key['label'] == label:
results.append(key['id'])
return results
|
mit
|
agramian/PythonExternalProgramTestFramework
|
reportlab/graphics/samples/radar.py
|
42
|
3252
|
#Autogenerated by ReportLab guiedit do not edit
from reportlab.graphics.charts.legends import Legend
from reportlab.graphics.samples.excelcolors import *
from reportlab.graphics.charts.spider import SpiderChart
from reportlab.graphics.shapes import Drawing, _DrawingEditorMixin, String
from reportlab.graphics.charts.textlabels import Label
class RadarChart(_DrawingEditorMixin,Drawing):
def __init__(self,width=200,height=150,*args,**kw):
Drawing.__init__(self,width,height,*args,**kw)
self._add(self,SpiderChart(),name='chart',validate=None,desc="The main chart")
self.chart.width = 90
self.chart.height = 90
self.chart.x = 45
self.chart.y = 25
self.chart.strands[0].strokeColor= color01
self.chart.strands[1].strokeColor= color02
self.chart.strands[2].strokeColor= color03
self.chart.strands[3].strokeColor= color04
self.chart.strands[4].strokeColor= color05
self.chart.strands[5].strokeColor= color06
self.chart.strands[6].strokeColor= color07
self.chart.strands[7].strokeColor= color08
self.chart.strands[8].strokeColor= color09
self.chart.strands[9].strokeColor= color10
self.chart.strands[0].fillColor = None
self.chart.strands[1].fillColor = None
self.chart.strands[2].fillColor = None
self.chart.strands[3].fillColor = None
self.chart.strands[4].fillColor = None
self.chart.strands[5].fillColor = None
self.chart.strands[6].fillColor = None
self.chart.strands[7].fillColor = None
self.chart.strands[8].fillColor = None
self.chart.strands[9].fillColor = None
self.chart.strands.strokeWidth = 1
self.chart.strandLabels.fontName = 'Helvetica'
self.chart.strandLabels.fontSize = 6
self.chart.fillColor = backgroundGrey
self.chart.data = [(125, 180, 200), (100, 150, 180)]
self.chart.labels = ['North', 'South', 'Central']
self._add(self,Label(),name='Title',validate=None,desc="The title at the top of the chart")
self.Title.fontName = 'Helvetica-Bold'
self.Title.fontSize = 7
self.Title.x = 100
self.Title.y = 135
self.Title._text = 'Chart Title'
self.Title.maxWidth = 180
self.Title.height = 20
self.Title.textAnchor ='middle'
self._add(self,Legend(),name='Legend',validate=None,desc="The legend or key for the chart")
self.Legend.colorNamePairs = [(color01, 'Widgets'), (color02, 'Sprockets')]
self.Legend.fontName = 'Helvetica'
self.Legend.fontSize = 7
self.Legend.x = 153
self.Legend.y = 85
self.Legend.dxTextSpace = 5
self.Legend.dy = 5
self.Legend.dx = 5
self.Legend.deltay = 5
self.Legend.alignment ='right'
self._add(self,0,name='preview',validate=None,desc=None)
if __name__=="__main__": #NORUNTESTS
RadarChart().save(formats=['pdf'],outDir=None,fnRoot='radar')
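# Note (editor's addition): only strands[0] and strands[1] are exercised by
# the two series in self.chart.data; the other strand colors are pre-assigned,
# presumably so series added later in guiedit pick up the excel-style palette
# automatically.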
|
mit
|
TheMOOCAgency/edx-platform
|
openedx/core/djangoapps/course_groups/tests/test_partition_scheme.py
|
7
|
16239
|
"""
Test the partitions and partitions service
"""
import json
from django.conf import settings
import django.test
from mock import patch
from nose.plugins.attrib import attr
from unittest import skipUnless
from courseware.masquerade import handle_ajax, setup_masquerade
from courseware.tests.test_masquerade import StaffMasqueradeTestCase
from student.tests.factories import UserFactory
from xmodule.partitions.partitions import Group, UserPartition, UserPartitionError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.factories import ToyCourseFactory
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from ..partition_scheme import CohortPartitionScheme, get_cohorted_user_partition
from ..models import CourseUserGroupPartitionGroup
from ..views import link_cohort_to_partition_group, unlink_cohort_partition_group
from ..cohorts import add_user_to_cohort, remove_user_from_cohort, get_course_cohorts
from .helpers import CohortFactory, config_course_cohorts
@attr(shard=2)
class TestCohortPartitionScheme(ModuleStoreTestCase):
"""
Test the logic for linking a user to a partition group based on their cohort.
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Regenerate a course with cohort configuration, partition and groups,
and a student for each test.
"""
super(TestCohortPartitionScheme, self).setUp()
self.course_key = ToyCourseFactory.create().id
self.course = modulestore().get_course(self.course_key)
config_course_cohorts(self.course, is_cohorted=True)
self.groups = [Group(10, 'Group 10'), Group(20, 'Group 20')]
self.user_partition = UserPartition(
0,
'Test Partition',
'for testing purposes',
self.groups,
scheme=CohortPartitionScheme
)
self.student = UserFactory.create()
def assert_student_in_group(self, group, partition=None):
"""
        Utility for checking that our test student comes up as assigned to the
        specified group (or, if None, to no group at all)
"""
self.assertEqual(
CohortPartitionScheme.get_group_for_user(
self.course_key,
self.student,
partition or self.user_partition,
use_cached=False
),
group
)
def test_student_cohort_assignment(self):
"""
Test that the CohortPartitionScheme continues to return the correct
group for a student as the student is moved in and out of different
cohorts.
"""
first_cohort, second_cohort = [
CohortFactory(course_id=self.course_key) for _ in range(2)
]
        # place the student into the first cohort
add_user_to_cohort(first_cohort, self.student.username)
self.assert_student_in_group(None)
# link first cohort to group 0 in the partition
link_cohort_to_partition_group(
first_cohort,
self.user_partition.id,
self.groups[0].id,
)
        # link second cohort to group 1 in the partition
link_cohort_to_partition_group(
second_cohort,
self.user_partition.id,
self.groups[1].id,
)
self.assert_student_in_group(self.groups[0])
# move student from first cohort to second cohort
add_user_to_cohort(second_cohort, self.student.username)
self.assert_student_in_group(self.groups[1])
# move the student out of the cohort
remove_user_from_cohort(second_cohort, self.student.username)
self.assert_student_in_group(None)
def test_cohort_partition_group_assignment(self):
"""
Test that the CohortPartitionScheme returns the correct group for a
student in a cohort when the cohort link is created / moved / deleted.
"""
test_cohort = CohortFactory(course_id=self.course_key)
# assign user to cohort (but cohort isn't linked to a partition group yet)
add_user_to_cohort(test_cohort, self.student.username)
# scheme should not yet find any link
self.assert_student_in_group(None)
# link cohort to group 0
link_cohort_to_partition_group(
test_cohort,
self.user_partition.id,
self.groups[0].id,
)
# now the scheme should find a link
self.assert_student_in_group(self.groups[0])
# link cohort to group 1 (first unlink it from group 0)
unlink_cohort_partition_group(test_cohort)
link_cohort_to_partition_group(
test_cohort,
self.user_partition.id,
self.groups[1].id,
)
# scheme should pick up the link
self.assert_student_in_group(self.groups[1])
# unlink cohort from anywhere
unlink_cohort_partition_group(
test_cohort,
)
# scheme should now return nothing
self.assert_student_in_group(None)
def test_student_lazily_assigned(self):
"""
Test that the lazy assignment of students to cohorts works
properly when accessed via the CohortPartitionScheme.
"""
# don't assign the student to any cohort initially
self.assert_student_in_group(None)
# get the default cohort, which is automatically created
# during the `get_course_cohorts` API call if it doesn't yet exist
cohort = get_course_cohorts(self.course)[0]
# map that cohort to a group in our partition
link_cohort_to_partition_group(
cohort,
self.user_partition.id,
self.groups[0].id,
)
# The student will be lazily assigned to the default cohort
# when CohortPartitionScheme.get_group_for_user makes its internal
# call to cohorts.get_cohort.
self.assert_student_in_group(self.groups[0])
def setup_student_in_group_0(self):
"""
Utility to set up a cohort, add our student to the cohort, and link
the cohort to self.groups[0]
"""
test_cohort = CohortFactory(course_id=self.course_key)
# link cohort to group 0
link_cohort_to_partition_group(
test_cohort,
self.user_partition.id,
self.groups[0].id,
)
# place student into cohort
add_user_to_cohort(test_cohort, self.student.username)
# check link is correct
self.assert_student_in_group(self.groups[0])
def test_partition_changes_nondestructive(self):
"""
If the name of a user partition is changed, or a group is added to the
partition, links from cohorts do not break.
If the name of a group is changed, links from cohorts do not break.
"""
self.setup_student_in_group_0()
# to simulate a non-destructive configuration change on the course, create
# a new partition with the same id and scheme but with groups renamed and
# a group added
new_groups = [Group(10, 'New Group 10'), Group(20, 'New Group 20'), Group(30, 'New Group 30')]
new_user_partition = UserPartition(
0, # same id
'Different Partition',
'dummy',
new_groups,
scheme=CohortPartitionScheme,
)
# the link should still work
self.assert_student_in_group(new_groups[0], new_user_partition)
def test_missing_group(self):
"""
If the group is deleted (or its id is changed), there's no referential
integrity enforced, so any references from cohorts to that group will be
lost. A warning should be logged when links are found from cohorts to
groups that no longer exist.
"""
self.setup_student_in_group_0()
# to simulate a destructive change on the course, create a new partition
# with the same id, but different group ids.
new_user_partition = UserPartition(
0, # same id
'Another Partition',
'dummy',
[Group(11, 'Not Group 10'), Group(21, 'Not Group 20')], # different ids
scheme=CohortPartitionScheme,
)
# the partition will be found since it has the same id, but the group
# ids aren't present anymore, so the scheme returns None (and logs a
# warning)
with patch('openedx.core.djangoapps.course_groups.partition_scheme.log') as mock_log:
self.assert_student_in_group(None, new_user_partition)
self.assertTrue(mock_log.warn.called)
self.assertRegexpMatches(mock_log.warn.call_args[0][0], 'group not found')
def test_missing_partition(self):
"""
If the user partition is deleted (or its id is changed), there's no
referential integrity enforced, so any references from cohorts to that
partition's groups will be lost. A warning should be logged when links
are found from cohorts to partitions that do not exist.
"""
self.setup_student_in_group_0()
# to simulate another destructive change on the course, create a new
# partition with a different id, but using the same groups.
new_user_partition = UserPartition(
1, # different id
'Moved Partition',
'dummy',
[Group(10, 'Group 10'), Group(20, 'Group 20')], # same ids
scheme=CohortPartitionScheme,
)
# the partition will not be found even though the group ids match, so the
# scheme returns None (and logs a warning).
with patch('openedx.core.djangoapps.course_groups.partition_scheme.log') as mock_log:
self.assert_student_in_group(None, new_user_partition)
self.assertTrue(mock_log.warn.called)
self.assertRegexpMatches(mock_log.warn.call_args[0][0], 'partition mismatch')
@attr(shard=2)
class TestExtension(django.test.TestCase):
"""
Ensure that the scheme extension is correctly plugged in (via entry point
in setup.py)
"""
def test_get_scheme(self):
self.assertEqual(UserPartition.get_scheme('cohort'), CohortPartitionScheme)
with self.assertRaisesRegexp(UserPartitionError, 'Unrecognized scheme'):
UserPartition.get_scheme('other')
@attr(shard=2)
class TestGetCohortedUserPartition(ModuleStoreTestCase):
"""
Test that `get_cohorted_user_partition` returns the first user_partition with scheme `CohortPartitionScheme`.
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Regenerate a course with cohort configuration, partition and groups,
and a student for each test.
"""
super(TestGetCohortedUserPartition, self).setUp()
self.course_key = ToyCourseFactory.create().id
self.course = modulestore().get_course(self.course_key)
self.student = UserFactory.create()
self.random_user_partition = UserPartition(
1,
'Random Partition',
'Should not be returned',
[Group(0, 'Group 0'), Group(1, 'Group 1')],
scheme=RandomUserPartitionScheme
)
self.cohort_user_partition = UserPartition(
0,
'Cohort Partition 1',
'Should be returned',
[Group(10, 'Group 10'), Group(20, 'Group 20')],
scheme=CohortPartitionScheme
)
self.second_cohort_user_partition = UserPartition(
2,
'Cohort Partition 2',
'Should not be returned',
[Group(10, 'Group 10'), Group(1, 'Group 1')],
scheme=CohortPartitionScheme
)
def test_returns_first_cohort_user_partition(self):
"""
Test get_cohorted_user_partition returns first user_partition with scheme `CohortPartitionScheme`.
"""
self.course.user_partitions.append(self.random_user_partition)
self.course.user_partitions.append(self.cohort_user_partition)
self.course.user_partitions.append(self.second_cohort_user_partition)
self.assertEqual(self.cohort_user_partition, get_cohorted_user_partition(self.course))
def test_no_cohort_user_partitions(self):
"""
Test get_cohorted_user_partition returns None when there are no cohorted user partitions.
"""
self.course.user_partitions.append(self.random_user_partition)
self.assertIsNone(get_cohorted_user_partition(self.course))
@attr(shard=2)
class TestMasqueradedGroup(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as belonging to a group.
"""
def setUp(self):
super(TestMasqueradedGroup, self).setUp()
self.user_partition = UserPartition(
0, 'Test User Partition', '',
[Group(0, 'Group 1'), Group(1, 'Group 2')],
scheme_id='cohort'
)
self.course.user_partitions.append(self.user_partition)
self.session = {}
modulestore().update_item(self.course, self.test_user.id)
def _verify_masquerade_for_group(self, group):
"""
Verify that the masquerade works for the specified group id.
"""
# Send the request to set the masquerade
request_json = {
"role": "student",
"user_partition_id": self.user_partition.id,
"group_id": group.id if group is not None else None
}
request = self._create_mock_json_request(
self.test_user,
data=request_json,
session=self.session
)
response = handle_ajax(request, unicode(self.course.id))
# pylint has issues analyzing this class (maybe due to circular imports?)
self.assertEquals(response.status_code, 200) # pylint: disable=no-member
# Now setup the masquerade for the test user
setup_masquerade(request, self.course.id, True)
scheme = self.user_partition.scheme
self.assertEqual(
scheme.get_group_for_user(self.course.id, self.test_user, self.user_partition),
group
)
def _verify_masquerade_for_all_groups(self):
"""
Verify that the staff user can masquerade as being in all groups
as well as no group.
"""
self._verify_masquerade_for_group(self.user_partition.groups[0])
self._verify_masquerade_for_group(self.user_partition.groups[1])
self._verify_masquerade_for_group(None)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_group_masquerade(self):
"""
Tests that a staff member can masquerade as being in a particular group.
"""
self._verify_masquerade_for_all_groups()
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_group_masquerade_with_cohort(self):
"""
Tests that a staff member can masquerade as being in a particular group
when that staff member also belongs to a cohort with a corresponding
group.
"""
self.course.cohort_config = {'cohorted': True}
modulestore().update_item(self.course, self.test_user.id) # pylint: disable=no-member
cohort = CohortFactory.create(course_id=self.course.id, users=[self.test_user])
CourseUserGroupPartitionGroup(
course_user_group=cohort,
partition_id=self.user_partition.id,
group_id=self.user_partition.groups[0].id
).save()
# When the staff user is masquerading as being in a None group
# (within an existent UserPartition), we should treat that as
# an explicit None, not defaulting to the user's cohort's
# partition group.
self._verify_masquerade_for_all_groups()
|
agpl-3.0
|
coberger/DIRAC
|
DataManagementSystem/scripts/dirac-dms-show-se-status.py
|
7
|
1713
|
#!/usr/bin/env python
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Get status of the available Storage Elements
Usage:
%s [<options>]
""" % Script.scriptName )
Script.parseCommandLine()
import DIRAC
from DIRAC import gConfig,gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Utilities.List import sortList
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
if __name__ == "__main__":
result = getVOfromProxyGroup()
if not result['OK']:
gLogger.notice( 'Error:', result['Message'] )
DIRAC.exit( 1 )
vo = result['Value']
resources = Resources( vo = vo )
result = resources.getEligibleStorageElements()
if not result['OK']:
gLogger.notice( 'Error:', result['Message'] )
DIRAC.exit( 2 )
seList = sortList( result[ 'Value' ] )
resourceStatus = ResourceStatus()
result = resourceStatus.getStorageStatus( seList )
if not result['OK']:
gLogger.notice( 'Error:', result['Message'] )
DIRAC.exit( 3 )
for k,v in result[ 'Value' ].items():
readState, writeState = 'Active', 'Active'
    if 'ReadAccess' in v:
      readState = v[ 'ReadAccess' ]
    if 'WriteAccess' in v:
      writeState = v[ 'WriteAccess' ]
gLogger.notice("%s %s %s" % ( k.ljust(25),readState.rjust(15),writeState.rjust(15)) )
DIRAC.exit(0)
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
gpl-3.0
|
nafraf/spreads
|
spreadsplug/intervaltrigger.py
|
5
|
2788
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Johannes Baiter <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Trigger plugin that triggers in a configurable interval. """
from __future__ import unicode_literals
import logging
import threading
import time
from spreads.config import OptionTemplate
from spreads.plugin import HookPlugin, TriggerHooksMixin
logger = logging.getLogger('spreadsplug.intervaltrigger')
class IntervalTrigger(HookPlugin, TriggerHooksMixin):
__name__ = 'intervaltrigger'
_loop_thread = None
_exit_event = None
@classmethod
def configuration_template(cls):
return {'interval': OptionTemplate(5.0, "Interval between captures"
" (in seconds)")}
def start_trigger_loop(self, capture_callback):
""" Launch the triggering loop in a background thread.
:param capture_callback: Callback for triggering a capture
:type capture_callback: function
"""
logger.debug("Starting event loop")
self._exit_event = threading.Event()
self._loop_thread = threading.Thread(target=self._trigger_loop,
args=(capture_callback, ))
self._loop_thread.start()
def stop_trigger_loop(self):
""" Stop the triggering loop and its thread. """
if self._exit_event:
logger.debug("Stopping event loop")
self._exit_event.set()
if self._loop_thread:
self._loop_thread.join()
def _trigger_loop(self, capture_func):
""" Read interval from configuration and run a loop that captures every
time the interval has elapsed.
:param capture_func: Callback for triggering a capture
:type capture_func: function
"""
interval = self.config['interval'].get(float)
        while interval > 0.0:
sleep_time = 0
while sleep_time < interval:
if self._exit_event.is_set():
return
time.sleep(0.01)
sleep_time += 0.01
capture_func()
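# --- Editor's note (sketch, not part of the original plugin) ---
# The 10ms sleep slices in _trigger_loop keep shutdown responsive:
# stop_trigger_loop() sets _exit_event and the loop notices within roughly
# 10ms instead of sleeping away a whole capture interval. Below is a minimal
# standalone illustration of the same pattern, with a hypothetical callback.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    exit_event = threading.Event()
    def _demo_loop(callback, interval=0.5):
        elapsed = 0.0
        while not exit_event.is_set():
            time.sleep(0.01)
            elapsed += 0.01
            if elapsed >= interval:
                callback()
                elapsed = 0.0
    worker = threading.Thread(
        target=_demo_loop, args=(lambda: logger.info("capture"),))
    worker.start()
    time.sleep(2)
    exit_event.set()
    worker.join()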
|
agpl-3.0
|
alexalemi/battleship
|
players/util.py
|
1
|
3217
|
"""
Author: Alex Alemi
Some utility routines for python players
"""
import logging
import socket
import os
import sys
from random import randrange
ship_sizes = {"A": 5, "B": 4, "D": 3, "S": 3, "P": 2}
def board_str(board):
""" Return the many lined string for a board """
boardstr = ""
for i in xrange(10):
for j in xrange(10):
if (i,j) in board:
boardstr += board[(i,j)]
else:
boardstr += '0'
boardstr += '\n'
return boardstr
def gen_random_board():
""" Generate a random board """
def place_ship(board, ship):
size = ship_sizes[ship]
orientation = randrange(2)
if orientation:
# if we are trying to place it horizontally
xpos = randrange(10-size)
ypos = randrange(10)
for i in xrange(size):
loc = (xpos+i, ypos)
if board.get(loc):
# we have a collision
raise IndexError
else:
board[loc] = ship
else:
# if we are trying to place it vertically
xpos = randrange(10)
ypos = randrange(10-size)
for i in xrange(size):
loc = (xpos, ypos+i)
if board.get(loc):
# we have a collision
raise IndexError
else:
board[loc] = ship
return board
done = False
while not done:
# Generate boards until we manage to not fail
board = {}
for ship,size in ship_sizes.iteritems():
try:
board = place_ship(board, ship)
except IndexError:
break
else:
done = True
return board
def gen_random_board_str():
return board_str(gen_random_board())
class LocalCommunication(object):
""" A very simple local communication thing
which can be used to locally test your
program
"""
def readline(self):
msg = raw_input()
return msg
def sendline(self,msg):
print(msg)
class Communication(object):
""" A simple communication wrapper, use
comm = Communication()
at which point you can use comm.readline() to read a line
and comm.sendline(msg) to send a line, sendline
will automatically add the newline at the end.
"""
def __init__(self):
self.port = int(sys.argv[1])
logging.debug("Got port %d", self.port)
# Create the socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_address = ('localhost', self.port)
logging.debug("Connection to %r", self.server_address)
self.sock.connect(self.server_address)
self.sock_file = self.sock.makefile("rw")
logging.debug("Connected")
def readline(self):
msg = self.sock_file.readline()
logging.debug("Read line %s", msg.strip())
return msg
def sendline(self,msg):
logging.debug("Sending line %s", msg.strip())
self.sock_file.write(msg + '\n')
self.sock_file.flush()
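# --- Usage sketch (editor's addition) ---
# Generate and print one random board; gen_random_board retries whole boards
# until all five ships place without collisions.
if __name__ == '__main__':
    print(gen_random_board_str())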
|
mit
|
GorK-ChO/selenium
|
py/test/selenium/webdriver/chrome/chrome_network_emulation_tests.py
|
29
|
1252
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver import Chrome
def test_network_conditions_emulation():
driver = Chrome()
driver.set_network_conditions(
offline=False,
latency=56, # additional latency (ms)
throughput=789)
conditions = driver.get_network_conditions()
assert conditions['offline'] is False
assert conditions['latency'] == 56
assert conditions['download_throughput'] == 789
assert conditions['upload_throughput'] == 789
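# Note (editor's addition): as the assertions above show, passing the single
# `throughput` argument applies the same value to both download_throughput and
# upload_throughput in the conditions returned by get_network_conditions().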
|
apache-2.0
|
OpenPymeMx/account-financial-reporting
|
account_financial_report_webkit/report/partner_balance.py
|
29
|
4238
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright Camptocamp SA 2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp import pooler
from openerp.report import report_sxw
from openerp.tools.translate import _
from .common_partner_balance_reports \
import CommonPartnerBalanceReportHeaderWebkit
from .webkit_parser_header_fix import HeaderFooterTextWebKitParser
class PartnerBalanceWebkit(report_sxw.rml_parse,
CommonPartnerBalanceReportHeaderWebkit):
def __init__(self, cursor, uid, name, context):
super(PartnerBalanceWebkit, self).__init__(
cursor, uid, name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
company = self.pool.get('res.users').browse(
self.cr, uid, uid, context=context).company_id
header_report_name = ' - '.join((_('PARTNER BALANCE'),
company.name,
company.currency_id.name))
footer_date_time = self.formatLang(
str(datetime.today()), date_time=True)
self.localcontext.update({
'cr': cursor,
'uid': uid,
'report_name': _('Partner Balance'),
'display_account': self._get_display_account,
'display_account_raw': self._get_display_account_raw,
'filter_form': self._get_filter,
'target_move': self._get_target_move,
'display_target_move': self._get_display_target_move,
'display_partner_account': self._get_display_partner_account,
'accounts': self._get_accounts_br,
'additional_args': [
('--header-font-name', 'Helvetica'),
('--footer-font-name', 'Helvetica'),
('--header-font-size', '10'),
('--footer-font-size', '6'),
('--header-left', header_report_name),
('--header-spacing', '2'),
('--footer-left', footer_date_time),
('--footer-right',
' '.join((_('Page'), '[page]', _('of'), '[topage]'))),
('--footer-line',),
],
})
def _get_initial_balance_mode(self, start_period):
""" Force computing of initial balance for the partner balance,
because we cannot use the entries generated by
OpenERP in the opening period.
OpenERP allows to reconcile move lines between different partners,
so the generated entries in the opening period are unreliable.
"""
return 'initial_balance'
def set_context(self, objects, data, ids, report_type=None):
"""Populate a ledger_lines attribute on each browse record that will
        be used by the Mako template"""
objects, new_ids, context_report_values = self.\
compute_partner_balance_data(data)
self.localcontext.update(context_report_values)
return super(PartnerBalanceWebkit, self).set_context(
objects, data, new_ids, report_type=report_type)
HeaderFooterTextWebKitParser(
'report.account.account_report_partner_balance_webkit',
'account.account',
'addons/account_financial_report_webkit/report/templates/\
account_report_partner_balance.mako',
parser=PartnerBalanceWebkit)
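# Note (editor's addition): the HeaderFooterTextWebKitParser call above is the
# report registration itself; instantiating it binds the service name to the
# Mako template and to the PartnerBalanceWebkit parser so OpenERP can render
# the report.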
|
agpl-3.0
|
itkinside/ufs
|
itkufs/common/views/display.py
|
1
|
3418
|
from operator import itemgetter
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import render
from itkufs.common.decorators import limit_to_group, limit_to_owner
from itkufs.accounting.models import Account, Group
@login_required
@limit_to_group
def group_summary(request, group, is_admin=False):
"""Show group summary"""
return render(
request,
"common/group_summary.html",
{
"is_admin": is_admin,
"all": "all" in request.GET,
"group": Group.objects.select_related().get(id=group.id),
},
)
@login_required
@limit_to_owner
def account_summary(request, group, account, is_admin=False, is_owner=False):
"""Show account summary"""
if is_owner:
# Set active account in session
request.session["my_account"] = {
"group_slug": account.group.slug,
"account_slug": account.slug,
}
# Warn owner of account about a low balance
if account.is_blocked():
messages.error(
request,
"The account balance is below the block limit, please "
"contact the group admin or deposit enough to pass the "
"limit.",
)
elif account.needs_warning():
messages.warning(
request, "The account balance is below the warning limit."
)
return render(
request,
"common/account_summary.html",
{
"is_admin": is_admin,
"is_owner": is_owner,
"group": group,
"account": Account.objects.select_related().get(id=account.id),
"balance_data": _generate_gchart_data(
account.get_balance_history_set()
),
},
)
@login_required
@limit_to_group
def group_balance_graph(request, group, is_admin=False):
accounts = (
Account.objects.all()
.filter(group_id=group.id, active=True, group_account=False)
.order_by("name")
)
data = []
for a in accounts:
data.append([a.short_name, a.normal_balance()])
graph_data = ['[ "%s", %d ]' % (a[0], a[1]) for a in data]
data = sorted(data, key=itemgetter(1), reverse=True)
graph_data_sorted = ['[ "%s", %d ]' % (a[0], a[1]) for a in data]
graph_data_positive = []
graph_data_negative = []
for a in data:
if a[1] >= 0:
graph_data_positive.append('[ "%s", %d ]' % (a[0], a[1]))
else:
graph_data_negative.append('[ "%s", %d ]' % (a[0], -a[1]))
return render(
request,
"common/group_balance_graph.html",
{
"group": Group.objects.select_related().get(id=group.id),
"graph_data": ",\n".join(graph_data),
"graph_data_sorted": ",\n".join(graph_data_sorted),
"graph_data_positive": ",\n".join(graph_data_positive),
"graph_data_negative": ",\n".join(graph_data_negative),
},
)
def _generate_gchart_data(dataset):
# aggregate data
agg = 0.0
history = []
for i in range(len(dataset)):
saldo = float(dataset[i].saldo)
history.append((dataset[i].date, saldo + agg))
agg += saldo
items = [f"[ new Date({date}), {balance:.2f}]" for date, balance in history]
return ",\n".join(items)
|
gpl-2.0
|
bbbenja/SickRage
|
lib/sqlalchemy/dialects/oracle/zxjdbc.py
|
79
|
7744
|
# oracle/zxjdbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
    :driverurl: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html
"""
import decimal
import re
from sqlalchemy import sql, types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext
from sqlalchemy.engine import result as _result
from sqlalchemy.sql import expression
import collections
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
def result_processor(self, dialect, coltype):
def process(value):
if value is None:
return None
else:
return value.date()
return process
class _ZxJDBCNumeric(sqltypes.Numeric):
def result_processor(self, dialect, coltype):
#XXX: does the dialect return Decimal or not???
# if it does (in all cases), we could use a None processor as well as
# the to_float generic processor
if self.asdecimal:
def process(value):
if isinstance(value, decimal.Decimal):
return value
else:
return decimal.Decimal(str(value))
else:
def process(value):
if isinstance(value, decimal.Decimal):
return float(value)
else:
return value
return process
class OracleCompiler_zxjdbc(OracleCompiler):
def returning_clause(self, stmt, returning_cols):
self.returning_cols = list(expression._select_iterables(returning_cols))
# within_columns_clause=False so that labels (foo AS bar) don't render
columns = [self.process(c, within_columns_clause=False, result_map=self.result_map)
for c in self.returning_cols]
if not hasattr(self, 'returning_parameters'):
self.returning_parameters = []
binds = []
for i, col in enumerate(self.returning_cols):
dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
self.returning_parameters.append((i + 1, dbtype))
bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype))
self.binds[bindparam.key] = bindparam
binds.append(self.bindparam_string(self._truncate_bindparam(bindparam)))
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
def pre_exec(self):
if hasattr(self.compiled, 'returning_parameters'):
# prepare a zxJDBC statement so we can grab its underlying
# OraclePreparedStatement's getReturnResultSet later
self.statement = self.cursor.prepare(self.statement)
def get_result_proxy(self):
if hasattr(self.compiled, 'returning_parameters'):
rrs = None
try:
try:
rrs = self.statement.__statement__.getReturnResultSet()
next(rrs)
except SQLException as sqle:
msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode())
if sqle.getSQLState() is not None:
msg += ' [SQLState: %s]' % sqle.getSQLState()
raise zxJDBC.Error(msg)
else:
row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype)
for index, dbtype in self.compiled.returning_parameters)
return ReturningResultProxy(self, row)
finally:
if rrs is not None:
try:
rrs.close()
except SQLException:
pass
self.statement.close()
return _result.ResultProxy(self)
def create_cursor(self):
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
return cursor
class ReturningResultProxy(_result.FullyBufferedResultProxy):
"""ResultProxy backed by the RETURNING ResultSet results."""
def __init__(self, context, returning_row):
self._returning_row = returning_row
super(ReturningResultProxy, self).__init__(context)
def _cursor_description(self):
ret = []
for c in self.context.compiled.returning_cols:
if hasattr(c, 'name'):
ret.append((c.name, c.type))
else:
ret.append((c.anon_label, c.type))
return ret
def _buffer_rows(self):
return collections.deque([self._returning_row])
class ReturningParam(object):
"""A bindparam value representing a RETURNING parameter.
Specially handled by OracleReturningDataHandler.
"""
def __init__(self, type):
self.type = type
def __eq__(self, other):
if isinstance(other, ReturningParam):
return self.type == other.type
return NotImplemented
def __ne__(self, other):
if isinstance(other, ReturningParam):
return self.type != other.type
return NotImplemented
def __repr__(self):
kls = self.__class__
return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self),
self.type)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
jdbc_db_name = 'oracle'
jdbc_driver_name = 'oracle.jdbc.OracleDriver'
statement_compiler = OracleCompiler_zxjdbc
execution_ctx_cls = OracleExecutionContext_zxjdbc
colspecs = util.update_copy(
OracleDialect.colspecs,
{
sqltypes.Date: _ZxJDBCDate,
sqltypes.Numeric: _ZxJDBCNumeric
}
)
def __init__(self, *args, **kwargs):
super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
global SQLException, zxJDBC
from java.sql import SQLException
from com.ziclix.python.sql import zxJDBC
from com.ziclix.python.sql.handler import OracleDataHandler
class OracleReturningDataHandler(OracleDataHandler):
"""zxJDBC DataHandler that specially handles ReturningParam."""
def setJDBCObject(self, statement, index, object, dbtype=None):
if type(object) is ReturningParam:
statement.registerReturnParameter(index, object.type)
elif dbtype is None:
OracleDataHandler.setJDBCObject(
self, statement, index, object)
else:
OracleDataHandler.setJDBCObject(
self, statement, index, object, dbtype)
self.DataHandler = OracleReturningDataHandler
def initialize(self, connection):
super(OracleDialect_zxjdbc, self).initialize(connection)
self.implicit_returning = connection.connection.driverversion >= '10.2'
def _create_jdbc_url(self, url):
return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database)
def _get_server_version_info(self, connection):
version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
return tuple(int(x) for x in version.split('.'))
dialect = OracleDialect_zxjdbc
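# --- Usage sketch (editor's addition, not part of the original dialect) ---
# Hypothetical DSN; this only works under Jython with the Oracle JDBC driver
# on the classpath, as the module docstring describes.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine('oracle+zxjdbc://scott:tiger@localhost/orcl')
    print(engine.dialect.name)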
|
gpl-3.0
|
sleepinghungry/wwif
|
students/simone/aiy yiy yiy.py
|
1
|
1028
|
Python 3.5.2 (v3.5.2:4def2a2901a5, Jun 25 2016, 22:18:55) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> file
Traceback (most recent call last):
File "<pyshell#0>", line 1, in <module>
file
NameError: name 'file' is not defined
>>>
RESTART: Z:\Coding Classes\Python Text Adventures\wwif\students\simone\hangman.py
Warning (from warnings module):
File "C:\Users\wwclc\AppData\Local\Programs\Python\Python35\lib\getpass.py", line 101
return fallback_getpass(prompt, stream)
GetPassWarning: Can not control echo on the terminal.
Warning: Password input may be echoed.
Player 1, enter a word
RESTART: Z:\Coding Classes\Python Text Adventures\wwif\students\simone\hangman.py
Warning (from warnings module):
File "C:\Users\wwclc\AppData\Local\Programs\Python\Python35\lib\getpass.py", line 101
return fallback_getpass(prompt, stream)
GetPassWarning: Can not control echo on the terminal.
Warning: Password input may be echoed.
Player 1, enter a word
|
mit
|
ganxueliang88/idracserver
|
idrac/log_api.py
|
3
|
2265
|
# coding: utf-8
from argparse import ArgumentParser, FileType
from contextlib import closing
from io import open as copen
from json import dumps
from math import ceil
import re
from os.path import basename, dirname, exists, join
from struct import unpack
from subprocess import Popen
from sys import platform, prefix, stderr
from tempfile import NamedTemporaryFile
from jinja2 import FileSystemLoader, Template
from jinja2.environment import Environment
from jumpserver.api import BASE_DIR
DEFAULT_TEMPLATE = join(BASE_DIR, 'templates', 'jlog', 'static.jinja2')
rz_pat = re.compile(r'\x18B\w+\r\x8a(\x11)?')
def escapeString(string):
string = rz_pat.sub('', string)
try:
string = string.encode('unicode_escape').decode('utf-8', 'ignore')
except (UnicodeEncodeError, UnicodeDecodeError):
string = string.decode('utf-8', 'ignore')
string = string.replace("'", "\\'")
string = '\'' + string + '\''
return string
def getTiming(timef):
timing = None
with closing(timef):
timing = [l.strip().split(' ') for l in timef]
timing = [(int(ceil(float(r[0]) * 1000)), int(r[1])) for r in timing]
return timing
def scriptToJSON(scriptf, timing=None):
ret = []
with closing(scriptf):
scriptf.readline() # ignore first header line from script file
offset = 0
for t in timing:
dt = scriptf.read(t[1])
data = escapeString(dt)
# print ('###### (%s, %s)' % (t[1], repr(data)))
offset += t[0]
ret.append((data, offset))
return dumps(ret)
def renderTemplate(script_path, time_file_path, dimensions=(24, 80), templatename=DEFAULT_TEMPLATE):
with copen(script_path, encoding='utf-8', errors='replace', newline='\r\n') as scriptf:
# with open(script_path) as scriptf:
with open(time_file_path) as timef:
timing = getTiming(timef)
json = scriptToJSON(scriptf, timing)
fsl = FileSystemLoader(dirname(templatename), 'utf-8')
e = Environment()
e.loader = fsl
templatename = basename(templatename)
rendered = e.get_template(templatename).render(json=json,
dimensions=dimensions)
return rendered
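# --- Usage sketch (editor's addition) ---
# Hypothetical paths: renders a recorded terminal session (script output plus
# its timing file) into a static HTML page using the default Jinja2 template.
if __name__ == '__main__':
    html = renderTemplate('/tmp/session.log', '/tmp/session.time')
    with copen('/tmp/session.html', 'w', encoding='utf-8') as out:
        out.write(html)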
|
gpl-2.0
|
LIS/lis-tempest
|
tempest/tests/cmd/test_tempest_init.py
|
3
|
4205
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import fixtures
from tempest.cmd import init
from tempest.tests import base
class TestTempestInit(base.TestCase):
def test_generate_testr_conf(self):
# Create fake conf dir
conf_dir = self.useFixture(fixtures.TempDir())
init_cmd = init.TempestInit(None, None)
init_cmd.generate_testr_conf(conf_dir.path)
# Generate expected file contents
top_level_path = os.path.dirname(os.path.dirname(init.__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
testr_conf_file = init.TESTR_CONF % (top_level_path, discover_path)
conf_path = conf_dir.join('.testr.conf')
with open(conf_path, 'r') as conf_file:
self.assertEqual(conf_file.read(), testr_conf_file)
def test_generate_sample_config(self):
local_dir = self.useFixture(fixtures.TempDir())
etc_dir_path = os.path.join(local_dir.path, 'etc/')
os.mkdir(etc_dir_path)
tmp_dir = self.useFixture(fixtures.TempDir())
config_dir = os.path.join(tmp_dir.path, 'config/')
shutil.copytree('etc/', config_dir)
init_cmd = init.TempestInit(None, None)
local_sample_conf_file = os.path.join(etc_dir_path,
'tempest.conf.sample')
# Verify no sample config file exist
self.assertFalse(os.path.isfile(local_sample_conf_file))
init_cmd.generate_sample_config(local_dir.path, config_dir)
# Verify sample config file exist with some content
self.assertTrue(os.path.isfile(local_sample_conf_file))
self.assertGreater(os.path.getsize(local_sample_conf_file), 0)
def test_create_working_dir_with_existing_local_dir_non_empty(self):
fake_local_dir = self.useFixture(fixtures.TempDir())
fake_local_conf_dir = self.useFixture(fixtures.TempDir())
open("%s/foo" % fake_local_dir.path, 'w').close()
_init = init.TempestInit(None, None)
self.assertRaises(OSError,
_init.create_working_dir,
fake_local_dir.path,
fake_local_conf_dir.path)
def test_create_working_dir(self):
fake_local_dir = self.useFixture(fixtures.TempDir())
fake_local_conf_dir = self.useFixture(fixtures.TempDir())
os.rmdir(fake_local_dir.path)
# Create a fake conf file
fake_file = fake_local_conf_dir.join('conf_file.conf')
open(fake_file, 'w').close()
init_cmd = init.TempestInit(None, None)
init_cmd.create_working_dir(fake_local_dir.path,
fake_local_conf_dir.path)
# Assert directories are created
lock_path = os.path.join(fake_local_dir.path, 'tempest_lock')
etc_dir = os.path.join(fake_local_dir.path, 'etc')
log_dir = os.path.join(fake_local_dir.path, 'logs')
testr_dir = os.path.join(fake_local_dir.path, '.testrepository')
self.assertTrue(os.path.isdir(lock_path))
self.assertTrue(os.path.isdir(etc_dir))
self.assertTrue(os.path.isdir(log_dir))
self.assertTrue(os.path.isdir(testr_dir))
# Assert file creation
fake_file_moved = os.path.join(etc_dir, 'conf_file.conf')
local_conf_file = os.path.join(etc_dir, 'tempest.conf')
local_testr_conf = os.path.join(fake_local_dir.path, '.testr.conf')
self.assertTrue(os.path.isfile(fake_file_moved))
self.assertTrue(os.path.isfile(local_conf_file))
self.assertTrue(os.path.isfile(local_testr_conf))
|
apache-2.0
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/tools/metrics/histograms/update_bad_message_reasons.py
|
31
|
1275
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the various BadMessage enums in histograms.xml file with values read
from the corresponding bad_message.h files.
If the file was pretty-printed, the updated version is pretty-printed too.
"""
import sys
from update_histogram_enum import UpdateHistogramEnum
if __name__ == '__main__':
if len(sys.argv) > 1:
print >>sys.stderr, 'No arguments expected!'
sys.stderr.write(__doc__)
sys.exit(1)
histograms = {
'chrome/browser/bad_message.h': 'BadMessageReasonChrome',
'content/browser/bad_message.h': 'BadMessageReasonContent',
'components/nacl/browser/bad_message.h': 'BadMessageReasonNaCl',
'components/password_manager/content/browser/bad_message.h':
'BadMessageReasonPasswordManager',
'extensions/browser/bad_message.h': 'BadMessageReasonExtensions',
}
for header_file, histogram_name in histograms.items():
UpdateHistogramEnum(histogram_enum_name=histogram_name,
source_enum_path=header_file,
start_marker='^enum (class )?BadMessageReason {',
end_marker='^BAD_MESSAGE_MAX')
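# Note (editor's addition): UpdateHistogramEnum reads each header between the
# start/end regex markers above and rewrites the matching enum values in
# histograms.xml, keeping the file pretty-printed when it already was (per the
# module docstring).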
|
mit
|
autosportlabs/kivy
|
kivy/core/clipboard/__init__.py
|
9
|
4563
|
'''
Clipboard
=========
Core class for accessing the Clipboard. If we are not able to access the
system clipboard, a fake one will be used.
Usage example:
.. code-block:: kv
#:import Clipboard kivy.core.clipboard.Clipboard
Button:
on_release:
self.text = Clipboard.paste()
Clipboard.copy('Data')
'''
__all__ = ('ClipboardBase', 'Clipboard')
from kivy import Logger
from kivy.core import core_select_lib
from kivy.utils import platform
from kivy.setupconfig import USE_SDL2
class ClipboardBase(object):
def get(self, mimetype):
'''Get the current data in clipboard, using the mimetype if possible.
        You should not use this method directly. Use :meth:`paste` instead.
'''
return None
def put(self, data, mimetype):
'''Put data on the clipboard, and attach a mimetype.
You should not use this method directly. Use :meth:`copy` instead.
'''
pass
def get_types(self):
'''Return a list of supported mimetypes
'''
return []
def _ensure_clipboard(self):
''' Ensure that the clipboard has been properly initialised.
'''
if hasattr(self, '_clip_mime_type'):
return
if platform == 'win':
self._clip_mime_type = 'text/plain;charset=utf-8'
# windows clipboard uses a utf-16 little endian encoding
self._encoding = 'utf-16-le'
elif platform == 'linux':
self._clip_mime_type = 'text/plain;charset=utf-8'
self._encoding = 'utf-8'
else:
self._clip_mime_type = 'text/plain'
self._encoding = 'utf-8'
def copy(self, data=''):
        ''' Copy the value provided in argument `data` into the current clipboard.
If data is not of type string it will be converted to string.
.. versionadded:: 1.9.0
'''
if data:
self._copy(data)
def paste(self):
        ''' Get text from the system clipboard and return it as a usable string.
.. versionadded:: 1.9.0
'''
return self._paste()
def _copy(self, data):
self._ensure_clipboard()
if not isinstance(data, bytes):
data = data.encode(self._encoding)
self.put(data, self._clip_mime_type)
def _paste(self):
self._ensure_clipboard()
_clip_types = Clipboard.get_types()
mime_type = self._clip_mime_type
if mime_type not in _clip_types:
mime_type = 'text/plain'
data = self.get(mime_type)
if data is not None:
# decode only if we don't have unicode
# we would still need to decode from utf-16 (windows)
# data is of type bytes in PY3
if isinstance(data, bytes):
data = data.decode(self._encoding, 'ignore')
            # remove null bytes, mostly a windows issue
data = data.replace(u'\x00', u'')
return data
return u''
# load clipboard implementation
_clipboards = []
if platform == 'android':
_clipboards.append(
('android', 'clipboard_android', 'ClipboardAndroid'))
elif platform == 'macosx':
_clipboards.append(
('nspaste', 'clipboard_nspaste', 'ClipboardNSPaste'))
elif platform == 'win':
_clipboards.append(
('winctypes', 'clipboard_winctypes', 'ClipboardWindows'))
elif platform == 'linux':
_clipboards.append(
('dbusklipper', 'clipboard_dbusklipper', 'ClipboardDbusKlipper'))
_clipboards.append(
('gtk3', 'clipboard_gtk3', 'ClipboardGtk3'))
_clipboards.append(
('xclip', 'clipboard_xclip', 'ClipboardXclip'))
_clipboards.append(
('xsel', 'clipboard_xsel', 'ClipboardXsel'))
if USE_SDL2:
_clipboards.append(
('sdl2', 'clipboard_sdl2', 'ClipboardSDL2'))
else:
_clipboards.append(
('pygame', 'clipboard_pygame', 'ClipboardPygame'))
_clipboards.append(
('dummy', 'clipboard_dummy', 'ClipboardDummy'))
Clipboard = core_select_lib('clipboard', _clipboards, True)
CutBuffer = None
if platform == 'linux':
_cutbuffers = [
('xclip', 'clipboard_xclip', 'ClipboardXclip'),
('xsel', 'clipboard_xsel', 'ClipboardXsel'),
]
if Clipboard.__class__.__name__ in (c[2] for c in _cutbuffers):
CutBuffer = Clipboard
else:
CutBuffer = core_select_lib('cutbuffer', _cutbuffers, True,
basemodule='clipboard')
if CutBuffer:
Logger.info('CutBuffer: cut buffer support enabled')
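# --- Usage sketch (editor's addition, not part of the original module) ---
# The same round trip as the kv example in the docstring, from plain Python;
# assumes core_select_lib found a working clipboard provider above.
if __name__ == '__main__':
    Clipboard.copy('Data')
    print('clipboard contains: %r' % Clipboard.paste())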
|
mit
|
abelfunctions/abelfunctions
|
examples/riemanntheta_demo.py
|
2
|
8564
|
"""
Grady Williams
January 28, 2013
This module provides functions for displaying graphs of the Riemann-Theta
function. There are 12 different graphs that can be generated, 10 of them
correspond to the graphics shown on the Digital Library of Mathematical
Functions page for Riemann Theta (dlmf.nist.gov/21.4) and the names of the
functions that generate those plots correspond to the names of the plots on
that page. (e.g plt_a1 plots generates the plot denoted a1 on the dlmf page).
The other two graphs are of the first and second derivatives for a given Omega.
Besides the plots for derivatives, all of the plots accept a few optional arguments:
SIZE: The number of grid points per direction over which the function is computed;
the default is 75.
warp: The mayavi warp scale; documentation for it can be found at
docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html. The default is "auto".
d_axes: A boolean value which determines whether or not the axes are displayed.
WARNING: If d_axes is set to True, then warp should be set to '1'. Otherwise incorrect
axes will be displayed and function values will appear incorrect.
There are 3 different Omegas that are considered
Omega 1 = [[1.690983006 + .951056516*1.0j 1.5 + .363271264*1.0j]
[1.5 + .363271264*1.0j 1.309016994 + .951056516*1.0j]]
Omega 2 = [[1.0j -.5]
[-.5 1.0j]]
Omega 3 = [[-.5 + 1.0j .5 -.5*1.0j -.5-.5*1.0j]
[.5 -.5*1.0j 1.0j 0 ]
[-.5 - .5*1.0j 0 1.0j ]]
In all of the following graphs, the exponential growth of Riemann Theta has been factored out.
"""
from abelfunctions import RiemannTheta
import numpy as np
from mayavi.mlab import *
import matplotlib.pyplot as plt
gpu = True
try:
import pycuda.driver
except ImportError:
gpu = False
"""
Plots the real part of Riemann Theta for Omega 1 with z = (x + iy,0)
where x,y are real numbers such that 0 < x < 1, 0 < y < 5
corresponds to 21.4.1.a1 on DLMF
"""
def plt_a1(SIZE=75, warp="auto", d_axes=False):
X,Y,V = get_r1_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
"""
Plots the imaginary part of Riemann Theta for Omega 1 with z = (x + iy,0)
where x,y are real numbers such that 0 < x < 1, 0 < y < 5
corresponds to 21.4.1.b1 on DLMF
"""
def plt_b1(SIZE=75,warp="auto", d_axes=False):
X,Y,V = get_r1_vals(SIZE,gpu)
V = V.imag
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
"""
Plots the modulus of Riemann Theta for Omega 1 with z = (x + iy,0)
where x,y are real numbers such that 0 < x < 1, 0 < y < 5
corresponds to 21.4.1.c1 on DLMF
"""
def plt_c1(SIZE=75, warp="auto", d_axes=False):
X,Y,V = get_r1_vals(SIZE, gpu)
V = np.absolute(V)
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_a2(SIZE=75,warp = "auto",d_axes=False):
X,Y,V = get_r2_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_b2(SIZE=75,warp= "auto", d_axes=False):
X,Y,V = get_r2_vals(SIZE, gpu)
V = V.imag
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_c2(SIZE=75, warp = "auto", d_axes=False):
X,Y,V = get_r2_vals(SIZE, gpu)
V = np.absolute(V)
s = surf(X,Y,V,warp_scale = warp)
if d_axes:
axes()
return s
def plt_a3(SIZE=75, warp = "auto", d_axes=False):
X,Y,V = get_r3_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_b3(SIZE=75, warp= "auto", d_axes=False):
X,Y,V = get_r3_vals(SIZE,gpu)
V = V.imag
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_c3(SIZE=75, warp= "auto", d_axes=False):
X,Y,V = get_r3_vals(SIZE,gpu)
V = np.absolute(V)
s = surf(X,Y,V,warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_2(SIZE=75, warp = "auto", d_axes = False):
X,Y,V = get_d_vals(SIZE, gpu)
V = V.real
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_3(SIZE=75, warp = "auto", d_axes=False):
theta = RiemannTheta
Omega = np.matrix([[1.0j, -.5], [-.5,1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:2:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = np.absolute(V)
V = V.reshape(SIZE,SIZE)
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_4(SIZE=75,warp = "auto", d_axes=False, gpu=False):
theta = RiemannTheta
Omega = np.matrix([[1.0j, -.5], [-.5,1.0j]])
X,Y = np.mgrid[0:4:SIZE*1.0j, 0:4:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z.real*1.0j,z.imag*1.0j] for z in Z], Omega, batch=True)
V = V.real
V = V.reshape(SIZE,SIZE)
s = surf(X,Y,V, warp_scale = warp)
if d_axes:
axes()
return s
def plt_21_4_5(SIZE=75,warp = "auto", d_axes=False, gpu=False):
theta = RiemannTheta
Omega = np.matrix([[-.5 + 1.0j, .5 -.5*1.0j, -.5-.5*1.0j],
[.5 -.5*1.0j, 1.0j, 0],
[-.5 - .5*1.0j, 0, 1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:3:1.0j*SIZE]
Z = X+Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0,0] for z in Z], Omega, batch=True)
V = V.real
V = V.reshape(SIZE,SIZE)
s = surf(X,Y,V,warp_scale=warp)
if d_axes:
axes()
return s
def plt_first_deriv():
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
k = [[1,0]]
Z = np.linspace(0,50,500)
U,V = theta.exp_and_osc_at_point([[0, z*1.0j] for z in Z], Omega, deriv=k, batch=True)
plt.plot(Z, V.real)
plt.show()
def plt_second_deriv():
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
k = [[1,0],[1,0]]
Z = np.linspace(0,50,500)
U,V = theta.exp_and_osc_at_point([[0, z*1.0j] for z in Z], Omega, deriv=k, batch=True)
plt.plot(Z, V.real)
plt.show()
def explosion(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X,Y = np.mgrid[-1.5:1.5:SIZE*1.0j, -1.5:1.5:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = np.exp(U)*V
V = V.reshape(SIZE, SIZE)
s = surf(X,Y,np.absolute(V), warp_scale = 'auto')
savefig("test.eps")
def get_r1_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:5:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = V.reshape(SIZE, SIZE)
return X,Y,V
def get_r2_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X = np.linspace(0,1,SIZE)
Y = np.linspace(0,1,SIZE)
Z = []
for x in X:
for y in Y:
Z.append([x,y])
U,V = theta.exp_and_osc_at_point(Z, Omega, batch=True)
V = V.reshape(SIZE,SIZE)
return X,Y,V
def get_r3_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.690983006 + .951056516*1.0j, 1.5 + .363271264*1.0j],
[1.5 + .363271264*1.0j, 1.309016994 + .951056516*1.0j]])
X,Y = np.mgrid[0:5:SIZE*1.0j, 0:5:SIZE*1.0j]
Z = X + Y*1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[1.0j*z.real,1.0j*z.imag] for z in Z], Omega, batch=True)
V = V.reshape(SIZE, SIZE)
return X,Y,V
def get_d_vals(SIZE, gpu):
theta = RiemannTheta
Omega = np.matrix([[1.0j, -.5], [-.5, 1.0j]])
X,Y = np.mgrid[0:1:SIZE*1.0j, 0:5:SIZE*1.0j]
Z = X + Y * 1.0j
Z = Z.flatten()
U,V = theta.exp_and_osc_at_point([[z,0] for z in Z], Omega, batch=True)
V = V.reshape(SIZE,SIZE)
return X,Y,V
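# --- Usage sketch (editor's addition, not part of the original demo) ---
# Renders plot 21.4.1.a1 with axes shown; per the module docstring, warp is
# pinned to 1 so the displayed axes line up with the function values.
if __name__ == '__main__':
    plt_a1(SIZE=50, warp=1, d_axes=True)
    show()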
|
mit
|
laurent-george/bokeh
|
examples/glyphs/data_tables.py
|
41
|
3178
|
from bokeh.io import vplot
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, HoverTool, BoxSelectTool
from bokeh.models.widgets import DataTable, TableColumn, StringFormatter, NumberFormatter, StringEditor, IntEditor, NumberEditor, SelectEditor
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
from bokeh.sampledata.autompg2 import autompg2 as mpg
source = ColumnDataSource(mpg)
manufacturers = sorted(mpg["manufacturer"].unique())
models = sorted(mpg["model"].unique())
transmissions = sorted(mpg["trans"].unique())
drives = sorted(mpg["drv"].unique())
classes = sorted(mpg["class"].unique())
columns = [
TableColumn(field="manufacturer", title="Manufacturer", editor=SelectEditor(options=manufacturers), formatter=StringFormatter(font_style="bold")),
TableColumn(field="model", title="Model", editor=StringEditor(completions=models)),
TableColumn(field="displ", title="Displacement", editor=NumberEditor(step=0.1), formatter=NumberFormatter(format="0.0")),
TableColumn(field="year", title="Year", editor=IntEditor()),
TableColumn(field="cyl", title="Cylinders", editor=IntEditor()),
TableColumn(field="trans", title="Transmission", editor=SelectEditor(options=transmissions)),
TableColumn(field="drv", title="Drive", editor=SelectEditor(options=drives)),
TableColumn(field="class", title="Class", editor=SelectEditor(options=classes)),
TableColumn(field="cty", title="City MPG", editor=IntEditor()),
TableColumn(field="hwy", title="Highway MPG", editor=IntEditor()),
]
data_table = DataTable(source=source, columns=columns, editable=True)
plot = Plot(title=None, x_range= DataRange1d(), y_range=DataRange1d(), plot_width=1000, plot_height=300)
# Set up x & y axis
plot.add_layout(LinearAxis(), 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
# Add Glyphs
cty_glyph = Circle(x="index", y="cty", fill_color="#396285", size=8, fill_alpha=0.5, line_alpha=0.5)
hwy_glyph = Circle(x="index", y="hwy", fill_color="#CE603D", size=8, fill_alpha=0.5, line_alpha=0.5)
cty = plot.add_glyph(source, cty_glyph)
hwy = plot.add_glyph(source, hwy_glyph)
# Add the tools
tooltips = [
("Manufacturer", "@manufacturer"),
("Model", "@model"),
("Displacement", "@displ"),
("Year", "@year"),
("Cylinders", "@cyl"),
("Transmission", "@trans"),
("Drive", "@drv"),
("Class", "@class"),
]
cty_hover_tool = HoverTool(renderers=[cty], tooltips=tooltips + [("City MPG", "@cty")])
hwy_hover_tool = HoverTool(renderers=[hwy], tooltips=tooltips + [("Highway MPG", "@hwy")])
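# Restrict box selection to the horizontal dimension, so a drag selects full-height bands of points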
select_tool = BoxSelectTool(renderers=[cty, hwy], dimensions=['width'])
plot.add_tools(cty_hover_tool, hwy_hover_tool, select_tool)
layout = vplot(plot, data_table)
if __name__ == "__main__":
filename = "data_tables.html"
with open(filename, "w") as f:
f.write(file_html(layout, INLINE, "Data Tables"))
print("Wrote %s" % filename)
view(filename)
|
bsd-3-clause
|
Gabrielcarvfer/NS3
|
src/bridge/bindings/modulegen__gcc_LP64.py
|
4
|
283981
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
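# Returning True tells pybindgen the error was handled: the offending wrapper
# is skipped with a warning instead of aborting code generation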
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
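# All bindings are registered under the top-level 'ns.bridge' module, mapped to the ::ns3 C++ namespace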
root_module = Module('ns.bridge', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## log.h (module 'core'): ns3::LogLevel [enumeration]
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'], import_from_module='ns.core')
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator', 'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator*', 'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator&', 'ns3::AttributeConstructionList::CIterator&')
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper [class]
module.add_class('BridgeHelper')
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## log.h (module 'core'): ns3::LogComponent [class]
module.add_class('LogComponent', import_from_module='ns.core')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >', 'ns3::LogComponent::ComponentList')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >*', 'ns3::LogComponent::ComponentList*')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >&', 'ns3::LogComponent::ComponentList&')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )', 'ns3::Mac48Address::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )*', 'ns3::Mac48Address::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Mac48Address )&', 'ns3::Mac48Address::TracedCallback&')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
module.add_class('Mac8Address', import_from_module='ns.network')
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator', 'ns3::NetDeviceContainer::Iterator')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator*', 'ns3::NetDeviceContainer::Iterator*')
typehandlers.add_type_alias('std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator&', 'ns3::NetDeviceContainer::Iterator&')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::ItemType [enumeration]
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## log.h (module 'core'): ns3::ParameterLogger [class]
module.add_class('ParameterLogger', import_from_module='ns.core')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::ObjectBase'], template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'])
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST', 'AUTO'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )', 'ns3::Time::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )*', 'ns3::Time::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )&', 'ns3::Time::TracedCallback&')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias('uint32_t', 'ns3::TypeId::hash_t')
typehandlers.add_type_alias('uint32_t*', 'ns3::TypeId::hash_t*')
typehandlers.add_type_alias('uint32_t&', 'ns3::TypeId::hash_t&')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## channel.h (module 'network'): ns3::Channel [class]
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
typehandlers.add_type_alias('void ( * ) ( )', 'ns3::NetDevice::LinkChangeTracedCallback')
typehandlers.add_type_alias('void ( * ) ( )*', 'ns3::NetDevice::LinkChangeTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( )&', 'ns3::NetDevice::LinkChangeTracedCallback&')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::NetDevice::ReceiveCallback')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::NetDevice::ReceiveCallback*')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::NetDevice::ReceiveCallback&')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'ns3::NetDevice::PromiscReceiveCallback')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::NetDevice::PromiscReceiveCallback*')
typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::NetDevice::PromiscReceiveCallback&')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )', 'ns3::Packet::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )*', 'ns3::Packet::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > )&', 'ns3::Packet::TracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', 'ns3::Packet::AddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', 'ns3::Packet::AddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', 'ns3::Packet::AddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', 'ns3::Packet::TwoAddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', 'ns3::Packet::TwoAddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', 'ns3::Packet::TwoAddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', 'ns3::Packet::Mac48AddressTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', 'ns3::Packet::Mac48AddressTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', 'ns3::Packet::Mac48AddressTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )', 'ns3::Packet::SizeTracedCallback')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )*', 'ns3::Packet::SizeTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )&', 'ns3::Packet::SizeTracedCallback&')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', 'ns3::Packet::SinrTracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', 'ns3::Packet::SinrTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', 'ns3::Packet::SinrTracedCallback&')
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## bridge-channel.h (module 'bridge'): ns3::BridgeChannel [class]
module.add_class('BridgeChannel', parent=root_module['ns3::Channel'])
## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice [class]
module.add_class('BridgeNetDevice', parent=root_module['ns3::NetDevice'])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type='map')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::TimePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::TimePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::TimePrinter&')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::NodePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::NodePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::NodePrinter&')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )', 'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )*', 'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )&', 'ns3::TracedValueCallback::Time&')
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3BridgeHelper_methods(root_module, root_module['ns3::BridgeHelper'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3BridgeChannel_methods(root_module, root_module['ns3::BridgeChannel'])
register_Ns3BridgeNetDevice_methods(root_module, root_module['ns3::BridgeNetDevice'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3BridgeHelper_methods(root_module, cls):
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper::BridgeHelper(ns3::BridgeHelper const & arg0) [constructor]
cls.add_constructor([param('ns3::BridgeHelper const &', 'arg0')])
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper::BridgeHelper() [constructor]
cls.add_constructor([])
## bridge-helper.h (module 'bridge'): ns3::NetDeviceContainer ns3::BridgeHelper::Install(ns3::Ptr<ns3::Node> node, ns3::NetDeviceContainer c) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::NetDeviceContainer', 'c')])
## bridge-helper.h (module 'bridge'): ns3::NetDeviceContainer ns3::BridgeHelper::Install(std::string nodeName, ns3::NetDeviceContainer c) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('std::string', 'nodeName'), param('ns3::NetDeviceContainer', 'c')])
## bridge-helper.h (module 'bridge'): void ns3::BridgeHelper::SetDeviceAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
cls.add_method('SetDeviceAttribute',
'void',
[param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function]
cls.add_method('GetRemainingSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
cls.add_method('PeekU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
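# ByteTagIterator uses the Java-style HasNext()/Next() idiom rather than the
# Python iterator protocol, so it is consumed with an explicit loop. Hedged
# sketch, where pkt is assumed to be an ns3::Packet bound elsewhere:
#
#   it = pkt.GetByteTagIterator()
#   while it.HasNext():
#       item = it.Next()
#       print(item.GetTypeId().GetName(), item.GetStart(), item.GetEnd())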
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
cls.add_method('Adjust',
'void',
[param('int32_t', 'adjustment')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
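# ByteTagList::Iterator::Item is a plain data carrier: its public fields are
# registered below with add_instance_attribute (is_const=False), so they
# surface in Python as ordinary read/write attributes (item.buf, item.start,
# item.end, item.size, item.tid) rather than as getter methods.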
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
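# pybindgen cannot wrap a C++ class template generically, so each
# DefaultDeleter<T> instantiation the module needs (AttributeAccessor,
# AttributeChecker, AttributeValue, CallbackImplBase, EventImpl,
# Hash::Implementation, NixVector, TraceSourceAccessor) is registered as a
# separate Python class by the functions that follow. Each one exposes only
# the constructors and the static Delete(object) hook that SimpleRefCount
# uses as its default deleter.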
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeAccessor>::Delete(ns3::AttributeAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeAccessor *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeChecker> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeChecker > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeChecker>::Delete(ns3::AttributeChecker * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeChecker *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeValue> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeValue > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeValue>::Delete(ns3::AttributeValue * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeValue *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter(ns3::DefaultDeleter<ns3::CallbackImplBase> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::CallbackImplBase > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::CallbackImplBase>::Delete(ns3::CallbackImplBase * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::CallbackImplBase *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter(ns3::DefaultDeleter<ns3::EventImpl> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::EventImpl > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::EventImpl>::Delete(ns3::EventImpl * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::EventImpl *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter(ns3::DefaultDeleter<ns3::Hash::Implementation> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::Hash::Implementation > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Hash::Implementation>::Delete(ns3::Hash::Implementation * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Hash::Implementation *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter(ns3::DefaultDeleter<ns3::NixVector> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::NixVector > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::NixVector>::Delete(ns3::NixVector * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::NixVector *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::TraceSourceAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::TraceSourceAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::TraceSourceAccessor>::Delete(ns3::TraceSourceAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::TraceSourceAccessor *', 'object')],
is_static=True)
return
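# EventId registers ==, != and < through add_binary_comparison_operator, so
# the generated class supports Python comparisons directly. Hedged sketch,
# where eid is assumed to come from Simulator.Schedule (bound outside this
# file):
#
#   if not eid.IsExpired():
#       eid.Cancel()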
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
## event-id.h (module 'core'): void ns3::EventId::Remove() [member function]
cls.add_method('Remove',
'void',
[])
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
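# The address classes that follow (Ipv4Address, Ipv4Mask, Ipv6Address,
# Ipv6Prefix) all call add_output_stream_operator(), which pybindgen turns
# into a Python __str__ backed by the C++ operator<<. Hedged sketch, assuming
# the usual ns-3 Python namespace layout:
#
#   addr = ns.network.Ipv4Address("10.1.1.1")
#   mask = ns.network.Ipv4Mask("255.255.255.0")
#   print(addr, mask.GetPrefixLength())  # -> 10.1.1.1 24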
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
deprecated=True, is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
deprecated=True, is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
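# Note the MakeAutoconfiguredAddress / MakeAutoconfiguredLinkLocalAddress
# groups registered below: pybindgen keeps every C++ overload as a separate
# registration and the generated wrapper dispatches on the Python argument
# type at call time, so one Python name covers the Address, Mac16Address,
# Mac48Address, Mac64Address and Mac8Address variants.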
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) const [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::HasPrefix(ns3::Ipv6Prefix const & prefix) const [member function]
cls.add_method('HasPrefix',
'bool',
[param('ns3::Ipv6Prefix const &', 'prefix')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
deprecated=True, is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac8Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac8Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix, uint8_t prefixLength) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix'), param('uint8_t', 'prefixLength')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix, uint8_t prefixLength) [constructor]
cls.add_constructor([param('char const *', 'prefix'), param('uint8_t', 'prefixLength')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetMinimumPrefixLength() const [member function]
cls.add_method('GetMinimumPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
deprecated=True, is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::SetPrefixLength(uint8_t prefixLength) [member function]
cls.add_method('SetPrefixLength',
'void',
[param('uint8_t', 'prefixLength')])
return
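# LogComponent tracks which LogLevel values are active for a single logging
# component: Enable/Disable toggle individual levels and IsEnabled is the
# query that the NS_LOG macros rely on at runtime.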
def register_Ns3LogComponent_methods(root_module, cls):
## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [constructor]
cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
## log.h (module 'core'): ns3::LogComponent::LogComponent(std::string const & name, std::string const & file, ns3::LogLevel const mask=::ns3::LogLevel::LOG_NONE) [constructor]
cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'file'), param('ns3::LogLevel const', 'mask', default_value='::ns3::LogLevel::LOG_NONE')])
## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel const level) [member function]
cls.add_method('Disable',
'void',
[param('ns3::LogLevel const', 'level')])
## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel const level) [member function]
cls.add_method('Enable',
'void',
[param('ns3::LogLevel const', 'level')])
## log.h (module 'core'): std::string ns3::LogComponent::File() const [member function]
cls.add_method('File',
'std::string',
[],
is_const=True)
## log.h (module 'core'): static ns3::LogComponent::ComponentList * ns3::LogComponent::GetComponentList() [member function]
cls.add_method('GetComponentList',
'ns3::LogComponent::ComponentList *',
[],
is_static=True)
## log.h (module 'core'): static std::string ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) [member function]
cls.add_method('GetLevelLabel',
'std::string',
[param('ns3::LogLevel const', 'level')],
is_static=True)
## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel const level) const [member function]
cls.add_method('IsEnabled',
'bool',
[param('ns3::LogLevel const', 'level')],
is_const=True)
## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function]
cls.add_method('IsNoneEnabled',
'bool',
[],
is_const=True)
## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function]
cls.add_method('Name',
'char const *',
[],
is_const=True)
## log.h (module 'core'): void ns3::LogComponent::SetMask(ns3::LogLevel const level) [member function]
cls.add_method('SetMask',
'void',
[param('ns3::LogLevel const', 'level')])
return
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
def register_Ns3Mac8Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(ns3::Mac8Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac8Address const &', 'arg0')])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address() [constructor]
cls.add_constructor([])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(uint8_t addr) [constructor]
cls.add_constructor([param('uint8_t', 'addr')])
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac8Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyFrom(uint8_t const * pBuffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'pBuffer')])
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyTo(uint8_t * pBuffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'pBuffer')],
is_const=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static bool ns3::Mac8Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
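# NetDeviceContainer exposes the usual ns-3 container trio Begin()/End()/
# Get(i). From Python the index-based form is the practical one, since
# Begin()/End() return raw C++ iterators. Hedged sketch, where devices is
# assumed to be the container returned by some helper's Install call:
#
#   for i in range(devices.GetN()):
#       dev = devices.Get(i)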
def register_Ns3NetDeviceContainer_methods(root_module, cls):
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
cls.add_constructor([])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
cls.add_constructor([param('std::string', 'devName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NetDeviceContainer', 'other')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'deviceName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::Iterator ns3::NetDeviceContainer::Begin() const [member function]
cls.add_method('Begin',
'ns3::NetDeviceContainer::Iterator',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::Iterator ns3::NetDeviceContainer::End() const [member function]
cls.add_method('End',
'ns3::NetDeviceContainer::Iterator',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True)
## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
is_virtual=True, visibility='protected')
return
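# ObjectBase carries the attribute and trace-source machinery shared by all
# ns-3 objects: GetAttribute/SetAttribute plus their FailSafe variants that
# return a bool instead of failing hard, and TraceConnect(WithoutContext) /
# TraceDisconnect(WithoutContext) for callback wiring. Hedged sketch, where
# obj is any bound ns-3 object and the attribute name is illustrative:
#
#   value = ns.core.StringValue()
#   ok = obj.GetAttributeFailSafe("SomeAttribute", value)  # False if unknown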
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactory::IsTypeIdSet() const [member function]
cls.add_method('IsTypeIdSet',
'bool',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set() [member function]
cls.add_method('Set',
'void',
[])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
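# Hedged usage sketch (not part of the generated bindings): basic driving of
# the ObjectFactory methods registered above, assuming the compiled module is
# importable as 'ns.core'. The TypeId string is an illustrative assumption;
# the helper is never called by this module.
def _example_object_factory():
    import ns.core
    factory = ns.core.ObjectFactory()
    assert not factory.IsTypeIdSet()                  # freshly constructed factory
    factory.SetTypeId('ns3::ConstantRandomVariable')  # SetTypeId(std::string) overload
    tid = factory.GetTypeId()                         # the TypeId just configured
    obj = factory.Create()                            # instantiates the configured type
    return tid.GetName(), obj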
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
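# Hedged usage sketch (not part of the generated bindings): the two static
# switches registered above. Metadata recording is off by default for speed
# and must be enabled before the first packet is created. Assumes the
# compiled module is importable as 'ns.network'.
def _example_packet_metadata_switches():
    import ns.network
    ns.network.PacketMetadata.Enable()          # record per-packet metadata
    ns.network.PacketMetadata.EnableChecking()  # additionally validate header/trailer operations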
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::type [variable]
cls.add_instance_attribute('type', 'ns3::PacketMetadata::Item::ItemType', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): uint32_t ns3::PacketTagList::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-tag-list.h (module 'network'): uint32_t ns3::PacketTagList::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
cls.add_method('Replace',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): uint32_t ns3::PacketTagList::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3ParameterLogger_methods(root_module, cls):
## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(ns3::ParameterLogger const & arg0) [constructor]
cls.add_constructor([param('ns3::ParameterLogger const &', 'arg0')])
## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(std::ostream & os) [constructor]
cls.add_constructor([param('std::ostream &', 'os')])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t v) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t v) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit=::ns3::Time::Unit::AUTO) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit', default_value='::ns3::Time::Unit::AUTO')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): ns3::Time ns3::Time::RoundTo(ns3::Time::Unit unit) const [member function]
cls.add_method('RoundTo',
'ns3::Time',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
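# Hedged usage sketch (not part of the generated bindings): Time construction,
# the overloaded arithmetic/comparison operators, and the unit-conversion
# methods registered above, assuming the compiled module is importable as
# 'ns.core' and that Time::Unit values are exposed as class attributes
# (e.g. Time.MS); the helper is never called by this module.
def _example_time_arithmetic():
    import ns.core
    t1 = ns.core.Time('1ms')                            # string constructor
    t2 = ns.core.Time.FromDouble(2.5, ns.core.Time.MS)  # static unit-aware constructor
    total = t1 + t2                                     # operator+ registered above
    assert total.GetMicroSeconds() == 3500
    assert t1 < t2                                      # operator< registered above
    return total.ToDouble(ns.core.Time.S)               # -> 0.0035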
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(std::size_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(std::size_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::hash_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'ns3::TypeId::hash_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint16_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint16_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint16_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint16_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(std::size_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(ns3::TypeId::hash_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(ns3::TypeId::hash_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(std::size_t i, ns3::Ptr<const ns3::AttributeValue> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
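# Hedged usage sketch (not part of the generated bindings): TypeId lookup and
# attribute introspection with the methods registered above, assuming the
# compiled module is importable as 'ns.core'; the helper is never called by
# this module.
def _example_typeid_introspection():
    import ns.core
    tid = ns.core.TypeId.LookupByName('ns3::Object')
    names = [tid.GetAttributeFullName(i) for i in range(tid.GetAttributeN())]
    # Round-trip through the hash-based lookup registered above.
    found = ns.core.TypeId.LookupByHash(tid.GetHash())
    assert found == tid                  # operator== registered above
    return tid.GetName(), names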
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::int64x64_t'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_unary_numeric_operator('-')
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(double const value) [constructor]
cls.add_constructor([param('double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long double const value) [constructor]
cls.add_constructor([param('long double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int const v) [constructor]
cls.add_constructor([param('int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long int const v) [constructor]
cls.add_constructor([param('long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int const v) [constructor]
cls.add_constructor([param('long long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int const v) [constructor]
cls.add_constructor([param('unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int const v) [constructor]
cls.add_constructor([param('long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int const v) [constructor]
cls.add_constructor([param('long long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t const hi, uint64_t const lo) [constructor]
cls.add_constructor([param('int64_t const', 'hi'), param('uint64_t const', 'lo')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetInt() const [member function]
cls.add_method('GetInt',
'int64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t const v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t const', 'v')],
is_static=True)
## int64x64-128.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::Round() const [member function]
cls.add_method('Round',
'int64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
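# Hedged usage sketch (not part of the generated bindings): the int64x64_t
# fixed-point arithmetic registered above, assuming the compiled module is
# importable as 'ns.core'; the helper is never called by this module.
def _example_int64x64_arithmetic():
    import ns.core
    a = ns.core.int64x64_t(1.5)    # double constructor
    b = ns.core.int64x64_t(2, 0)   # (hi, lo) constructor: integer part 2, zero fractional bits
    c = a * b                      # operator* registered above
    assert c.GetHigh() == 3        # integer part of 3.0
    return c.GetDouble()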
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject() const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[],
custom_template_method_name='GetObject', is_const=True, template_parameters=['ns3::Object'])
## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject(ns3::TypeId tid) const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[param('ns3::TypeId', 'tid')],
custom_template_method_name='GetObject', is_const=True, template_parameters=['ns3::Object'])
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='protected')
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
is_virtual=True, visibility='protected')
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
is_virtual=True, visibility='protected')
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<const ns3::Object> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
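# Hedged usage sketch (not part of the generated bindings): walking an object
# aggregate with the iterator registered above, assuming the compiled module
# is importable as 'ns.core'; the helper is never called by this module.
def _example_aggregate_iteration():
    import ns.core
    obj = ns.core.Object()           # Object is concrete and default-constructible
    it = obj.GetAggregateIterator()  # the aggregate always contains the object itself
    names = []
    while it.HasNext():
        names.append(it.Next().GetInstanceTypeId().GetName())
    return names                     # e.g. ['ns3::Object']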
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
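# Usage sketch (not part of the generated bindings): Trailer is abstract, so a
# concrete subclass is needed; ns.network.EthernetTrailer is assumed here
# purely for illustration. AddTrailer/RemoveTrailer live on ns3::Packet,
# registered later in this module.
def _example_trailer_roundtrip():
    import ns.network
    pkt = ns.network.Packet(10)
    trailer = ns.network.EthernetTrailer()
    pkt.AddTrailer(trailer)              # appends the serialized trailer bytes
    read_back = ns.network.EthernetTrailer()
    pkt.RemoveTrailer(read_back)         # returns the number of bytes consumed
    return pkt.GetSize()                 # back to the original 10 bytes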
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
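# Usage sketch (not part of the generated bindings): every concrete
# AttributeValue implements the SerializeToString/DeserializeFromString pair
# against a matching AttributeChecker. StringValue and MakeStringChecker from
# ns.core are assumed here as the simplest concrete pairing.
def _example_attribute_value_roundtrip():
    import ns.core
    checker = ns.core.MakeStringChecker()
    value = ns.core.StringValue()
    assert value.DeserializeFromString("hello", checker)
    return value.SerializeToString(checker)   # -> "hello"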
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, template_parameters=['ns3::ObjectBase*'], visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3Channel_methods(root_module, cls):
## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [constructor]
cls.add_constructor([param('ns3::Channel const &', 'arg0')])
## channel.h (module 'network'): ns3::Channel::Channel() [constructor]
cls.add_constructor([])
## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(std::size_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('std::size_t', 'i')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## channel.h (module 'network'): std::size_t ns3::Channel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'std::size_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
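# Usage sketch (not part of the generated bindings): GetNDevices/GetDevice
# form the standard iteration idiom over the devices attached to a channel;
# `channel` is any instance of a concrete ns3::Channel subclass.
def _example_channel_devices(channel):
    return [channel.GetDevice(i) for i in range(channel.GetNDevices())]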
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True, visibility='private')
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True, visibility='private')
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True, visibility='private')
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, is_virtual=True, visibility='protected')
return
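# Usage sketch (not part of the generated bindings): EventImpl is the internal
# representation behind scheduled events; Python code normally reaches it
# through ns.core.Simulator, whose Schedule glue accepts a plain callable (an
# assumption about the core bindings, not something registered in this
# module).
def _example_event_schedule_cancel():
    import ns.core
    def _fire():
        print("event fired")
    event_id = ns.core.Simulator.Schedule(ns.core.Seconds(1.0), _fire)
    event_id.Cancel()    # marks the underlying EventImpl as cancelled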
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
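# Usage sketch (not part of the generated bindings): the Value/Checker pair
# registered above is how Ipv4Address plugs into the attribute system; the
# Get/Set round trip below uses only methods bound in this module plus the
# conventional ns.network namespace, which is assumed.
def _example_ipv4_address_value():
    import ns.network
    value = ns.network.Ipv4AddressValue(ns.network.Ipv4Address("10.1.1.1"))
    value.Set(ns.network.Ipv4Address("10.1.1.2"))
    return value.Get()   # -> Ipv4Address 10.1.1.2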
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
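# Usage sketch (not part of the generated bindings): Mac48AddressValue is
# typically handed to SetAttribute on a device; the attribute name "Address"
# is an illustrative assumption that happens to match several NetDevice
# subclasses.
def _example_mac48_address_attribute(device):
    import ns.network
    addr = ns.network.Mac48Address("00:00:00:00:00:01")
    device.SetAttribute("Address", ns.network.Mac48AddressValue(addr))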
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::NetDevice::PromiscReceiveCallback cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::NetDevice::ReceiveCallback cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_pure_virtual=True, is_virtual=True)
return
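# Usage sketch (not part of the generated bindings): the pure-virtual Send
# registered above is the datapath entry point. The broadcast Mac48Address is
# assumed to convert implicitly to ns3::Address via conversions registered
# elsewhere in these bindings, and 0x0800 (IPv4) is just an illustrative
# protocol number.
def _example_netdevice_send(device):
    import ns.network
    pkt = ns.network.Packet(100)
    dest = ns.network.Mac48Address.GetBroadcast()
    return device.Send(pkt, dest, 0x0800)   # True if queued for transmission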
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
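# Usage sketch (not part of the generated bindings): a NixVector is a
# bit-packed list of neighbor indices; BitCount reports how many bits a given
# fan-out needs, and ExtractNeighborIndex consumes bits in FIFO order.
def _example_nixvector_roundtrip():
    import ns.network
    nix = ns.network.NixVector()
    bits = nix.BitCount(8)                  # bits needed to address 8 neighbors
    nix.AddNeighborIndex(3, bits)
    return nix.ExtractNeighborIndex(bits)   # -> 3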
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
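# Usage sketch (not part of the generated bindings): ObjectFactoryValue wraps
# an ns3::ObjectFactory, whose SetTypeId/Create interface is bound in the core
# module; "ns3::BridgeNetDevice" is used only because it is registered in this
# module.
def _example_object_factory():
    import ns.core
    factory = ns.core.ObjectFactory()
    factory.SetTypeId("ns3::BridgeNetDevice")
    return factory.Create()   # Ptr<Object>; narrow with GetObject if needed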
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag, uint32_t start, uint32_t end) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag'), param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header, uint32_t size) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header'), param('uint32_t', 'size')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
cls.add_method('ReplacePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
cls.add_method('ToString',
'std::string',
[],
is_const=True)
return
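# Usage sketch (not part of the generated bindings): the Packet methods
# registered above support copy-on-write fragments and padding; only
# constructors and methods bound in this module are used, under the assumed
# ns.network namespace.
def _example_packet_usage():
    import ns.network
    pkt = ns.network.Packet(64)            # 64 zero-filled bytes
    frag = pkt.CreateFragment(0, 32)       # copy-on-write view of the first half
    pkt.AddPaddingAtEnd(8)
    return pkt.GetSize(), frag.GetSize()   # -> (72, 32)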
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
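# Usage sketch (not part of the generated bindings): TimeValue boxes an
# ns3::Time for the attribute system; Seconds() is the usual constructor
# helper from ns.core.
def _example_time_value():
    import ns.core
    value = ns.core.TimeValue(ns.core.Seconds(2.5))
    return value.Get().GetSeconds()   # -> 2.5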
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3BridgeChannel_methods(root_module, cls):
## bridge-channel.h (module 'bridge'): static ns3::TypeId ns3::BridgeChannel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bridge-channel.h (module 'bridge'): ns3::BridgeChannel::BridgeChannel() [constructor]
cls.add_constructor([])
## bridge-channel.h (module 'bridge'): void ns3::BridgeChannel::AddChannel(ns3::Ptr<ns3::Channel> bridgedChannel) [member function]
cls.add_method('AddChannel',
'void',
[param('ns3::Ptr< ns3::Channel >', 'bridgedChannel')])
## bridge-channel.h (module 'bridge'): std::size_t ns3::BridgeChannel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'std::size_t',
[],
is_const=True, is_virtual=True)
## bridge-channel.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeChannel::GetDevice(std::size_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('std::size_t', 'i')],
is_const=True, is_virtual=True)
return
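# Usage sketch (not part of the generated bindings): BridgeChannel is normally
# created indirectly. BridgeHelper.Install (assumed to ship alongside this
# module in ns.bridge) builds a BridgeNetDevice on `node` and adds each device
# in `port_devices` as a bridge port over a shared BridgeChannel.
def _example_bridge_install(node, port_devices):
    import ns.bridge
    helper = ns.bridge.BridgeHelper()
    return helper.Install(node, port_devices)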
def register_Ns3BridgeNetDevice_methods(root_module, cls):
## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice::BridgeNetDevice() [constructor]
cls.add_constructor([])
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddBridgePort(ns3::Ptr<ns3::NetDevice> bridgePort) [member function]
cls.add_method('AddBridgePort',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'bridgePort')])
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetBridgePort(uint32_t n) const [member function]
cls.add_method('GetBridgePort',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'n')],
is_const=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Channel> ns3::BridgeNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint16_t ns3::BridgeNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetNBridgePorts() const [member function]
cls.add_method('GetNBridgePorts',
'uint32_t',
[],
is_const=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Node> ns3::BridgeNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): static ns3::TypeId ns3::BridgeNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetPromiscReceiveCallback(ns3::NetDevice::PromiscReceiveCallback cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetReceiveCallback(ns3::NetDevice::ReceiveCallback cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True, visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardBroadcast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function]
cls.add_method('ForwardBroadcast',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardUnicast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function]
cls.add_method('ForwardUnicast',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetLearnedState(ns3::Mac48Address source) [member function]
cls.add_method('GetLearnedState',
'ns3::Ptr< ns3::NetDevice >',
[param('ns3::Mac48Address', 'source')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::Learn(ns3::Mac48Address source, ns3::Ptr<ns3::NetDevice> port) [member function]
cls.add_method('Learn',
'void',
[param('ns3::Mac48Address', 'source'), param('ns3::Ptr< ns3::NetDevice >', 'port')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ReceiveFromDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Address const & source, ns3::Address const & destination, ns3::NetDevice::PacketType packetType) [member function]
cls.add_method('ReceiveFromDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'destination'), param('ns3::NetDevice::PacketType', 'packetType')],
visibility='protected')
return
def register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): ns3::ObjectBase * ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()() [member operator]
cls.add_method('operator()',
'ns3::ObjectBase *',
[],
custom_name='__call__', is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
register_functions_ns3_TracedValueCallback(module.add_cpp_namespace('TracedValueCallback'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.add_cpp_namespace('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
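# A minimal, hand-written sketch of the pybindgen calls this generated file
# relies on (the module name 'example' and the C++ class 'MyClass' are
# hypothetical; the add_method calling convention matches the code above):
#
#   import sys
#   import pybindgen
#
#   mod = pybindgen.Module('example')
#   cls = mod.add_class('MyClass')
#   cls.add_constructor([])
#   cls.add_method('GetValue', 'int', [], is_const=True)
#   mod.generate(pybindgen.FileCodeSink(sys.stdout))   # emits C++ wrapper code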
|
gpl-2.0
|
viniciusgama/blog_gae
|
django/contrib/gis/gdal/prototypes/srs.py
|
321
|
3378
|
from ctypes import c_char_p, c_int, c_void_p, POINTER
from django.contrib.gis.gdal.libgdal import lgdal, std_call
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, int_output, \
srs_output, string_output, void_output
## Shortcut generation for routines with known parameters.
def srs_double(f):
"""
Creates a function prototype for the OSR routines that take
the OSRSpatialReference object and return a double value.
"""
return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)
def units_func(f):
"""
Creates a ctypes function prototype for OSR units functions, e.g.,
OSRGetAngularUnits, OSRGetLinearUnits.
"""
return double_output(f, [c_void_p, POINTER(c_char_p)], strarg=True)
# Creation & destruction.
clone_srs = srs_output(std_call('OSRClone'), [c_void_p])
new_srs = srs_output(std_call('OSRNewSpatialReference'), [c_char_p])
release_srs = void_output(lgdal.OSRRelease, [c_void_p], errcheck=False)
destroy_srs = void_output(std_call('OSRDestroySpatialReference'), [c_void_p], errcheck=False)
srs_validate = void_output(lgdal.OSRValidate, [c_void_p])
# Getting the semi_major, semi_minor, and flattening functions.
semi_major = srs_double(lgdal.OSRGetSemiMajor)
semi_minor = srs_double(lgdal.OSRGetSemiMinor)
invflattening = srs_double(lgdal.OSRGetInvFlattening)
# WKT, PROJ, EPSG, XML importation routines.
from_wkt = void_output(lgdal.OSRImportFromWkt, [c_void_p, POINTER(c_char_p)])
from_proj = void_output(lgdal.OSRImportFromProj4, [c_void_p, c_char_p])
from_epsg = void_output(std_call('OSRImportFromEPSG'), [c_void_p, c_int])
from_xml = void_output(lgdal.OSRImportFromXML, [c_void_p, c_char_p])
from_user_input = void_output(std_call('OSRSetFromUserInput'), [c_void_p, c_char_p])
# Morphing to/from ESRI WKT.
morph_to_esri = void_output(lgdal.OSRMorphToESRI, [c_void_p])
morph_from_esri = void_output(lgdal.OSRMorphFromESRI, [c_void_p])
# Identifying the EPSG
identify_epsg = void_output(lgdal.OSRAutoIdentifyEPSG, [c_void_p])
# Getting the angular_units, linear_units functions
linear_units = units_func(lgdal.OSRGetLinearUnits)
angular_units = units_func(lgdal.OSRGetAngularUnits)
# For exporting to WKT, PROJ.4, "Pretty" WKT, and XML.
to_wkt = string_output(std_call('OSRExportToWkt'), [c_void_p, POINTER(c_char_p)])
to_proj = string_output(std_call('OSRExportToProj4'), [c_void_p, POINTER(c_char_p)])
to_pretty_wkt = string_output(std_call('OSRExportToPrettyWkt'), [c_void_p, POINTER(c_char_p), c_int], offset=-2)
# Memory leak fixed in GDAL 1.5; still exists in 1.4.
to_xml = string_output(lgdal.OSRExportToXML, [c_void_p, POINTER(c_char_p), c_char_p], offset=-2)
# String attribute retrieval routines.
get_attr_value = const_string_output(std_call('OSRGetAttrValue'), [c_void_p, c_char_p, c_int])
get_auth_name = const_string_output(lgdal.OSRGetAuthorityName, [c_void_p, c_char_p])
get_auth_code = const_string_output(lgdal.OSRGetAuthorityCode, [c_void_p, c_char_p])
# SRS Properties
isgeographic = int_output(lgdal.OSRIsGeographic, [c_void_p])
islocal = int_output(lgdal.OSRIsLocal, [c_void_p])
isprojected = int_output(lgdal.OSRIsProjected, [c_void_p])
# Coordinate transformation
new_ct = srs_output(std_call('OCTNewCoordinateTransformation'), [c_void_p, c_void_p])
destroy_ct = void_output(std_call('OCTDestroyCoordinateTransformation'), [c_void_p], errcheck=False)
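# A brief usage sketch of the prototypes above (normally they are consumed by
# django.contrib.gis.gdal.SpatialReference rather than called directly):
#
#   from ctypes import byref, c_char_p
#   srs = new_srs('')                      # bare OSRSpatialReference handle
#   from_epsg(srs, 4326)                   # load WGS 84 by EPSG code
#   wkt = to_wkt(srs, byref(c_char_p()))   # export the WKT representation
#   release_srs(srs)                       # free the handle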
|
bsd-3-clause
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/social/backends/nk.py
|
70
|
2723
|
from urllib import urlencode
import six
from requests_oauthlib import OAuth1
from social.backends.oauth import BaseOAuth2
class NKOAuth2(BaseOAuth2):
"""NK OAuth authentication backend"""
name = 'nk'
AUTHORIZATION_URL = 'https://nk.pl/oauth2/login'
ACCESS_TOKEN_URL = 'https://nk.pl/oauth2/token'
SCOPE_SEPARATOR = ','
ACCESS_TOKEN_METHOD = 'POST'
SIGNATURE_TYPE_AUTH_HEADER = 'AUTH_HEADER'
EXTRA_DATA = [
('id', 'id'),
]
def get_user_details(self, response):
"""Return user details from NK account"""
entry = response['entry']
return {
'username': entry.get('displayName'),
'email': entry['emails'][0]['value'],
'first_name': entry.get('displayName', '').split(' ')[0],
'id': entry.get('id')
}
def auth_complete_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
return {
'grant_type': 'authorization_code', # request auth code
'code': self.data.get('code', ''), # server response code
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': self.get_redirect_uri(state),
'scope': self.get_scope_argument()
}
def get_user_id(self, details, response):
"""Return a unique ID for the current user, by default from server
response."""
return details.get(self.ID_KEY)
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
url = 'http://opensocial.nk-net.pl/v09/social/rest/people/@me?' + urlencode({
'nk_token': access_token,
'fields': 'name,surname,avatar,localization,age,gender,emails,birthdate'
})
return self.get_json(
url,
auth=self.oauth_auth(access_token)
)
def oauth_auth(self, token=None, oauth_verifier=None,
signature_type=SIGNATURE_TYPE_AUTH_HEADER):
key, secret = self.get_key_and_secret()
oauth_verifier = oauth_verifier or self.data.get('oauth_verifier')
token = token or {}
# decoding='utf-8' produces errors with python-requests on Python3
# since the final URL will be of type bytes
decoding = None if six.PY3 else 'utf-8'
state = self.get_or_create_state()
return OAuth1(key, secret,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=self.get_redirect_uri(state),
verifier=oauth_verifier,
signature_type=signature_type,
decoding=decoding)
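# A minimal configuration sketch (the settings names follow the usual
# python-social-auth conventions; key and secret values are placeholders):
#
#   AUTHENTICATION_BACKENDS = (
#       'social.backends.nk.NKOAuth2',
#   )
#   SOCIAL_AUTH_NK_KEY = '<consumer key>'
#   SOCIAL_AUTH_NK_SECRET = '<consumer secret>'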
|
agpl-3.0
|
Senseg/Py4A
|
python-modules/twisted/twisted/python/urlpath.py
|
81
|
3431
|
# -*- test-case-name: twisted.test.test_paths -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
import urlparse
import urllib
class URLPath:
def __init__(self, scheme='', netloc='localhost', path='',
query='', fragment=''):
self.scheme = scheme or 'http'
self.netloc = netloc
self.path = path or '/'
self.query = query
self.fragment = fragment
_qpathlist = None
_uqpathlist = None
def pathList(self, unquote=0, copy=1):
if self._qpathlist is None:
self._qpathlist = self.path.split('/')
self._uqpathlist = map(urllib.unquote, self._qpathlist)
if unquote:
result = self._uqpathlist
else:
result = self._qpathlist
if copy:
return result[:]
else:
return result
def fromString(klass, st):
t = urlparse.urlsplit(st)
u = klass(*t)
return u
fromString = classmethod(fromString)
def fromRequest(klass, request):
return klass.fromString(request.prePathURL())
fromRequest = classmethod(fromRequest)
def _pathMod(self, newpathsegs, keepQuery):
if keepQuery:
query = self.query
else:
query = ''
return URLPath(self.scheme,
self.netloc,
'/'.join(newpathsegs),
query)
def sibling(self, path, keepQuery=0):
l = self.pathList()
l[-1] = path
return self._pathMod(l, keepQuery)
def child(self, path, keepQuery=0):
l = self.pathList()
if l[-1] == '':
l[-1] = path
else:
l.append(path)
return self._pathMod(l, keepQuery)
def parent(self, keepQuery=0):
l = self.pathList()
if l[-1] == '':
del l[-2]
else:
# We are a file, such as http://example.com/foo/bar
# our parent directory is http://example.com/
l.pop()
l[-1] = ''
return self._pathMod(l, keepQuery)
def here(self, keepQuery=0):
l = self.pathList()
if l[-1] != '':
l[-1] = ''
return self._pathMod(l, keepQuery)
def click(self, st):
"""Return a path which is the URL where a browser would presumably take
you if you clicked on a link with an HREF as given.
"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(st)
if not scheme:
scheme = self.scheme
if not netloc:
netloc = self.netloc
if not path:
path = self.path
if not query:
query = self.query
elif path[0] != '/':
l = self.pathList()
l[-1] = path
path = '/'.join(l)
return URLPath(scheme,
netloc,
path,
query,
fragment)
def __str__(self):
x = urlparse.urlunsplit((
self.scheme, self.netloc, self.path,
self.query, self.fragment))
return x
def __repr__(self):
return ('URLPath(scheme=%r, netloc=%r, path=%r, query=%r, fragment=%r)'
% (self.scheme, self.netloc, self.path, self.query, self.fragment))
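# A short sketch of the path algebra defined above:
#
#   >>> p = URLPath.fromString('http://example.com/foo/bar?a=1')
#   >>> str(p.sibling('baz'))   # replace the last segment; query dropped
#   'http://example.com/foo/baz'
#   >>> str(p.child('baz'))     # descend one level
#   'http://example.com/foo/bar/baz'
#   >>> str(p.parent())         # up one directory
#   'http://example.com/'
#   >>> str(p.click('quux'))    # browser-style relative resolution
#   'http://example.com/foo/quux'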
|
apache-2.0
|
48thct2jtnf/P
|
contrib/linearize/linearize-hashes.py
|
18
|
3037
|
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 51473
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
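# An example CONFIG-FILE for this script; the parser above accepts simple
# key=value lines and '#' comments (credential values are placeholders):
#
#   # RPC connection
#   host=127.0.0.1
#   port=51473
#   rpcuser=someuser
#   rpcpassword=somepassword
#   # block range to list
#   min_height=0
#   max_height=313000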
|
mit
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/linear_model/plot_ridge_path.py
|
254
|
1655
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
wwfifi/uliweb
|
uliweb/orm/__init__.py
|
1
|
148065
|
# This module is used for wrapping SQLAlchemy into a simple ORM
# Author: limodou <[email protected]>
__all__ = ['Field', 'get_connection', 'Model', 'do_',
'set_debug_query', 'set_auto_create', 'set_auto_set_model',
'get_model', 'set_model', 'engine_manager',
'set_auto_transaction_in_web', 'set_auto_transaction_in_notweb',
'set_tablename_converter', 'set_check_max_length', 'set_post_do',
'rawsql', 'Lazy', 'set_echo', 'Session', 'get_session', 'set_session',
'CHAR', 'BLOB', 'TEXT', 'DECIMAL', 'Index', 'datetime', 'decimal',
'Begin', 'Commit', 'Rollback', 'Reset', 'ResetAll', 'CommitAll', 'RollbackAll',
'PICKLE', 'BIGINT', 'set_pk_type', 'PKTYPE', 'FILE', 'INT', 'SMALLINT', 'DATE',
'TIME', 'DATETIME', 'FLOAT', 'BOOLEAN', 'UUID', 'BINARY', 'VARBINARY',
'JSON', 'UUID_B',
'BlobProperty', 'BooleanProperty', 'DateProperty', 'DateTimeProperty',
'TimeProperty', 'DecimalProperty', 'FloatProperty', 'SQLStorage',
'IntegerProperty', 'Property', 'StringProperty', 'CharProperty',
'TextProperty', 'UnicodeProperty', 'Reference', 'ReferenceProperty',
'PickleProperty', 'BigIntegerProperty', 'FileProperty', 'JsonProperty',
'UUIDBinaryProperty', 'UUIDProperty',
'SelfReference', 'SelfReferenceProperty', 'OneToOne', 'ManyToMany',
'ReservedWordError', 'BadValueError', 'DuplicatePropertyError',
'ModelInstanceError', 'KindError', 'ConfigurationError', 'SaveError',
'BadPropertyTypeError', 'set_lazy_model_init',
'begin_sql_monitor', 'close_sql_monitor', 'set_model_config', 'text',
'get_object', 'get_cached_object',
'set_server_default', 'set_nullable', 'set_manytomany_index_reverse',
'NotFound',
'get_field_type', 'create_model', 'get_metadata', 'migrate_tables',
'print_model',
]
__auto_create__ = False
__auto_set_model__ = True
__auto_transaction_in_web__ = False
__auto_transaction_in_notweb__ = False
__debug_query__ = None
__default_engine__ = 'default'
__default_encoding__ = 'utf-8'
__zero_float__ = 0.0000005
__models__ = {}
__model_paths__ = {}
__pk_type__ = 'int'
__default_tablename_converter__ = None
__check_max_length__ = False #used to check max_length parameter
__default_post_do__ = None #used to process post_do topic
__nullable__ = False #null not enabled by default
__server_default__ = False #server_default not enabled by default
__manytomany_index_reverse__ = False
__lazy_model_init__ = False
import sys
import decimal
import threading
import datetime
import copy
import re
import cPickle as pickle
from uliweb.utils import date as _date
from uliweb.utils.common import (flat_list, classonlymethod, simple_value,
safe_str, import_attr)
from sqlalchemy import *
from sqlalchemy.sql import select, ColumnElement, text, true
from sqlalchemy.pool import NullPool
import sqlalchemy.engine.base as EngineBase
from uliweb.core import dispatch
import threading
import warnings
import inspect
from uliweb.utils.sorteddict import SortedDict
from . import patch
Local = threading.local()
Local.dispatch_send = True
Local.conn = {}
Local.trans = {}
Local.echo = False
Local.echo_func = sys.stdout.write
class Error(Exception):pass
class NotFound(Error):
def __init__(self, message, model, id):
self.message = message
self.model = model
self.id = id
def __str__(self):
return "%s(%s) instance can't be found" % (self.model.__name__, str(self.id))
class ModelNotFound(Error):pass
class ReservedWordError(Error):pass
class ModelInstanceError(Error):pass
class DuplicatePropertyError(Error):
"""Raised when a property is duplicated in a model definition."""
class BadValueError(Error):pass
class BadPropertyTypeError(Error):pass
class KindError(Error):pass
class ConfigurationError(Error):pass
class SaveError(Error):pass
_SELF_REFERENCE = object()
class Lazy(object): pass
class SQLStorage(dict):
"""
A dictionary that lets you do d['a'] as well as d.a
"""
def __getattr__(self, key): return self[key]
def __setattr__(self, key, value):
if self.has_key(key):
raise SyntaxError('Object exists and cannot be redefined')
self[key] = value
def __repr__(self): return '<SQLStorage ' + dict.__repr__(self) + '>'
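#A quick sketch of the attribute access defined above:
# s = SQLStorage(a=1)
# s.a # -> 1
# s.b = 2 # new keys may be set as attributes
# s.b = 3 # would raise SyntaxError: existing keys cannot be redefined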
def set_auto_create(flag):
global __auto_create__
__auto_create__ = flag
def set_auto_transaction_in_notweb(flag):
global __auto_transaction_in_notweb__
__auto_transaction_in_notweb__ = flag
def set_auto_transaction_in_web(flag):
global __auto_transaction_in_web__
__auto_transaction_in_web__ = flag
def set_auto_set_model(flag):
global __auto_set_model__
__auto_set_model__ = flag
def set_debug_query(flag):
global __debug_query__
__debug_query__ = flag
def set_check_max_length(flag):
global __check_max_length__
__check_max_length__ = flag
def set_post_do(func):
global __default_post_do__
__default_post_do__ = func
def set_nullable(flag):
global __nullable__
__nullable__ = flag
def set_server_default(flag):
global __server_default__
__server_default__ = flag
def set_manytomany_index_reverse(flag):
global __manytomany_index_reverse__
__manytomany_index_reverse__ = flag
def set_encoding(encoding):
global __default_encoding__
__default_encoding__ = encoding
def set_dispatch_send(flag):
global Local
Local.dispatch_send = flag
def set_tablename_converter(converter=None):
global __default_tablename_converter__
__default_tablename_converter__ = converter
def set_lazy_model_init(flag):
global __lazy_model_init__
__lazy_model_init__ = flag
def get_tablename(tablename):
global __default_tablename_converter__
c = __default_tablename_converter__
if not c:
c = lambda x:x.lower()
return c(tablename)
def get_dispatch_send(default=True):
global Local
if not hasattr(Local, 'dispatch_send'):
Local.dispatch_send = default
return Local.dispatch_send
def set_echo(flag, time=None, explain=False, caller=True, session=None):
global Local
Local.echo = flag
Local.echo_args = {'time':time, 'explain':explain, 'caller':caller,
'session':None}
def set_pk_type(name):
global __pk_type__
__pk_type__ = name
def PKTYPE():
if __pk_type__ == 'int':
return int
else:
return BIGINT
def PKCLASS():
if __pk_type__ == 'int':
return Integer
else:
return BigInteger
class NamedEngine(object):
def __init__(self, name, options):
self.name = name
d = SQLStorage({
'engine_name':name,
'connection_args':{},
'debug_log':None,
'connection_type':'long',
'duplication':False,
})
strategy = options.pop('strategy', None)
d.update(options)
if d.get('debug_log', None) is None:
d['debug_log'] = __debug_query__
if d.get('connection_type') == 'short':
d['connection_args']['poolclass'] = NullPool
if strategy:
d['connection_args']['strategy'] = strategy
self.options = d
self.engine_instance = None
self.metadata = MetaData()
self._models = {}
self.local = threading.local() #used to save thread vars
self._create()
def _get_models(self):
if self.options.duplication:
return engine_manager[self.options.duplication].models
else:
return self._models
models = property(fget=_get_models)
def _create(self, new=False):
c = self.options
db = self.engine_instance
if not self.engine_instance or new:
args = c.get('connection_args', {})
self.engine_instance = create_engine(c.get('connection_string'), **args)
self.engine_instance.echo = c['debug_log']
self.engine_instance.metadata = self.metadata
self.metadata.bind = self.engine_instance
return self.engine_instance
def session(self, create=True):
"""
Used to create the default session
"""
if hasattr(self.local, 'session'):
return self.local.session
else:
if create:
s = Session(self.name)
self.local.session = s
return s
def set_session(self, session):
self.local.session = session
@property
def engine(self):
return self.engine_instance
def print_pool_status(self):
if self.engine.pool:
print self.engine.pool.status()
class EngineManager(object):
def __init__(self):
self.engines = {}
def add(self, name, connection_args):
self.engines[name] = engine = NamedEngine(name, connection_args)
return engine
def get(self, name=None):
name = name or __default_engine__
engine = self.engines.get(name)
if not engine:
raise Error('Engine %s does not exist yet' % name)
return engine
def __getitem__(self, name=None):
return self.get(name)
def __setitem__(self, name, connection_args):
return self.add(name, connection_args)
def __contains__(self, name):
return name in self.engines
def items(self):
return self.engines.items()
engine_manager = EngineManager()
class Session(object):
"""
Used to manage the relationship between engine_name and connection;
can also manage transactions
"""
def __init__(self, engine_name=None, auto_transaction=None,
auto_close=True, post_commit=None, post_commit_once=None):
"""
If auto_transaction is True, it'll automatically start a transaction;
in a web environment, it'll be committed or rolled back after the request finishes,
and in a non-web environment, you should invoke commit or rollback yourself.
"""
self.engine_name = engine_name or __default_engine__
self.auto_transaction = auto_transaction
self.auto_close = auto_close
self.engine = engine_manager[engine_name]
self._conn = None
self._trans = None
self.local_cache = {}
self.post_commit = post_commit or []
self.post_commit_once = post_commit_once or []
def __str__(self):
return '<Session engine_name:%s, auto_transaction=%r, auto_close=%r>' % (
self.engine_name, self.auto_transaction, self.auto_close)
@property
def need_transaction(self):
from uliweb import is_in_web
global __auto_transaction_in_notweb__, __auto_transaction_in_web__
if self.auto_transaction is not None:
return self.auto_transaction
else:
#distinguish in web or not web environment
if is_in_web():
return __auto_transaction_in_web__
else:
return __auto_transaction_in_notweb__
@property
def connection(self):
if self._conn:
return self._conn
else:
self._conn = self.engine.engine.connect()
return self._conn
def execute(self, query, *args):
t = self.need_transaction
try:
if t:
self.begin()
return self.connection.execute(query, *args)
except:
if t:
self.rollback()
raise
def set_echo(self, flag, time=None, explain=False, caller=True):
global set_echo
set_echo(flag, time, explain, caller, self)
def do_(self, query, args=None):
global do_
return do_(query, self, args)
def begin(self):
if not self._trans:
self.connection #accessing the property ensures the connection is created
self._trans = self._conn.begin()
return self._trans
def commit(self):
if self._trans and self._conn.in_transaction():
self._trans.commit()
self._trans = None
if self.auto_close:
self._close()
#add post commit hook
if self.post_commit:
if not isinstance(self.post_commit, (list, tuple)):
self.post_commit = [self.post_commit]
for c in self.post_commit:
c()
#add post commit once hook
if self.post_commit_once:
if not isinstance(self.post_commit_once, (list, tuple)):
post_commit_once = [self.post_commit_once]
else:
post_commit_once = self.post_commit_once
self.post_commit_once = []
for c in post_commit_once:
c()
def in_transaction(self):
if not self._conn:
return False
return self._conn.in_transaction()
def rollback(self):
if self._trans and self._conn.in_transaction():
self._trans.rollback()
self._trans = None
if self.auto_close:
self._close()
def _close(self):
if self._conn:
self._conn.close()
self._conn = None
self.local_cache = {}
if self.engine.options.connection_type == 'short':
self.engine.engine.dispose()
def close(self):
self.rollback()
self._close()
def get_local_cache(self, key, creator=None):
value = self.local_cache.get(key)
if value:
return value
if callable(creator):
value = creator()
else:
value = creator
if value:
self.local_cache[key] = value
return value
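#A sketch of explicit transaction control with a Session; get_session and the
#module-level Begin/Commit/Rollback helpers are defined further below, and
#'user_table' is a hypothetical SQLAlchemy table:
#
# session = get_session('default')
# session.begin()
# try:
#     session.execute(user_table.insert().values(username='guest'))
#     session.commit()
# except:
#     session.rollback()
#     raise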
def get_connection(connection='', engine_name=None, connection_type='long', **args):
"""
Create a NamedEngine or just return an existing engine instance.
If '://' is included in the connection parameter, it'll create a new engine object;
otherwise it returns the existing engine instance.
"""
engine_name = engine_name or __default_engine__
if '://' in connection:
d = {
'connection_string':connection,
'connection_args':args,
'connection_type':connection_type,
}
return engine_manager.add(engine_name, d).engine
else:
connection = connection or __default_engine__
if connection in engine_manager:
return engine_manager[connection].engine
else:
raise Error("Can't find engine %s" % connection)
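#A minimal sketch of the two call styles accepted above (the sqlite URL is
#only an example):
#
# db = get_connection('sqlite://') # register a new 'default' engine and return it
# db = get_connection() # return the already-registered default engine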
def get_metadata(engine_name=None):
"""
Get the metadata of the given engine, used for alembic.
It'll import all tables.
"""
dispatch.get(None, 'load_models')
engine = engine_manager[engine_name]
for tablename, m in engine.models.items():
get_model(tablename, engine_name, signal=False)
if hasattr(m, '__dynamic__') and getattr(m, '__dynamic__'):
m.table.__mapping_only__ = True
return engine.metadata
def get_session(ec=None, create=True):
"""
ec - engine_name or connection
"""
ec = ec or __default_engine__
if isinstance(ec, (str, unicode)):
session = engine_manager[ec].session(create=create)
elif isinstance(ec, Session):
session = ec
else:
raise Error("Connection %r should be existed engine name or Session object" % ec)
return session
def set_session(session=None, engine_name='default'):
if not session:
session = Session()
engine_manager[engine_name].set_session(session)
return session
def Reset(ec=None):
session = get_session(ec, False)
if session:
session.close()
def ResetAll():
for k, v in engine_manager.items():
session = v.session(create=False)
if session:
session.close()
@dispatch.bind('post_do', kind=dispatch.LOW)
def default_post_do(sender, query, conn, usetime):
if __default_post_do__:
__default_post_do__(sender, query, conn, usetime)
re_placeholder = re.compile(r'%\(\w+\)s')
def rawsql(query, ec=None):
if isinstance(query, Result):
query = query.get_query()
ec = ec or __default_engine__
engine = engine_manager[ec]
dialect = engine.engine.dialect
if isinstance(query, (str, unicode)):
return query
#return str(query.compile(compile_kwargs={"literal_binds": True})).replace('\n', '') + ';'
comp = query.compile(dialect=dialect)
b = re_placeholder.search(comp.string)
if b:
return comp.string % comp.params
else:
if dialect.name == 'postgresql':
return comp.string
else:
params = []
for k in comp.positiontup:
v = comp.params[k]
params.append(repr(simple_value(v)))
line = comp.string.replace('?', '%s') % tuple(params)
return line.replace('\n', '')+';'
def get_engine_name(ec=None):
"""
Get the name of an engine or session
"""
ec = ec or __default_engine__
if isinstance(ec, (str, unicode)):
return ec
elif isinstance(ec, Session):
return ec.engine_name
else:
raise Error("Parameter ec should be an engine_name or Session object, but %r found" % ec)
def print_model(model, engine_name=None, skipblank=False):
from sqlalchemy.schema import CreateTable, CreateIndex
engine = engine_manager[engine_name].engine
M = get_model(model)
t = M.table
s = []
s.append("%s;" % str(CreateTable(t).compile(dialect=engine.dialect)).rstrip())
for x in t.indexes:
s.append("%s;" % CreateIndex(x))
sql = '\n'.join(s)
if skipblank:
return re.sub('[\t\n]+', '', sql)
else:
return sql
def do_(query, ec=None, args=None):
"""
Execute a query
"""
from time import time
from uliweb.utils.common import get_caller
conn = get_session(ec)
b = time()
result = conn.execute(query, *(args or ()))
t = time() - b
dispatch.call(ec, 'post_do', query, conn, t)
flag = False
sql = ''
if hasattr(Local, 'echo') and Local.echo:
if hasattr(Local, 'echo_args'):
_ec = Local.echo_args.get('session')
else:
_ec = None
engine_name = get_engine_name(ec)
_e = get_engine_name(_ec)
if not _ec or _ec and _ec == _e:
if hasattr(Local, 'echo_args') and Local.echo_args['time']:
if t >= Local.echo_args['time']:
sql = rawsql(query)
flag = True
else:
sql = rawsql(query)
flag = True
if flag:
print '\n===>>>>> [%s]' % engine_name,
if hasattr(Local, 'echo_args') and Local.echo_args['caller']:
v = get_caller(skip=__file__)
print '(%s:%d:%s)' % v
else:
print
print sql
if hasattr(Local, 'echo_args') and Local.echo_args['explain'] and sql:
r = conn.execute('explain '+sql).fetchone()
print '\n----\nExplain: %s' % ''.join(["%s=%r, " % (k, v) for k, v in r.items()])
print '===<<<<< time used %fs\n' % t
return result
def save_file(result, filename, encoding='utf8', headers=None, convertors=None, visitor=None):
"""
Save a query result to a csv file.
visitor can be used to convert values; all values should be converted to strings.
A visitor function should be defined as:
def visitor(keys, values, encoding):
#return new values []
convertors is used to convert a single column value, for example:
convertors = {'field1':convert_func1, 'field2':convert_func2}
def convert_func1(value, data):
value is the value of field1
data is the record
If visitor and convertors are both provided, only visitor takes effect.
headers is used to map a column name to a provided value.
"""
import csv
from uliweb.utils.common import simple_value
convertors = convertors or {}
headers = headers or {}
def convert(k, v, data):
f = convertors.get(k)
if f:
v = f(v, data)
return v
def convert_header(k):
return headers.get(k, k)
def _r(x):
if isinstance(x, (str, unicode)):
return re.sub('\r\n|\r|\n', ' ', x)
else:
return x
with open(filename, 'wb') as f:
w = csv.writer(f)
w.writerow([simple_value(convert_header(x), encoding=encoding) for x in result.keys()])
for row in result:
if visitor and callable(visitor):
_row = visitor(result.keys, row.values(), encoding)
else:
_row = [convert(k, v, row) for k, v in zip(result.keys(), row.values())]
r = [simple_value(_r(x), encoding=encoding) for x in _row]
w.writerow(r)
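#A short usage sketch (the query and the 'date_join' convertor are
#hypothetical; convertors follows the format documented above):
#
# def format_date(value, data):
#     return value.strftime('%Y-%m-%d')
#
# result = do_(User.table.select())
# save_file(result, 'users.csv', convertors={'date_join':format_date})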
def Begin(ec=None):
session = get_session(ec)
return session.begin()
def Commit(ec=None, close=None):
if close:
warnings.simplefilter('default')
warnings.warn("close parameter will not need at all.", DeprecationWarning)
session = get_session(ec, False)
if session:
return session.commit()
def CommitAll(close=None):
"""
Commit all transactions recorded in Local.conn
"""
if close:
warnings.simplefilter('default')
warnings.warn("close parameter will not need at all.", DeprecationWarning)
for k, v in engine_manager.items():
session = v.session(create=False)
if session:
session.commit()
def Rollback(ec=None, close=None):
if close:
warnings.simplefilter('default')
warnings.warn("close parameter will not need at all.", DeprecationWarning)
session = get_session(ec, False)
if session:
return session.rollback()
def RollbackAll(close=None):
"""
Rollback all transactions recorded in Local.conn
"""
if close:
warnings.simplefilter('default')
warnings.warn("close parameter will not need at all.", DeprecationWarning)
for k, v in engine_manager.items():
session = v.session(create=False)
if session:
session.rollback()
def check_reserved_word(f):
if f in ['put', 'save', 'table', 'tablename', 'c', 'columns', 'manytomany'] or f in dir(Model):
raise ReservedWordError(
"Cannot define property using reserved word '%s'. " % f
)
def set_model(model, tablename=None, created=None, appname=None, model_path=None):
"""
Register a model and tablename to a global variable.
model could be in string format, i.e., 'uliweb.contrib.auth.models.User'
:param appname: if no appname is given, it'll be derived from the model
item structure:
created
model
model_path
appname
For a dynamic model you should pass model_path with an '' value
"""
if isinstance(model, type) and issubclass(model, Model):
#use alias first
tablename = model._alias or model.tablename
tablename = tablename.lower()
#set global __models__
d = __models__.setdefault(tablename, {})
engines = d.get('config', {}).pop('engines', ['default'])
if isinstance(engines, (str, unicode)):
engines = [engines]
d['engines'] = engines
item = {}
if created is not None:
item['created'] = created
else:
item['created'] = None
if isinstance(model, (str, unicode)):
if model_path is None:
model_path = model
else:
model_path = model_path
if not appname:
appname = model.rsplit('.', 2)[0]
#for example 'uliweb.contrib.auth.models.User'
model = None
else:
appname = model.__module__.rsplit('.', 1)[0]
if model_path is None:
model_path = model.__module__ + '.' + model.__name__
else:
model_path = ''
#for example 'uliweb.contrib.auth.models'
model.__engines__ = engines
item['model'] = model
item['model_path'] = model_path
item['appname'] = appname
d['model_path'] = model_path
d['appname'] = appname
for name in engines:
if not isinstance(name, (str, unicode)):
raise BadValueError('Engine name should be string type, but %r found' % name)
engine_manager[name].models[tablename] = item.copy()
def set_model_config(model_name, config, replace=False):
"""
This function should only be used in the initialization phase
:param model_name: model name, it should be a string
:param config: config should be a dict, e.g.
{'__mapping_only__', '__tablename__', '__ext_model__'}
:param replace: if True, replace the original config; if False, update it
"""
assert isinstance(model_name, str)
assert isinstance(config, dict)
d = __models__.setdefault(model_name, {})
if replace:
d['config'] = config
else:
c = d.setdefault('config', {})
c.update(config)
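#Example (a sketch; 'user' must be the name of a registered model):
#
# set_model_config('user', {'__tablename__':'auth_user'})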
def create_model(modelname, fields, indexes=None, basemodel=None, **props):
"""
Create model dynamically
:param fields: The format is like [
{'name':name, 'type':type, ...},
...
]
type should be a string, e.g. 'str', 'int', etc.
kwargs will be passed to Property.__init__() according to the field type;
it should be a dict
:param props: Model attributes, such as '__mapping_only__', '__replace__'
:param indexes: Multiple-field index; a single-field index can be set directly
using `index=True` on a field. The value format should be:
[
{'name':name, 'fields':[...], ...},
]
e.g. [
{'name':'audit_idx', 'fields':['table_id', 'obj_id']}
]
kwargs can be omitted.
:param basemodel: Will be the new Model base class, so the new Model can inherit
parent methods; it can be a string or a real class object
"""
assert not props or isinstance(props, dict)
assert not indexes or isinstance(indexes, list)
props = SortedDict(props or {})
props['__dynamic__'] = True
props['__config__'] = False
for p in fields:
kwargs = p.copy()
name = kwargs.pop('name')
_type = kwargs.pop('type')
#if the key starts with '_', remove it
for k in kwargs.keys():
if k.startswith('_'):
kwargs.pop(k, None)
field_type = get_field_type(_type)
prop = field_type(**kwargs)
props[name] = prop
if basemodel:
model = import_attr(basemodel)
# model.clear_relation()
else:
model = Model
# try:
# old = get_model(modelname, signal=False)
# old.clear_relation()
# except ModelNotFound as e:
# pass
cls = type(str(modelname.title()), (model,), props)
tablename = props.get('__tablename__', modelname)
set_model(cls, tablename, appname=__name__, model_path='')
get_model(modelname, signal=False, reload=True)
indexes = indexes or []
for x in indexes:
kwargs = x.copy()
name = kwargs.pop('name')
fields = kwargs.pop('fields')
#if the key starts with '_', remove it
for k in kwargs.keys():
if k.startswith('_'):
kwargs.pop(k, None)
if not isinstance(fields, (list, tuple)):
raise ValueError("Index value format is invalid, the value is %r" % indexes)
props = []
for y in fields:
props.append(cls.c[y])
Index(name, *props, **kwargs)
return cls
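#A minimal sketch following the field and index formats documented above
#(the model name and field names are hypothetical):
#
# cls = create_model('article',
#     fields=[{'name':'title', 'type':'str', 'max_length':255}],
#     indexes=[{'name':'article_title_idx', 'fields':['title']}])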
def valid_model(model, engine_name=None):
if isinstance(model, type) and issubclass(model, Model):
return True
if engine_name:
engine = engine_manager[engine_name]
return model in engine.models
else:
return True
def check_model_class(model_cls):
# """
# :param model: Model instance
# Model.__engines__ could be a list, so if there are multiple then use
# the first one
# """
#check dynamic flag
if getattr(model_cls, "__dynamic__", False):
return True
#check the model_path
model_path = model_cls.__module__ + '.' + model_cls.__name__
_path = __models__.get(model_cls.tablename, {}).get('model_path', '')
if _path and model_path != _path:
return False
return True
def find_metadata(model):
"""
:param model: Model instance
"""
engine_name = model.get_engine_name()
engine = engine_manager[engine_name]
return engine.metadata
def get_model(model, engine_name=None, signal=True, reload=False):
"""
Return a real model object: if the model is already a Model class,
return it directly; if not, import it.
If engine_name is None and there are multiple engines defined, it'll use
'default'; but if there is only one engine defined, it'll use that one.
:param signal: Used to switch the dispatch signal on or off
"""
if isinstance(model, type) and issubclass(model, Model):
return model
if not isinstance(model, (str, unicode)):
raise Error("Model %r should be string or unicode type" % model)
#make the model name lower case
model = model.lower()
model_item = __models__.get(model)
if not model_item:
model_item = dispatch.get(None, 'find_model', model_name=model)
if model_item:
if not engine_name:
#search according to model_item; it should have only one engine defined
engines = model_item['engines']
if len(engines) > 1:
engine_name = __default_engine__
else:
engine_name = engines[0]
engine = engine_manager[engine_name]
item = engine._models.get(model)
#process duplication
if not item and engine.options.duplication:
_item = engine.models.get(model)
if _item:
item = _item.copy()
item['model'] = None
engine._models[model] = item
if item:
loaded = False #True means the model is already loaded, so consider whether it needs to be cached
m = item['model']
m_config = __models__[model].get('config', {})
if isinstance(m, type) and issubclass(m, Model):
loaded = True
if reload:
loaded = False
#add get_model previous hook
if signal:
model_inst = dispatch.get(None, 'get_model', model_name=model, model_inst=m,
model_info=item, model_config=m_config) or m
if m is not model_inst:
loaded = False
else:
model_inst = m
else:
#add get_model previous hook
if signal:
model_inst = dispatch.get(None, 'get_model', model_name=model, model_inst=None,
model_info=item, model_config=m_config)
else:
model_inst = None
if not model_inst:
if item['model_path']:
mod_path, name = item['model_path'].rsplit('.', 1)
mod = __import__(mod_path, fromlist=['*'])
model_inst = getattr(mod, name)
#empty model_path means dynamic model
if not model_inst:
raise ModelNotFound("Can't find the model %s in engine %s" % (model, engine_name))
if not loaded:
if model_inst._bound_classname == model and not reload:
model_inst = model_inst._use(engine_name)
item['model'] = model_inst
else:
config = __models__[model].get('config', {})
if config:
for k, v in config.items():
setattr(model_inst, k, v)
item['model'] = model_inst
model_inst._alias = model
model_inst._engine_name = engine_name
if __lazy_model_init__:
for k, v in model_inst.properties.items():
v.__property_config__(model_inst, k)
#add bind process
if reload:
reset = True
else:
reset = False
model_inst.bind(engine.metadata, reset=reset)
#post get_model
if signal:
dispatch.call(None, 'post_get_model', model_name=model, model_inst=model_inst,
model_info=item, model_config=m_config)
return model_inst
raise ModelNotFound("Can't find the model %s in engine %s" % (model, engine_name))
def get_object_id(engine_name, tablename, id):
return 'OC:%s:%s:%s' % (engine_name, tablename, str(id))
def get_object(table, id=None, condition=None, cache=False, fields=None, use_local=False,
engine_name=None, session=None):
"""
Get the object from the session's local cache first, and fall back to the
get(cache=True) function if it's not found in the cache
"""
from uliweb import functions, settings
model = get_model(table, engine_name)
#if id is already a Model instance, return it directly
if isinstance(id, Model):
return id
if cache:
if use_local:
s = get_session(session)
key = get_object_id(s.engine_name, model.tablename, id)
value = s.get_local_cache(key)
if value:
return value
obj = model.get(id, condition=condition, fields=fields, cache=True)
if use_local:
value = s.get_local_cache(key, obj)
else:
obj = model.get(id, condition=condition, fields=fields)
return obj
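#A usage sketch (the 'user' model name is hypothetical):
#
# obj = get_object('user', 5) # plain Model.get()
# obj = get_object('user', 5, cache=True, use_local=True) # per-session memoized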
def get_cached_object(table, id, condition=None, cache=True, fields=None, use_local=True, session=None):
return get_object(table, id, condition, cache, fields, use_local, session)
class SQLMonitor(object):
def __init__(self, key_length=65, record_details=False):
self.count = SortedDict()
self.total = 0
self.key_length = key_length
self.details = []
self.record_details = record_details
def post_do(sender, query, conn, usetime, self=self):
sql = str(query)
c = self.count.setdefault(sql, {'count':0, 'time':0})
c['count'] += 1
c['time'] += usetime
self.total += 1
if self.record_details:
self.details.append(rawsql(query))
self.post_do = post_do
def print_(self, message=''):
print
print '====== sql execution count %d <%s> =======' % (self.total, message)
for k, v in sorted(self.count.items(), key=lambda x:x[1]):
k = k.replace('\r', '')
k = k.replace('\n', '')
if self.key_length and self.key_length>1 and len(k) > self.key_length:
k = k[:self.key_length-3]+'...'
if self.key_length > 0:
format = "%%-%ds %%3d %%.3f" % self.key_length
else:
format = "%s %3d %.3f"
print format % (k, v['count'], v['time'])
if self.record_details:
print '====== sql statements %d ====' % self.total
for line in self.details:
print '.', line
print
def close(self):
self.count = {}
self.total = 0
self.details = []
def begin_sql_monitor(key_length=70, record_details=False):
sql_monitor = SQLMonitor(key_length, record_details)
dispatch.bind('post_do')(sql_monitor.post_do)
return sql_monitor
def close_sql_monitor(monitor):
dispatch.unbind('post_do', monitor.post_do)
monitor.close()
def get_migrate_script(context, tables, metadata, engine_name=None):
from alembic.autogenerate.api import compare_metadata, _produce_net_changes, \
_autogen_context, _indent, _produce_upgrade_commands, _compare_tables
from sqlalchemy.engine.reflection import Inspector
diffs = []
engine = engine_manager[engine_name]
imports = set()
autogen_context, connection = _autogen_context(context, imports)
#init autogen_context
autogen_context['opts']['sqlalchemy_module_prefix'] = 'sa.'
autogen_context['opts']['alembic_module_prefix'] = 'op.'
inspector = Inspector.from_engine(connection)
_tables = set(inspector.get_table_names()) & set(tables)
conn_table_names = set(zip([None] * len(_tables), _tables))
for t in tables:
m = engine.models.get(t)
if m and not m['model']:
get_model(t, engine_name, signal=False)
metadata_table_names = set(zip([None] * len(tables), tables))
_compare_tables(conn_table_names, metadata_table_names,
(),
inspector, metadata, diffs, autogen_context, False)
script = """
def upgrade():
""" + _indent(_produce_upgrade_commands(diffs, autogen_context)) + """
upgrade()
"""
script = """
import sqlalchemy as sa
%s
""" % '\n'.join(list(imports)) + script
return script
def run_migrate_script(context, script):
import logging
from alembic.operations import Operations
log = logging.getLogger(__name__)
op = Operations(context)
code = compile(script, '<string>', 'exec', dont_inherit=True)
env = {'op':op}
log.debug(script)
exec code in env
def migrate_tables(tables, engine_name=None):
"""
Used to migrate dynamic tables to the database
:param tables: table name list, such as ['user']
"""
from alembic.migration import MigrationContext
engine = engine_manager[engine_name]
mc = MigrationContext.configure(engine.session().connection)
script = get_migrate_script(mc, tables, engine.metadata)
run_migrate_script(mc, script)
class ModelMetaclass(type):
def __init__(cls, name, bases, dct):
super(ModelMetaclass, cls).__init__(name, bases, dct)
if name == 'Model':
return
cls._set_tablename()
cls.properties = {}
cls._fields_list = []
cls._collection_names = {}
defined = set()
is_replace = dct.get('__replace__')
for base in bases:
if hasattr(base, 'properties') and not is_replace:
cls.properties.update(base.properties)
is_config = dct.get('__config__', True)
cls._manytomany = {}
cls._onetoone = {}
for attr_name in dct.keys():
attr = dct[attr_name]
if isinstance(attr, Property):
cls.add_property(attr_name, attr, set_property=False, config=not __lazy_model_init__)
if isinstance(attr, ManyToMany):
cls._manytomany[attr_name] = attr
#if a primary_key is already defined, 'id' will not be made the primary key
#(multiple primary keys are allowed)
#has_primary_key = bool([v for v in cls.properties.itervalues() if 'primary_key' in v.kwargs])
#a model can set __without_id__ so that uliorm will not
#create an 'id' field for it
#if a primary key already exists, 'id' will not be created
#(changed in version 0.2.6)
without_id = getattr(cls, '__without_id__', False)
if 'id' not in cls.properties and not without_id:
cls.properties['id'] = f = Field(PKTYPE(), autoincrement=True,
primary_key=True, default=None, nullable=False, server_default=None)
if not __lazy_model_init__:
f.__property_config__(cls, 'id')
setattr(cls, 'id', f)
fields_list = [(k, v) for k, v in cls.properties.items()]
fields_list.sort(key=lambda x: x[1].creation_counter)
cls._fields_list = fields_list
#check if cls is matched with __models__ module_path
if not check_model_class(cls):
return
if cls._bind and not __lazy_model_init__:
cls.bind(auto_create=__auto_create__)
class LazyValue(object):
def __init__(self, name, property):
self.name = name
self.property = property
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
return self.property.get_lazy(model_instance, self.name, self.property.default)
def __set__(self, model_instance, value):
if model_instance is None:
return
setattr(model_instance, self.name, value)
class Property(object):
data_type = str
field_class = String
type_name = 'str'
creation_counter = 0
property_type = 'column' #Property type: 'column', 'compound', 'relation'
server_default = None
def __init__(self, verbose_name=None, fieldname=None, default=None,
required=False, validators=None, choices=None, max_length=None,
hint='', auto=None, auto_add=None, type_class=None, type_attrs=None,
placeholder='', extra=None,
sequence=False, **kwargs):
self.verbose_name = verbose_name
self.property_name = None
self.name = None
self.fieldname = fieldname
self.default = default
self.required = required
self.auto = auto
self.auto_add = auto_add
self.validators = validators or []
self.hint = hint
if not isinstance(self.validators, (tuple, list)):
self.validators = [self.validators]
self.choices = choices
self.max_length = max_length
self.kwargs = kwargs
self.sequence = sequence
self.creation_counter = Property.creation_counter
self.value = None
self.placeholder = placeholder
self.extra = extra or {}
self.type_attrs = type_attrs or {}
self.type_class = type_class or self.field_class
Property.creation_counter += 1
def get_parameters(self):
"""
Get common attributes; they'll be used in the Model.relationship clone process
"""
d = {}
for k in ['verbose_name', 'required', 'hint', 'placeholder', 'choices',
'default', 'validators', 'max_length']:
d[k] = getattr(self, k)
return d
def _get_column_info(self, kwargs):
kwargs['primary_key'] = self.kwargs.get('primary_key', False)
kwargs['autoincrement'] = self.kwargs.get('autoincrement', False)
kwargs['index'] = self.kwargs.get('index', False)
kwargs['unique'] = self.kwargs.get('unique', False)
#nullable default change to False
kwargs['nullable'] = self.kwargs.get('nullable', __nullable__)
if __server_default__:
kwargs['server_default'] = self.kwargs.get('server_default', self.server_default)
else:
v = self.kwargs.get('server_default', None)
if v is not None and isinstance(v, (int, long)):
v = text(str(v))
kwargs['server_default'] = v
def create(self, cls):
global __nullable__
kwargs = self.kwargs.copy()
kwargs['key'] = self.name
self._get_column_info(kwargs)
f_type = self._create_type()
args = ()
if self.sequence:
args = (self.sequence, )
# return Column(self.property_name, f_type, *args, **kwargs)
return Column(self.fieldname, f_type, *args, **kwargs)
def _create_type(self):
if self.max_length:
f_type = self.type_class(self.max_length, **self.type_attrs)
else:
f_type = self.type_class(**self.type_attrs)
return f_type
def __property_config__(self, model_class, property_name):
self.model_class = model_class
self.property_name = property_name
self.name = property_name
if not self.fieldname:
self.fieldname = property_name
setattr(model_class, self._lazy_value(), LazyValue(self._attr_name(), self))
def get_attr(self, model_instance, name, default):
v = None
if hasattr(model_instance, name):
v = getattr(model_instance, name)
if v is None:
if callable(default):
v = default()
else:
v = default
return v
def get_lazy(self, model_instance, name, default=None):
v = self.get_attr(model_instance, name, default)
if v is Lazy:
_id = getattr(model_instance, 'id')
if not _id:
raise BadValueError('Instance is not a valid object of Model %s, ID property is not found' % model_instance.__class__.__name__)
model_instance.refresh()
v = self.get_attr(model_instance, name, default)
return v
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
try:
return self.get_lazy(model_instance, self._attr_name(), self.default)
except AttributeError:
return None
def __set__(self, model_instance, value):
if model_instance is None:
return
value = self.validate(value)
#add value to model_instance._changed_value, so that you can test if
#a object really need to save
setattr(model_instance, self._attr_name(), value)
def default_value(self):
if callable(self.default):
d = self.default()
else:
d = self.default
return d
def get_choices(self):
if callable(self.choices):
choices = self.choices()
else:
choices = self.choices
return choices or []
def get_display_value(self, value):
if value is None:
return ''
if self.choices:
v = dict(self.get_choices()).get(value, '')
if isinstance(v, str):
v = unicode(v, __default_encoding__)
return v
else:
if isinstance(value, Model):
return unicode(value)
else:
return self.to_unicode(value)
def _validate(self, value, from_dump=False):
if self.empty(value):
if self.required:
raise BadValueError('Property "%s" of Model [%s] is required, but %r found' % (self.name, self.model_class.__name__, value))
#skip Lazy value
if value is Lazy:
return value
try:
if from_dump:
value = self.convert_dump(value)
else:
value = self.convert(value)
except TypeError as err:
raise BadValueError('Property %s must be convertible to %s, but conversion failed: %s' % (self.name, self.data_type, err))
if hasattr(self, 'custom_validate'):
value = self.custom_validate(value)
for v in self.validators:
v(value)
return value
def validate(self, value):
return self._validate(value)
def validate_dump(self, value):
return self._validate(value, from_dump=True)
def empty(self, value):
return (value is None) or (isinstance(value, (str, unicode)) and not value.strip())
def get_value_for_datastore(self, model_instance):
return getattr(model_instance, self._attr_name(), None)
def make_value_from_datastore(self, value):
return value
def convert(self, value):
if self.data_type and not isinstance(value, self.data_type):
return self.data_type(value)
else:
return value
def convert_dump(self, value):
return self.convert(value)
def __repr__(self):
return ("<%s 'type':%r, 'verbose_name':%r, 'name':%r, 'fieldname':%r, "
"'default':%r, 'required':%r, 'validators':%r, "
"'choices':%r, 'max_length':%r, 'kwargs':%r>"
% (
self.__class__.__name__,
self.data_type,
self.verbose_name,
self.name,
self.fieldname,
self.default,
self.required,
self.validators,
self.choices,
self.max_length,
self.kwargs)
)
def _attr_name(self):
return '_STORED_' + self.name + '_'
def _lazy_value(self):
return '_' + self.name + '_'
def to_str(self, v):
if isinstance(v, unicode):
return v.encode(__default_encoding__)
elif isinstance(v, str):
return v
else:
if v is None:
return ''
return str(v)
def to_unicode(self, v):
if isinstance(v, str):
return unicode(v, __default_encoding__)
elif isinstance(v, unicode):
return v
else:
if v is None:
return u''
return unicode(v)
def to_column_info(self):
d = {}
d['verbose_name'] = self.verbose_name or ''
d['name'] = self.name
d['fieldname'] = self.fieldname
d['type'] = self.type_name
d['type_name'] = self.get_column_type_name()
d['relation'] = ''
if isinstance(self, Reference):
d['relation'] = '%s(%s:%s)' % (self.type_name, self.reference_class.__name__, self.reference_fieldname)
self._get_column_info(d)
return d
def get_column_type_name(self):
return self.type_name
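# A minimal sketch of a custom property built on Property, assuming the
# String field class imported above; 'EmailProperty' and its validation
# rule are illustrative, not part of this module:
#
#   class EmailProperty(Property):
#       data_type = unicode
#       field_class = String
#       type_name = 'EMAIL'
#       def custom_validate(self, value):
#           #custom_validate hooks into _validate() above
#           if value and '@' not in value:
#               raise BadValueError('Property %s must look like an email address' % self.name)
#           return value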
class CharProperty(Property):
data_type = unicode
field_class = CHAR
server_default=''
type_name = 'CHAR'
def __init__(self, verbose_name=None, default=u'', max_length=None, **kwds):
if __check_max_length__ and not max_length:
raise BadPropertyTypeError("max_length parameter not passed for property %s" % self.__class__.__name__)
max_length = max_length or 255
super(CharProperty, self).__init__(verbose_name, default=default, max_length=max_length, **kwds)
def convert(self, value):
if value is None:
return u''
if isinstance(value, str):
return unicode(value, __default_encoding__)
else:
return self.data_type(value)
def _create_type(self):
if self.max_length:
f_type = self.type_class(self.max_length, convert_unicode=True, **self.type_attrs)
else:
f_type = self.type_class(**self.type_attrs)
return f_type
def to_str(self, v):
return safe_str(v)
def get_column_type_name(self):
return '%s(%d)' % (self.type_name, self.max_length)
class StringProperty(CharProperty):
type_name = 'VARCHAR'
field_class = VARCHAR
class BinaryProperty(CharProperty):
type_name = 'BINARY'
field_class = BINARY
data_type = str
def _create_type(self):
if self.max_length:
f_type = self.type_class(self.max_length, **self.type_attrs)
else:
f_type = self.type_class(**self.type_attrs)
return f_type
class VarBinaryProperty(BinaryProperty):
type_name = 'VARBINARY'
field_class = VARBINARY
class UUIDBinaryProperty(VarBinaryProperty):
type_name = 'UUID_B'
field_class = VARBINARY
def __init__(self, **kwds):
kwds['max_length'] = 16
super(UUIDBinaryProperty, self).__init__(**kwds)
self.auto_add = True
def default_value(self):
import uuid
u = uuid.uuid4()
return u.bytes
def convert(self, value):
if value is None:
return ''
return value
class UUIDProperty(StringProperty):
type_name = 'UUID'
field_class = VARCHAR
def __init__(self, **kwds):
kwds['max_length'] = 32
super(UUIDProperty, self).__init__(**kwds)
self.auto_add = True
def default_value(self):
import uuid
u = uuid.uuid4()
return u.hex[:self.max_length]
def convert(self, value):
if value is None:
return ''
return value
class FileProperty(StringProperty):
def __init__(self, verbose_name=None, max_length=None, upload_to=None, upload_to_sub=None, **kwds):
max_length = max_length or 255
super(FileProperty, self).__init__(verbose_name, max_length=max_length, **kwds)
self.upload_to = upload_to
self.upload_to_sub = upload_to_sub
class UnicodeProperty(StringProperty):
pass
class TextProperty(Property):
field_class = Text
data_type = unicode
type_name = 'TEXT'
def __init__(self, verbose_name=None, default=u'', **kwds):
super(TextProperty, self).__init__(verbose_name, default=default, max_length=None, **kwds)
def convert(self, value):
if not value:
return u''
if isinstance(value, str):
return unicode(value, __default_encoding__)
else:
return self.data_type(value)
class BlobProperty(Property):
field_class = BLOB
data_type = str
type_name = 'BLOB'
def __init__(self, verbose_name=None, default='', **kwds):
super(BlobProperty, self).__init__(verbose_name, default=default, max_length=None, **kwds)
def get_display_value(self, value):
return repr(value)
def convert(self, value):
if not value:
return ''
return value
class PickleProperty(BlobProperty):
field_class = PickleType
data_type = None
type_name = 'PICKLE'
def to_str(self, v):
return pickle.dumps(v, pickle.HIGHEST_PROTOCOL)
def convert_dump(self, v):
return pickle.loads(v)
def convert(self, value):
return value
class JsonProperty(TextProperty):
field_class = TEXT
data_type = None
type_name = 'JSON'
def get_value_for_datastore(self, model_instance):
from uliweb import json_dumps
return json_dumps(getattr(model_instance, self._attr_name(), None))
def make_value_from_datastore(self, value):
return self.convert_dump(value)
def convert_dump(self, v):
import json
return json.loads(v)
def convert(self, value):
return value
class DateTimeProperty(Property):
data_type = datetime.datetime
field_class = DateTime
server_default = '0000-00-00 00:00:00'
type_name = 'DATETIME'
def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False,
format=None, **kwds):
super(DateTimeProperty, self).__init__(verbose_name, **kwds)
self.auto_now = auto_now
self.auto_now_add = auto_now_add
self.format = format
def custom_validate(self, value):
if value and not isinstance(value, self.data_type):
raise BadValueError('Property %s must be a %s' %
(self.name, self.data_type.__name__))
return value
@staticmethod
def now():
return _date.now()
def make_value_from_datastore(self, value):
if value is not None:
value = self._convert_func(value)
return value
@staticmethod
def _convert_func(*args, **kwargs):
return _date.to_datetime(*args, **kwargs)
def convert(self, value):
if not value:
return None
d = self._convert_func(value, format=self.format)
if d:
return d
raise BadValueError('The datetime value is not a valid format')
def to_str(self, v):
if isinstance(v, self.data_type):
return _date.to_string(v, timezone=False)
else:
if not v:
return ''
return str(v)
def to_unicode(self, v):
if isinstance(v, self.data_type):
return unicode(_date.to_string(v, timezone=False))
else:
if not v:
return u''
return unicode(v)
class DateProperty(DateTimeProperty):
data_type = datetime.date
field_class = Date
server_default = '0000-00-00'
type_name = 'DATE'
@staticmethod
def _convert_func(*args, **kwargs):
return _date.to_date(*args, **kwargs)
@staticmethod
def now():
return _date.to_date(_date.now())
class TimeProperty(DateTimeProperty):
data_type = datetime.time
field_class = Time
server_default = '00:00:00'
type_name = 'TIME'
@staticmethod
def _convert_func(*args, **kwargs):
return _date.to_time(*args, **kwargs)
@staticmethod
def now():
return _date.to_time(_date.now())
class IntegerProperty(Property):
"""An integer property."""
data_type = int
field_class = Integer
server_default=text('0')
type_name = 'INTEGER'
def __init__(self, verbose_name=None, default=0, **kwds):
super(IntegerProperty, self).__init__(verbose_name, default=default, **kwds)
def convert(self, value):
if value == '':
return 0
if value is None:
return value
return self.data_type(value)
def custom_validate(self, value):
if value and not isinstance(value, (int, long, bool)):
raise BadValueError('Property %s must be an int, long or bool, not a %s'
% (self.name, type(value).__name__))
return value
class BigIntegerProperty(IntegerProperty):
field_class = BigInteger
type_name = 'BIGINT'
class SmallIntegerProperty(IntegerProperty):
field_class = SmallInteger
type_name = 'SMALLINT'
class FloatProperty(Property):
"""A float property."""
data_type = float
field_class = Float
server_default=text('0')
type_name = 'FLOAT'
def __init__(self, verbose_name=None, default=0.0, precision=None, **kwds):
super(FloatProperty, self).__init__(verbose_name, default=default, **kwds)
self.precision = precision
def _create_type(self):
f_type = self.type_class(precision=self.precision, **self.type_attrs)
return f_type
def convert(self, value):
if value == '' or value is None:
return 0.0
return self.data_type(value)
def custom_validate(self, value):
if value and not isinstance(value, float):
raise BadValueError('Property %s must be a float, not a %s'
% (self.name, type(value).__name__))
if abs(value) < __zero_float__:
value = 0.0
return value
def get_column_type_name(self):
return '%s' % self.type_name
class DecimalProperty(Property):
"""A decimal property."""
data_type = decimal.Decimal
field_class = Numeric
server_default=text('0.00')
type_name = 'DECIMAL'
def __init__(self, verbose_name=None, default='0.0', precision=10, scale=2, **kwds):
super(DecimalProperty, self).__init__(verbose_name, default=default, **kwds)
self.precision = precision
self.scale = scale
def convert(self, value):
if value == '' or value is None:
return decimal.Decimal('0.0')
return self.data_type(value)
def _create_type(self):
f_type = self.type_class(precision=self.precision, scale=self.scale, **self.type_attrs)
return f_type
def get_display_value(self, value):
if value is None:
return ''
if self.choices:
v = dict(self.get_choices()).get(str(value), '')
if isinstance(v, str):
v = unicode(v, __default_encoding__)
return v
else:
return str(value)
def get_column_type_name(self):
return '%s(%d,%d)' % (self.type_name, self.precision, self.scale)
class BooleanProperty(Property):
"""A boolean property."""
data_type = bool
field_class = Boolean
server_default=text('0')
type_name = 'BOOL'
def __init__(self, verbose_name=None, default=False, **kwds):
super(BooleanProperty, self).__init__(verbose_name, default=default, **kwds)
def custom_validate(self, value):
if value is not None and not isinstance(value, bool):
raise BadValueError('Property %s must be a boolean, not a %s'
% (self.name, type(value).__name__))
return value
def convert(self, value):
if not value:
return False
if value in ['1', 'True', 'true', True]:
return True
else:
return False
class ReferenceProperty(Property):
"""A property that represents a many-to-one reference to another model.
"""
data_type = int
field_class = PKCLASS()
type_name = 'Reference'
def __init__(self, reference_class=None, verbose_name=None, collection_name=None,
reference_fieldname=None, required=False, engine_name=None, **attrs):
"""Construct ReferenceProperty.
Args:
reference_class: Which model class this property references.
verbose_name: User friendly name of property.
collection_name: If provided, alternate name of collection on
reference_class to store back references. Use this to allow
a Model to have multiple fields which refer to the same class.
reference_fieldname: which field of reference_class is referenced
(defaults to 'id')
"""
super(ReferenceProperty, self).__init__(verbose_name, **attrs)
self._collection_name = collection_name
self.reference_fieldname = reference_fieldname or 'id'
self.required = required
self.engine_name = engine_name
self.reference_class = reference_class
if __lazy_model_init__:
if inspect.isclass(self.reference_class) and issubclass(self.reference_class, Model):
warnings.simplefilter('default')
warnings.warn("Reference Model should be a string type, but [%s] model class found." % self.reference_class.__name__, DeprecationWarning)
def create(self, cls):
global __nullable__
args = self.kwargs.copy()
args['key'] = self.name
# if not callable(self.default):
# args['default'] = self.default
args['primary_key'] = self.kwargs.get('primary_key', False)
args['autoincrement'] = self.kwargs.get('autoincrement', False)
args['index'] = self.kwargs.get('index', False)
args['unique'] = self.kwargs.get('unique', False)
args['nullable'] = self.kwargs.get('nullable', __nullable__)
f_type = self._create_type()
if __server_default__:
#for int or long data_type, it'll automatically set text('0')
if self.data_type is int or self.data_type is long :
args['server_default'] = text('0')
else:
v = self.reference_field.kwargs.get('server_default')
args['server_default'] = v
return Column(self.fieldname, f_type, **args)
def _create_type(self):
if not hasattr(self.reference_class, self.reference_fieldname):
raise KindError("reference_fieldname '%s' does not exist" % self.reference_fieldname)
self.reference_field = getattr(self.reference_class, self.reference_fieldname)
#process data_type
self.data_type = self.reference_field.data_type
field_class = self.reference_field.field_class
if self.reference_field.max_length:
f_type = field_class(self.reference_field.max_length)
else:
f_type = field_class
return f_type
def __property_config__(self, model_class, property_name):
"""Loads all of the references that point to this model.
"""
super(ReferenceProperty, self).__property_config__(model_class, property_name)
if not (
(isinstance(self.reference_class, type) and issubclass(self.reference_class, Model)) or
self.reference_class is _SELF_REFERENCE or
valid_model(self.reference_class, self.engine_name)):
raise KindError('reference_class %r must be Model or _SELF_REFERENCE or available table name' % self.reference_class)
if self.reference_class is _SELF_REFERENCE or self.reference_class is None:
self.reference_class = model_class
else:
self.reference_class = get_model(self.reference_class, self.engine_name,
signal=False)
self.collection_name = self.reference_class.get_collection_name(model_class.tablename, self._collection_name, model_class.tablename)
setattr(self.reference_class, self.collection_name,
_ReverseReferenceProperty(model_class, property_name, self._id_attr_name()))
def __get__(self, model_instance, model_class):
"""Get reference object.
This method will fetch unresolved entities from the datastore if
they are not already loaded.
Returns:
ReferenceProperty to Model object if property is set, else None.
"""
if model_instance is None:
return self
if hasattr(model_instance, self._attr_name()):
# reference_id = getattr(model_instance, self._attr_name())
reference_id = self.get_lazy(model_instance, self._attr_name(), None)
else:
reference_id = None
if reference_id:
#this will cache the reference object
resolved = getattr(model_instance, self._resolved_attr_name())
if resolved is not None:
return resolved
else:
#change id_field to reference_fieldname
# id_field = self._id_attr_name()
# d = self.reference_class.c[id_field]
d = self.reference_class.c[self.reference_fieldname]
instance = self.reference_class.get(d==reference_id)
if instance is None:
raise NotFound('ReferenceProperty %s failed to be resolved' % self.reference_fieldname, self.reference_class, reference_id)
setattr(model_instance, self._resolved_attr_name(), instance)
return instance
else:
return None
def get_value_for_datastore(self, model_instance):
if not model_instance:
return None
else:
return getattr(model_instance, self._attr_name(), None)
def __set__(self, model_instance, value):
"""Set reference."""
value = self.validate(value)
if value is not None:
if not isinstance(value, Model):
setattr(model_instance, self._attr_name(), value)
setattr(model_instance, self._resolved_attr_name(), None)
else:
setattr(model_instance, self._attr_name(), getattr(value, self.reference_fieldname))
setattr(model_instance, self._resolved_attr_name(), value)
else:
setattr(model_instance, self._attr_name(), None)
setattr(model_instance, self._resolved_attr_name(), None)
def validate(self, value):
"""Validate reference.
Returns:
A valid value.
Raises:
BadValueError for the following reasons:
- Value is not saved.
- Object not of correct model type for reference.
"""
if value == '':
if self.kwargs.get('nullable', __nullable__):
value = None
else:
value = 0
if not isinstance(value, Model):
return super(ReferenceProperty, self).validate(value)
if not value.is_saved():
raise BadValueError(
'%s instance must be saved before it can be stored as a '
'reference' % self.reference_class.__name__)
if not isinstance(value, self.reference_class):
raise KindError('Property %s must be an instance of %s' %
(self.name, self.reference_class.__name__))
return value
validate_dump = validate
def _id_attr_name(self):
"""Get attribute of referenced id.
"""
return self.reference_fieldname
def _resolved_attr_name(self):
"""Get attribute of resolved attribute.
The resolved attribute is where the actual loaded reference instance is
stored on the referring model instance.
Returns:
Attribute name of where to store resolved reference model instance.
"""
return '_RESOLVED_' + self._attr_name()
def convert(self, value):
if value == '':
return 0
if value is None:
return value
return self.data_type(value)
def get_column_type_name(self):
return self.reference_field.get_column_type_name()
Reference = ReferenceProperty
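# Usage sketch for Reference (hypothetical 'Article' model against a 'user'
# table; per the deprecation warning above, the referenced model is given as
# a table-name string):
#
#   class Article(Model):
#       title = Field(str, max_length=255)
#       author = Reference('user', collection_name='articles')
#
#   #article.author resolves lazily to a user instance;
#   #user.articles is the auto-created reverse collection.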
class OneToOne(ReferenceProperty):
type_name = 'OneToOne'
def create(self, cls):
global __nullable__
args = self.kwargs.copy()
args['key'] = self.name
# if not callable(self.default):
# args['default'] = self.default
args['primary_key'] = self.kwargs.get('primary_key', False)
args['autoincrement'] = self.kwargs.get('autoincrement', False)
args['index'] = self.kwargs.get('index', True)
args['unique'] = self.kwargs.get('unique', True)
args['nullable'] = self.kwargs.get('nullable', __nullable__)
f_type = self._create_type()
if __server_default__:
if self.data_type is int or self.data_type is long :
args['server_default'] = text('0')
else:
args['server_default'] = self.reference_field.kwargs.get('server_default')
return Column(self.fieldname, f_type, **args)
def __property_config__(self, model_class, property_name):
"""Loads all of the references that point to this model.
"""
#Invoke super with ReferenceProperty directly in order to skip the
#ReferenceProperty setup and call ReferenceProperty's parent
#implementation instead
super(ReferenceProperty, self).__property_config__(model_class, property_name)
if not (
(isinstance(self.reference_class, type) and issubclass(self.reference_class, Model)) or
self.reference_class is _SELF_REFERENCE or
valid_model(self.reference_class, self.engine_name)):
raise KindError('reference_class %r must be Model or _SELF_REFERENCE or available table name' % self.reference_class)
if self.reference_class is _SELF_REFERENCE:
self.reference_class = self.data_type = model_class
else:
self.reference_class = get_model(self.reference_class, self.engine_name,
signal=False)
self.collection_name = self._collection_name
if self.collection_name is None:
self.collection_name = '%s' % (model_class.tablename)
if hasattr(self.reference_class, self.collection_name):
raise DuplicatePropertyError('Class %s already has property %s'
% (self.reference_class.__name__, self.collection_name))
setattr(self.reference_class, self.collection_name,
_OneToOneReverseReferenceProperty(model_class, property_name,
self._id_attr_name(), self.collection_name))
#append to reference_class._onetoone
self.reference_class._onetoone[self.collection_name] = model_class
def get_objs_columns(objs, field='id'):
ids = []
new_objs = []
if isinstance(objs, (str, unicode)):
objs = [int(x) for x in objs.split(',')]
for x in objs:
if not x:
continue
if isinstance(x, (tuple, list)):
new_objs.extend(x)
else:
new_objs.append(x)
for o in new_objs:
if not isinstance(o, Model):
_id = o
else:
_id = o.get_datastore_value(field)
if _id not in ids:
ids.append(_id)
return ids
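# get_objs_columns normalizes mixed input (comma-separated id strings, ids,
# model instances, nested lists) into a flat, de-duplicated id list.
# A sketch with illustrative values:
#
#   get_objs_columns('1,2,3')          #-> [1, 2, 3]
#   get_objs_columns([obj, (2, 3)])    #-> [obj.id, 2, 3], duplicates dropped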
class Result(object):
def __init__(self, model=None, condition=None, *args, **kwargs):
self.model = model
self.condition = condition
self.columns = list(self.model.table.c)
self.funcs = []
self.args = args
self.kwargs = kwargs
self.result = None
self.default_query_flag = True
self._group_by = None
self._having = None
self.distinct_field = None
self._values_flag = False
self._join = []
self._limit = None
self._offset = None
self.connection = model.get_session()
def do_(self, query):
global do_
return do_(query, self.connection)
def get_column(self, model, fieldname):
if isinstance(fieldname, (str, unicode)):
if issubclass(model, Model):
v = fieldname.split('.')
if len(v) > 1:
field = get_model(v[0], engine_name=self.model.get_engine_name(),
signal=False).table.c[v[1]]
else:
field = model.table.c[fieldname]
else:
field = model.c[fieldname]
else:
field = fieldname
return field
def get_columns(self, model=None, columns=None):
columns = columns or self.columns
model = model or self.model
fields = []
field = None
if self.distinct_field is not None:
field = self.get_column(model, self.distinct_field)
fields.append(func.distinct(field).label(field.name))
for col in columns:
if col is not field:
fields.append(col)
return fields
def get_fields(self):
"""
Get property instances according to self.columns
"""
columns = self.columns
model = self.model
fields = []
for col in columns:
if isinstance(col, (str, unicode)):
v = col.split('.')
if len(v) > 1:
field = get_model(v[0], engine_name=self.model.get_engine_name(),
signal=False).properties[v[1]]
else:
field = model.properties[col]
elif isinstance(col, Column):
field = get_model(col.table.name, engine_name=self.model.get_engine_name(),
signal=False).properties[col.name]
else:
field = col
fields.append(field)
return fields
def connect(self, connection):
if connection:
self.connection = connection
return self
use = connect
def all(self):
return self
def join(self, model, cond, isouter=False):
_join = None
model = get_model(model, engine_name=self.model.get_engine_name(),
signal=False)
if issubclass(model, Model):
# if cond is None:
# for prop in Model.proterties:
# if isinstance(prop, ReferenceProperty) and prop.reference_class is self.model:
# _right = prop.reference_class
# _join = self.model.table.join(_right.table,
# _right.c[prop.reference_fieldname])
# break
# else:
_join = self.model.table.join(model.table, cond, isouter=isouter)
self._join.append(_join)
else:
raise BadValueError("Only Model support in this function.")
return self
def get(self, condition=None):
if isinstance(condition, (int, long)):
return self.filter(self.model.c.id==condition).one()
else:
return self.filter(condition).one()
def count(self):
"""
If the query has a group_by or join, count over the full query as a
subselect; otherwise count using only the condition.
"""
if self._group_by or self._join:
return self.do_(self.get_query().alias().count()).scalar()
else:
return self.do_(self.get_query().with_only_columns([func.count()]).limit(None).order_by(None).offset(None)).scalar()
def any(self):
row = self.do_(
self.get_query().limit(1)
)
return len(list(row)) > 0
def filter(self, *condition):
"""
If there are multiple conditions, they are combined with an *and* relation.
"""
if not condition:
return self
cond = true()
for c in condition:
if c is not None:
cond = c & cond
if self.condition is not None:
self.condition = cond & self.condition
else:
self.condition = cond
return self
def order_by(self, *args, **kwargs):
self.funcs.append(('order_by', args, kwargs))
return self
def group_by(self, *args):
self._group_by = args
return self
def having(self, *args):
self._having = args
return self
def fields(self, *args, **kwargs):
if args:
args = flat_list(args)
if args:
if 'id' not in args:
args.append('id')
self.funcs.append(('with_only_columns', ([self.get_column(self.model, x) for x in args],), kwargs))
return self
def values(self, *args, **kwargs):
self.funcs.append(('with_only_columns', ([self.get_column(self.model, x) for x in args],), kwargs))
self._values_flag = True
return self
def values_one(self, *args, **kwargs):
self.funcs.append(('with_only_columns', ([self.get_column(self.model, x) for x in args],), kwargs))
self.run(1)
result = self.result.fetchone()
return result
def distinct(self, field=None):
"""
If field is None, the query will be:
select distinct *
and if field is given, for example 'name', it will be:
select distinct(name)
"""
if field is None:
self.funcs.append(('distinct', (), {}))
else:
self.distinct_field = field
return self
def limit(self, *args, **kwargs):
self.funcs.append(('limit', args, kwargs))
if args:
self._limit = bool(args[0])
else:
self._limit = False
return self
def offset(self, *args, **kwargs):
self._offset = True
self.funcs.append(('offset', args, kwargs))
return self
def update(self, **kwargs):
"""
Execute an UPDATE statement, e.g. update table set field = field + 1
"""
if self.condition is not None:
self.result = self.do_(self.model.table.update().where(self.condition).values(**kwargs))
else:
self.result = self.do_(self.model.table.update().values(**kwargs))
return self.result
def without(self, flag='default_query'):
if flag == 'default_query':
self.default_query_flag = False
return self
def run(self, limit=0):
query = self.get_query()
#add limit support
if limit > 0:
query = getattr(query, 'limit')(limit)
self.result = self.do_(query)
return self.result
def save_file(self, filename, encoding='utf8', headers=None, convertors=None, display=True):
"""
Save the result to a csv file.
display = True converts each value to its display form (e.g. via choices)
"""
global save_file
convertors = convertors or {}
if display:
fields = self.get_fields()
for i, column in enumerate(fields):
if column.name not in convertors:
#bind column as a default argument to avoid the late-binding closure
#bug (otherwise every convertor would use the last column)
def f(value, data, column=column):
return column.get_display_value(value)
convertors[column.name] = f
return save_file(self.run(), filename, encoding=encoding, headers=headers, convertors=convertors)
def get_query(self, columns=None):
#users can define default_query; it should be a class method
columns = columns or self.get_columns()
if self.default_query_flag:
_f = getattr(self.model, 'default_query', None)
if _f:
_f(self)
from_ = self._join
from_.append(self.model.table)
if self.condition is not None:
query = select(columns, self.condition, from_obj=from_, **self.kwargs)
else:
query = select(columns, from_obj=from_, **self.kwargs)
for func, args, kwargs in self.funcs:
query = getattr(query, func)(*args, **kwargs)
if self._group_by:
query = query.group_by(*self._group_by)
if self._having:
query = query.having(*self._having)
return query
def load(self, values):
if self._values_flag:
return values
else:
return self.model.load(values.items())
def for_update(self, flag=True):
"""
See http://docs.sqlalchemy.org/en/latest/core/expression_api.html (search for for_update).
"""
self.kwargs['for_update'] = flag
return self
def one(self):
self.run(1)
if not self.result:
return
result = self.result.fetchone()
if result:
return self.load(result)
first = one
def clear(self):
return do_(self.model.table.delete(self.condition), self.connection)
remove = clear
def __del__(self):
if self.result:
self.result.close()
self.result = None
def __iter__(self):
self.result = self.run()
while 1:
result = self.result.fetchone()
if not result:
raise StopIteration
yield self.load(result)
class ReverseResult(Result):
def __init__(self, model, condition, a_field, b_table, instance, b_field, *args, **kwargs):
self.model = model
self.b_table = b_table
self.b_field = b_field
self.instance = instance
self.condition = condition
self.a_field = a_field
self.columns = list(self.model.table.c)
self.funcs = []
self.args = args
self.kwargs = kwargs
self.result = None
self.default_query_flag = True
self._group_by = None
self._having = None
self._limit = None
self._offset = None
self._join = []
self.distinct_field = None
self._values_flag = False
self.connection = model.get_session()
def has(self, *objs):
ids = get_objs_columns(objs)
if not ids:
return False
return self.model.filter(self.condition, self.model.table.c['id'].in_(ids)).any()
def ids(self):
query = select([self.model.c['id']], self.condition)
ids = [x[0] for x in self.do_(query)]
return ids
def clear(self, *objs):
"""
Delete the referring records themselves (rows of the referring model).
"""
if objs:
ids = get_objs_columns(objs)
self.do_(self.model.table.delete(self.condition & self.model.table.c['id'].in_(ids)))
else:
self.do_(self.model.table.delete(self.condition))
remove = clear
class ManyResult(Result):
def __init__(self, modela, instance, property_name, modelb,
table, fielda, fieldb, realfielda, realfieldb, valuea, through_model=None):
"""
modela defines property_name = ManyToMany(modelb).
instance is a modela instance.
"""
self.modela = modela
self.instance = instance
self.property_name = property_name
self.modelb = modelb
self.table = table #third table
self.fielda = fielda
self.fieldb = fieldb
self.realfielda = realfielda
self.realfieldb = realfieldb
self.valuea = valuea
self.columns = list(self.modelb.table.c)
self.condition = ''
self.funcs = []
self.result = None
self.with_relation_name = None
self.through_model = through_model
self.default_query_flag = True
self._group_by = None
self._having = None
self._join = []
self._limit = None
self._offset = None
self.distinct_field = None
self._values_flag = False
self.connection = self.modela.get_session()
self.kwargs = {}
def all(self, cache=False):
"""
With cache=True, return cached objects by id; otherwise return self (lazy).
"""
if cache:
return [get_object(self.modelb, obj_id, cache=True, use_local=True) for obj_id in self.ids(True)]
else:
return self
def get(self, condition=None):
if not isinstance(condition, ColumnElement):
return self.filter(self.modelb.c[self.realfieldb]==condition).one()
else:
return self.filter(condition).one()
def add(self, *objs):
new_objs = []
for x in objs:
if not x:
continue
if isinstance(x, (tuple, list)):
new_objs.extend(x)
else:
new_objs.append(x)
modified = False
for o in new_objs:
if not self.has(o):
if isinstance(o, Model):
v = getattr(o, self.realfieldb)
else:
v = o
d = {self.fielda:self.valuea, self.fieldb:v}
if self.through_model:
obj = self.through_model(**d)
obj.save()
else:
self.do_(self.table.insert().values(**d))
modified = True
#reset the cached ids (_STORED_ attr) to Lazy so they're re-fetched
setattr(self.instance, self.store_key, Lazy)
return modified
@property
def store_key(self):
if self.property_name in self.instance.properties:
return self.instance.properties[self.property_name]._attr_name()
else:
return '_CACHED_'+self.property_name
def ids(self, cache=False):
key = self.store_key
ids = getattr(self.instance, key, None)
if not cache or ids is None or ids is Lazy:
if self.valuea is None:
return []
query = select([self.table.c[self.fieldb]], self.table.c[self.fielda]==self.valuea)
ids = [x[0] for x in self.do_(query)]
if cache:
setattr(self.instance, key, ids)
return ids
def update(self, *objs):
"""
Update the third relationship table, but not the ModelA or ModelB
"""
ids = self.ids()
new_ids = get_objs_columns(objs, self.realfieldb)
modified = False
for v in new_ids:
if v in ids: #the id has been existed, so don't insert new record
ids.remove(v)
else:
d = {self.fielda:self.valuea, self.fieldb:v}
if self.through_model:
obj = self.through_model(**d)
obj.save()
else:
self.do_(self.table.insert().values(**d))
modified = True
if ids: #if there are still ids, so delete them
self.clear(*ids)
modified = True
#cache the new ids to the _STORED_ attr
setattr(self.instance, self.store_key, new_ids)
return modified
def clear(self, *objs):
"""
Clear the third relationship table, but not the ModelA or ModelB
"""
if objs:
ids = get_objs_columns(objs, self.realfieldb)
self.do_(self.table.delete((self.table.c[self.fielda]==self.valuea) & (self.table.c[self.fieldb].in_(ids))))
else:
self.do_(self.table.delete(self.table.c[self.fielda]==self.valuea))
#reset the cached ids (_STORED_ attr) to Lazy so they're re-fetched
setattr(self.instance, self.store_key, Lazy)
remove = clear
def count(self):
if self._group_by or self._join:
return self.do_(self.get_query().alias().count()).scalar()
else:
return self.do_(
self.get_query().with_only_columns([func.count()]).limit(None).order_by(None).offset(None)
).scalar()
def any(self):
row = self.do_(
select([self.table.c[self.fieldb]],
(self.table.c[self.fielda]==self.valuea) &
self.condition).limit(1)
)
return len(list(row)) > 0
def has(self, *objs):
ids = get_objs_columns(objs, self.realfieldb)
if not ids:
return False
row = self.do_(select([text('*')],
(self.table.c[self.fielda]==self.valuea) &
(self.table.c[self.fieldb].in_(ids))).limit(1))
return len(list(row)) > 0
def fields(self, *args, **kwargs):
if args:
args = flat_list(args)
if args:
if 'id' not in args and 'id' in self.modelb.c:
args.append(self.modelb.c.id)
self.funcs.append(('with_only_columns', ([self.get_column(self.modelb, x) for x in args],), kwargs))
return self
def values(self, *args, **kwargs):
self.funcs.append(('with_only_columns', ([self.get_column(self.modelb, x) for x in args],), kwargs))
self._values_flag = True
return self
def values_one(self, *args, **kwargs):
self.funcs.append(('with_only_columns', ([self.get_column(self.modelb, x) for x in args],), kwargs))
self.run(1)
result = self.result.fetchone()
return result
def with_relation(self, relation_name=None):
"""
When fetching the manytomany result, also fetch the through
(relation) record and attach it to each result object under the
given name.
If relation_name is not given, it defaults to 'relation'.
"""
if not relation_name:
relation_name = 'relation'
if hasattr(self.modelb, relation_name):
raise Error("The attribute name %s has already existed in Model %s!" % (relation_name, self.modelb.__name__))
if not self.through_model:
raise Error("Only with through style in ManyToMany supports with_relation function of Model %s!" % self.modelb.__name__)
self.with_relation_name = relation_name
return self
def run(self, limit=0):
query = self.get_query()
if limit > 0:
query = getattr(query, 'limit')(limit)
self.result = self.do_(query)
return self.result
def get_query(self):
#users can define default_query; it should be a class method
if self.default_query_flag:
_f = getattr(self.modelb, 'default_query', None)
if _f:
_f(self)
if self.with_relation_name:
columns = list(self.table.c) + self.columns
else:
columns = self.columns
query = select(
self.get_columns(self.modelb, columns),
(self.table.c[self.fielda] == self.valuea) &
(self.table.c[self.fieldb] == self.modelb.c[self.realfieldb]) &
self.condition,
**self.kwargs)
for func, args, kwargs in self.funcs:
query = getattr(query, func)(*args, **kwargs)
if self._group_by:
query = query.group_by(*self._group_by)
if self._having:
query = query.having(*self._having)
return query
def one(self):
self.run(1)
if not self.result:
return
result = self.result.fetchone()
if result:
if self._values_flag:
return result
offset = 0
if self.with_relation_name:
offset = len(self.table.columns)
o = self.modelb.load(zip(result.keys()[offset:], result.values()[offset:]))
if self.with_relation_name:
r = self.through_model.load(zip(result.keys()[:offset], result.values()[:offset]))
setattr(o, self.with_relation_name, r)
return o
def __del__(self):
if self.result:
self.result.close()
self.result = None
def __iter__(self):
self.run()
if not self.result:
raise StopIteration
offset = 0
if self.with_relation_name:
offset = len(self.table.columns)
while 1:
result = self.result.fetchone()
if not result:
raise StopIteration
if self._values_flag:
yield result
continue
o = self.modelb.load(zip(result.keys()[offset:], result.values()[offset:]))
if self.with_relation_name:
r = self.through_model.load(zip(result.keys()[:offset], result.values()[:offset]))
setattr(o, self.with_relation_name, r)
yield o
class ManyToMany(ReferenceProperty):
type_name = 'ManyToMany'
def __init__(self, reference_class=None, verbose_name=None, collection_name=None,
reference_fieldname=None, reversed_fieldname=None, required=False, through=None,
through_reference_fieldname=None, through_reversed_fieldname=None,
**attrs):
"""
Definition of the ManyToMany property
:param reference_fieldname: the referenced field of model B
:param reversed_fieldname: the referenced field of model A
:param through_reference_fieldname: the through-model field pointing to B
:param through_reversed_fieldname: the through-model field pointing to A
:param index_reverse: also create an index on the reverse field
"""
super(ManyToMany, self).__init__(reference_class=reference_class,
verbose_name=verbose_name, collection_name=collection_name,
reference_fieldname=reference_fieldname, required=required, **attrs)
self.reversed_fieldname = reversed_fieldname or 'id'
self.through = through
self.through_reference_fieldname = through_reference_fieldname
self.through_reversed_fieldname = through_reversed_fieldname
self.index_reverse = attrs['index_reverse'] if 'index_reverse' in attrs else __manytomany_index_reverse__
def create(self, cls):
if not self.through:
self.fielda = "%s_id" % self.model_class.tablename
#test whether model_a equals model_b
#modified by limodou
#if self.model_class.tablename == self.reference_class.tablename:
if cls.tablename == self.reference_class.tablename:
_t = self.reference_class.tablename + '_b'
else:
_t = self.reference_class.tablename
self.fieldb = "%s_id" % _t
self.table = self.create_table()
#add appname to self.table
# appname = self.model_class.__module__
appname = cls.__module__
self.table.__appname__ = appname[:appname.rfind('.')]
#modified by limodou
#self.model_class.manytomany.append(self.table)
cls.manytomany.append(self.table)
index_name = '%s_mindx' % self.tablename
if index_name not in [x.name for x in self.table.indexes]:
Index(index_name, self.table.c[self.fielda], self.table.c[self.fieldb], unique=True)
#add field_b index
if self.index_reverse:
Index('%s_rmindx' % self.tablename, self.table.c[self.fieldb])
#process __mapping_only__ property, if the modela or modelb is mapping only
#then manytomany table will be mapping only
# if getattr(self.model_class, '__mapping_only__', False) or getattr(self.reference_class, '__mapping_only__', False):
if getattr(cls, '__mapping_only__', False) or getattr(self.reference_class, '__mapping_only__', False):
self.table.__mapping_only__ = True
else:
self.table.__mapping_only__ = False
def get_real_property(self, model, field):
return getattr(model, field).field_class
def get_type(self, model, field):
field = getattr(model, field)
field_class = field.field_class
if field.max_length:
f_type = field_class(field.max_length)
else:
f_type = field_class
return f_type
def create_table(self):
_table = Table(self.tablename, self.model_class.metadata,
Column(self.fielda, self.get_type(self.model_class, self.reversed_fieldname)),
Column(self.fieldb, self.get_type(self.reference_class, self.reference_fieldname)),
# ForeignKeyConstraint([a], [a_id]),
# ForeignKeyConstraint([b], [b_id]),
extend_existing=True
)
return _table
def init_through(self):
def find_property(properties, model, skip=None):
for k, v in properties.items():
if isinstance(v, ReferenceProperty) and v.reference_class is model and (not skip or skip and v.reference_class is not skip):
return k, v
if self.through and (not isinstance(self.through, type) or not issubclass(self.through, Model)):
if not (
(isinstance(self.through, type) and issubclass(self.through, Model)) or
valid_model(self.through)):
raise KindError('through must be Model or available table name')
self.through = get_model(self.through, engine_name=self.engine_name,
signal=False)
#auto find model
_auto_model = None
#process through_reference_fieldname
if self.through_reversed_fieldname:
k = self.through_reversed_fieldname
v = self.through.properties.get(k)
if not v:
raise BadPropertyTypeError("Can't find property %s in through model %s" % (
k, self.through.__name__))
else:
x = find_property(self.through.properties, self.model_class)
if not x:
raise BadPropertyTypeError("Can't find reference property of model %s in through model %s" % (
self.model_class.__name__, self.through.__name__))
k, v = x
_auto_model = self.model_class
self.fielda = k
self.reversed_fieldname = v.reference_fieldname
#process through_reversed_fieldname
if self.through_reference_fieldname:
k = self.through_reference_fieldname
v = self.through.properties.get(k)
if not v:
raise BadPropertyTypeError("Can't find property %s in through model %s" % (
k, self.through.__name__))
else:
x = find_property(self.through.properties, self.reference_class, self.model_class)
if not x:
raise BadPropertyTypeError("Can't find reference property of model %s in through model %s" % (
self.reference_class.__name__, self.through.__name__))
k, v = x
#check if the auto find models are the same
if _auto_model and _auto_model is self.reference_class:
raise BadPropertyTypeError("If the two reference fields come from the same"
" model, you should specify them via through_reference_fieldname or"
" through_reversed_fieldname in through model %s" % self.through.__name__)
self.fieldb = k
self.reference_fieldname = v.reference_fieldname
self.table = self.through.table
appname = self.model_class.__module__
self.table.__appname__ = appname[:appname.rfind('.')]
self.model_class.manytomany.append(self.table)
Index('%s_mindx' % self.tablename, self.table.c[self.fielda], self.table.c[self.fieldb], unique=True)
def __property_config__(self, model_class, property_name):
"""Loads all of the references that point to this model.
"""
#Invoke super with ReferenceProperty directly in order to skip the
#ReferenceProperty setup and call ReferenceProperty's parent
#implementation instead
super(ReferenceProperty, self).__property_config__(model_class, property_name)
if not (
(isinstance(self.reference_class, type) and issubclass(self.reference_class, Model)) or
self.reference_class is _SELF_REFERENCE or
valid_model(self.reference_class, self.engine_name)):
raise KindError('reference_class %r must be Model or _SELF_REFERENCE or available table name' % self.reference_class)
if self.reference_class is _SELF_REFERENCE or self.reference_class is None:
self.reference_class = self.data_type = model_class
else:
self.reference_class = get_model(self.reference_class, self.engine_name,
signal=False)
self.tablename = '%s_%s_%s' % (model_class.tablename, self.reference_class.tablename, property_name)
self.collection_name = self.reference_class.get_collection_name(model_class.tablename, self._collection_name, model_class.tablename)
setattr(self.reference_class, self.collection_name,
_ManyToManyReverseReferenceProperty(self, self.collection_name))
def get_lazy(self, model_instance, name, default=None):
v = self.get_attr(model_instance, name, default)
if v is Lazy:
# _id = getattr(model_instance, 'id')
# if not _id:
# raise BadValueError('Instance is not a validate object of Model %s, ID property is not found' % model_instance.__class__.__name__)
result = getattr(model_instance, self.name)
v = result.ids(True)
setattr(model_instance, name, v)
#2014/3/1 save value to model_instance._old_values
#so that manytomany need not be re-checked when saving
#(otherwise it would compare old_value against the database via a select)
model_instance._old_values[self.name] = v
return v
def __get__(self, model_instance, model_class):
"""Get reference object.
This method will fetch unresolved entities from the datastore if
they are not already loaded.
Returns:
ReferenceProperty to Model object if property is set, else None.
"""
self.init_through()
if model_instance:
reference_id = getattr(model_instance, self.reversed_fieldname, None)
x = ManyResult(self.model_class, model_instance, self.property_name, self.reference_class, self.table,
self.fielda, self.fieldb, self.reversed_fieldname,
self.reference_fieldname, reference_id, through_model=self.through)
return x
else:
return self
def __set__(self, model_instance, value):
if model_instance is None:
return
if value and value is not Lazy:
value = get_objs_columns(value, self.reference_fieldname)
setattr(model_instance, self._attr_name(), value)
def get_value_for_datastore(self, model_instance, cached=False):
"""Get key of reference rather than reference itself."""
value = getattr(model_instance, self._attr_name(), None)
if not cached:
value = getattr(model_instance, self.property_name).ids()
setattr(model_instance, self._attr_name(), value)
return value
def get_display_value(self, value):
s = []
for x in value:
s.append(unicode(x))
return ' '.join(s)
def in_(self, *objs):
"""
Create a condition
"""
if not objs:
return self.table.c[self.fielda]!=self.table.c[self.fielda]
else:
ids = get_objs_columns(objs, self.reference_fieldname)
sub_query = select([self.table.c[self.fielda]], (self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname]) & (self.table.c[self.fieldb].in_(ids)))
condition = self.model_class.c[self.reversed_fieldname].in_(sub_query)
return condition
def join_in(self, *objs):
"""
Create a join condition, connect A and C
"""
if not objs:
return self.table.c[self.fielda]!=self.table.c[self.fielda]
else:
ids = get_objs_columns(objs, self.reference_fieldname)
return (self.table.c[self.fielda] == self.model_class.c[self.reversed_fieldname]) & (self.table.c[self.fieldb].in_(ids))
def join_right_in(self, *objs):
"""
Create a join condition, connect B and C
"""
if not objs:
return self.table.c[self.fielda]!=self.table.c[self.fielda]
else:
ids = get_objs_columns(objs, self.reference_fieldname)
return (self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname]) & (self.table.c[self.fielda].in_(ids))
def filter(self, *condition):
cond = true()
for c in condition:
if c is not None:
cond = c & cond
sub_query = select([self.table.c[self.fielda]], (self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname]) & cond)
condition = self.model_class.c[self.reversed_fieldname].in_(sub_query)
return condition
def join_filter(self, *condition):
cond = true()
for c in condition:
if c is not None:
cond = c & cond
return (self.table.c[self.fielda] == self.model_class.c[self.reversed_fieldname]) & (self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname]) & cond
def convert_dump(self, value):
if not value:
return []
return [int(x) for x in value.split(',')]
def to_column_info(self):
d = {}
d['verbose_name'] = self.verbose_name or ''
d['name'] = self.name
d['fieldname'] = self.fieldname
d['type'] = self.type_name
d['type_name'] = self.type_name
d['relation'] = 'ManyToMany(%s:%s-%s:%s)' % (self.model_class.__name__, self.reversed_fieldname,
self.reference_class.__name__, self.reference_fieldname)
self._get_column_info(d)
return d
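# Usage sketch for ManyToMany (hypothetical 'Group' model against a 'user'
# table; 'groups' is an assumed collection_name, and Model.filter is the
# usual uliweb classmethod):
#
#   class Group(Model):
#       name = Field(str, max_length=80)
#       users = ManyToMany('user', collection_name='groups')
#
#   group.users.add(user)                 #insert into the auto-created link table
#   group.users.ids()                     #ids of related users
#   Group.filter(Group.users.in_(user))   #condition helper defined above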
def SelfReferenceProperty(verbose_name=None, collection_name=None, **attrs):
"""Create a self reference.
"""
if 'reference_class' in attrs:
raise ConfigurationError(
'Do not provide reference_class to self-reference.')
return ReferenceProperty(_SELF_REFERENCE, verbose_name, collection_name, **attrs)
SelfReference = SelfReferenceProperty
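# Usage sketch for a self reference (hypothetical 'Category' model):
#
#   class Category(Model):
#       name = Field(str, max_length=80)
#       parent = SelfReference(collection_name='children')
#
#   #category.parent references another Category;
#   #category.children collects the categories pointing back at it.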
class _ReverseReferenceProperty(Property):
"""The inverse of the Reference property above.
We construct reverse references automatically for the model to which
the Reference property points, creating the one-to-many property for
that model. For example, if you put a Reference property in model A that
refers to model B, we automatically create a _ReverseReference property in
B called a_set that can fetch all of the model A instances that refer to
that instance of model B.
"""
def __init__(self, model, reference_id, reversed_id):
"""Constructor for reverse reference.
Constructor does not take standard values of other property types.
"""
self._model = model #A
self._reference_id = reference_id #A Reference(B) this is A's reference field
self._reversed_id = reversed_id #B's reference_field
self.verbose_name = ''
def __get__(self, model_instance, model_class):
"""Fetches collection of model instances of this collection property."""
if model_instance is not None: #model_instance is B's
_id = getattr(model_instance, self._reversed_id, None)
if _id is not None:
a_id = self._reference_id
a_field = self._model.c[self._reference_id]
return ReverseResult(self._model, a_field==_id, self._reference_id, model_class.table, model_instance, self._reversed_id)
else:
# return Result()
return None
else:
return self
def __set__(self, model_instance, value):
"""Not possible to set a new collection."""
raise BadValueError('Virtual property is read-only')
class _OneToOneReverseReferenceProperty(_ReverseReferenceProperty):
def __init__(self, model, reference_id, reversed_id, collection_name):
"""Constructor for reverse reference.
Constructor does not take standard values of other property types.
"""
self._model = model
self._reference_id = reference_id #B Reference(A) this is B's id
self._reversed_id = reversed_id #A's id
self._collection_name = collection_name
def __get__(self, model_instance, model_class):
"""Fetches collection of model instances of this collection property."""
if model_instance:
_id = getattr(model_instance, self._reversed_id, None)
# print self._resolved_attr_name()
if _id is not None:
#this will cache the reference object
resolved = getattr(model_instance, self._resolved_attr_name(), None)
if resolved is not None:
return resolved
else:
b_id = self._reference_id
d = self._model.c[self._reference_id]
instance = self._model.get(d==_id)
if not instance:
instance = self._model(**{b_id:_id})
instance.save()
setattr(model_instance, self._resolved_attr_name(), instance)
return instance
else:
return None
else:
return self
def _resolved_attr_name(self):
"""Get attribute of resolved attribute.
The resolved attribute is where the actual loaded reference instance is
stored on the referring model instance.
Returns:
Attribute name of where to store resolved reference model instance.
"""
return '_RESOLVED_' + self._collection_name
class _ManyToManyReverseReferenceProperty(_ReverseReferenceProperty):
def __init__(self, reference_property, collection_name):
"""Constructor for reverse reference.
Constructor does not take standard values of other property types.
"""
self.reference_property = reference_property
self._collection_name = collection_name
def __get__(self, model_instance, model_class):
"""Fetches collection of model instances of this collection property."""
self.reference_property.init_through()
self._reversed_id = self.reference_property.reference_fieldname
if model_instance:
reference_id = getattr(model_instance, self._reversed_id, None)
x = ManyResult(self.reference_property.reference_class, model_instance,
self._collection_name,
self.reference_property.model_class, self.reference_property.table,
self.reference_property.fieldb, self.reference_property.fielda,
self.reference_property.reference_fieldname,
self.reference_property.reversed_fieldname, reference_id,
through_model=self.reference_property.through)
return x
else:
return self
FILE = FileProperty
PICKLE = PickleProperty
UUID = UUIDProperty
UUID_B = UUIDBinaryProperty
JSON = JsonProperty
_fields_mapping = {
BIGINT:BigIntegerProperty,
str:StringProperty,
VARCHAR:StringProperty,
CHAR:CharProperty,
unicode: UnicodeProperty,
BINARY: BinaryProperty,
VARBINARY: VarBinaryProperty,
TEXT: TextProperty,
BLOB: BlobProperty,
int:IntegerProperty,
SMALLINT: SmallIntegerProperty,
INT:IntegerProperty,
float:FloatProperty,
FLOAT:FloatProperty,
bool:BooleanProperty,
BOOLEAN:BooleanProperty,
datetime.datetime:DateTimeProperty,
DATETIME:DateTimeProperty,
JSON:JsonProperty,
datetime.date:DateProperty,
DATE:DateProperty,
datetime.time:TimeProperty,
TIME:TimeProperty,
decimal.Decimal:DecimalProperty,
DECIMAL:DecimalProperty,
UUID_B:UUIDBinaryProperty,
UUID:UUIDProperty
}
def Field(type, *args, **kwargs):
t = _fields_mapping.get(type, type)
return t(*args, **kwargs)
def get_field_type(_type):
assert isinstance(_type, (str, unicode))
_t = eval(_type)
return _fields_mapping.get(_t, _t)
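# A quick sketch of the Field factory and get_field_type above: both resolve
# a Python type (or, for get_field_type, its evaluated name) to the matching
# Property class in _fields_mapping; the max_length and default keyword
# arguments are assumptions about the Property constructors.
def _example_field_mapping():
    title = Field(str, max_length=255)  # yields a StringProperty instance
    count = Field(int, default=0)       # yields an IntegerProperty instance
    assert get_field_type('int') is IntegerProperty
    return title, count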
class ModelReprDescriptor(object):
def __get__(self, model_instance, model_class):
def f():
from IPython.display import display_html, display_svg
if model_instance is None:
display_html(self._cls_repr_html_(model_class))
display_svg(self._cls_repr_svg_(model_class))
else:
display_html(self._instance_repr_html_(model_instance))
return f
def _cls_repr_html_(self, cls):
from IPython.display import HTML
return HTML('<pre>'+print_model(cls)+'</pre>')
def _cls_repr_svg_(self, cls):
import os
from uliweb.orm.graph import generate_file
from uliweb import application
from uliweb.utils.common import get_tempfilename
from IPython.display import SVG
engine_name = cls.get_engine_name()
fontname = os.environ.get('dot_fontname', '')
outputfile = get_tempfilename('dot_svg_', suffix='.svg')
generate_file({cls.tablename:cls.table}, application.apps,
outputfile, 'svg', engine_name, fontname=fontname)
return SVG(filename=outputfile)
def _instance_repr_html_(self, instance):
from uliweb.core.html import Table
from IPython.display import HTML
s = []
for k, v in instance._fields_list:
if not isinstance(v, ManyToMany):
info = v.to_column_info()
d = [info['verbose_name'], info['name'], info['type_name']]
t = getattr(instance, k, None)
if isinstance(v, Reference) and t:
d.append('%s:%r:%s' % (v.reference_class.__name__, t.id, unicode(t)))
else:
d.append(t)
s.append(d)
return HTML(str(Table(s, ['Display Name', 'Column Name',
'Column Type', 'Value'])))
class Model(object):
__metaclass__ = ModelMetaclass
__dispatch_enabled__ = True
_engine_name = None
_connection = None
_alias = None #can be used via get_model(alias)
_collection_set_id = 1
_bind = True
_bound_classname = ''
_base_class = None
_lock = threading.Lock()
_c_lock = threading.Lock()
#add support for IPython notebook display
_ipython_display_ = ModelReprDescriptor()
def __init__(self, **kwargs):
self._old_values = {}
self._load(kwargs, from_='')
def set_saved(self):
self._old_values = self.to_dict()
for k, v in self.properties.items():
if isinstance(v, ManyToMany):
t = v.get_value_for_datastore(self, cached=True)
if not t is Lazy:
self._old_values[k] = t
def to_dict(self, fields=None, convert=True, manytomany=False):
d = {}
fields = fields or []
for k, v in self.properties.items():
if fields and not k in fields:
continue
if not isinstance(v, ManyToMany):
t = v.get_value_for_datastore(self)
if isinstance(t, Model):
t = t.id
if convert:
d[k] = self.field_str(t)
else:
d[k] = t
else:
if manytomany:
d[k] = getattr(self, v._lazy_value(), [])
return d
def field_str(self, v, strict=False):
if v is None:
if strict:
return ''
return v
if isinstance(v, datetime.datetime):
return v.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(v, datetime.date):
return v.strftime('%Y-%m-%d')
elif isinstance(v, datetime.time):
return v.strftime('%H:%M:%S')
elif isinstance(v, decimal.Decimal):
return str(v)
elif isinstance(v, unicode):
return v.encode(__default_encoding__)
else:
if strict:
return str(v)
return copy.deepcopy(v)
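# For example, field_str converts values like this:
#   field_str(datetime.datetime(2013, 4, 7, 12, 0)) -> '2013-04-07 12:00:00'
#   field_str(None, strict=True) -> ''
#   field_str(u'abc') -> 'abc' encoded with __default_encoding__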
def _get_data(self):
"""
Get the changed property, it'll be used to save the object
"""
if self.id is None or self.id == '':
d = {}
for k, v in self.properties.items():
# if not isinstance(v, ManyToMany):
if v.property_type == 'compound':
continue
if not isinstance(v, ManyToMany):
x = v.get_value_for_datastore(self)
if isinstance(x, Model):
x = x.id
elif x is None or (k=='id' and not x):
if isinstance(v, DateTimeProperty) and v.auto_now_add:
x = v.now()
elif (v.auto_add or (not v.auto and not v.auto_add)):
x = v.default_value()
else:
x = v.get_value_for_datastore(self, cached=True)
if x is not None and not x is Lazy:
d[k] = x
else:
d = {}
d['id'] = self.id
for k, v in self.properties.items():
if v.property_type == 'compound':
continue
t = self._old_values.get(k, None)
if not isinstance(v, ManyToMany):
x = v.get_value_for_datastore(self)
if isinstance(x, Model):
x = x.id
else:
x = v.get_value_for_datastore(self, cached=True)
if t != self.field_str(x) and not x is Lazy:
d[k] = x
return d
def is_saved(self):
return bool(self.id)
def update(self, **data):
for k, v in data.iteritems():
if k in self.properties:
if not isinstance(self.properties[k], ManyToMany):
x = self.properties[k].get_value_for_datastore(self)
if self.field_str(x) != self.field_str(v):
setattr(self, k, v)
else:
setattr(self, k, v)
return self
def save(self, insert=False, changed=None, saved=None,
send_dispatch=True, version=False, version_fieldname=None,
version_exception=True):
"""
If insert=True, then insert() will be used instead of update().
changed is a callback invoked just before the non-ManyToMany properties
are written; it is called as:
    changed(obj, created, old_data, new_data)
created is True when the record is being inserted; new_data may be
modified in place, and the modified data is what will be saved.
saved is a callback invoked after a successful save with the same arguments.
version enables Optimistic Concurrency Control; it is only effective on
UPDATE, and raises SaveError when the row was changed by someone else
(unless version_exception is False). version_fieldname defaults to 'version'.
(A usage sketch follows this method.)
"""
_saved = False
created = False
version_fieldname = version_fieldname or 'version'
d = self._get_data()
#fix: when d was empty the ORM would not insert the record (bug fixed 2013/04/07)
if d or not self.id or insert:
if not self.id or insert:
created = True
old = d.copy()
if get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'pre_save', instance=self, created=True, data=d, old_data=self._old_values, signal=self.tablename)
#process auto_now_add
_manytomany = {}
for k, v in self.properties.items():
if v.property_type == 'compound':
continue
if not isinstance(v, ManyToMany):
if isinstance(v, DateTimeProperty) and v.auto_now_add and k not in d:
d[k] = v.now()
elif (not k in d) and v.auto_add:
d[k] = v.default_value()
else:
if k in d:
_manytomany[k] = d.pop(k)
if d:
if callable(changed):
changed(self, created, self._old_values, d)
old.update(d)
obj = do_(self.table.insert().values(**d), self.get_session())
_saved = True
if obj.inserted_primary_key:
setattr(self, 'id', obj.inserted_primary_key[0])
if _manytomany:
for k, v in _manytomany.items():
if v:
_saved = getattr(self, k).update(v) or _saved
else:
_id = d.pop('id')
if d:
old = d.copy()
if get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'pre_save', instance=self, created=False, data=d, old_data=self._old_values, signal=self.tablename)
#process auto_now
_manytomany = {}
for k, v in self.properties.items():
if v.property_type == 'compound' or k == 'id':
continue
if not isinstance(v, ManyToMany):
if isinstance(v, DateTimeProperty) and v.auto_now and k not in d:
d[k] = v.now()
elif (not k in d) and v.auto:
d[k] = v.default_value()
else:
if k in d:
_manytomany[k] = d.pop(k)
if d:
_cond = self.table.c.id == self.id
if version:
version_field = self.table.c.get(version_fieldname)
if version_field is None:
raise KindError("version_fieldname %s does not exist in Model %s" % (version_fieldname, self.__class__.__name__))
_version_value = getattr(self, version_fieldname, 0)
# setattr(self, version_fieldname, _version_value+1)
d[version_fieldname] = _version_value+1
_cond = (version_field == _version_value) & _cond
if callable(changed):
changed(self, created, self._old_values, d)
old.update(d)
result = do_(self.table.update(_cond).values(**d), self.get_session())
_saved = True
if version:
if result.rowcount != 1:
_saved = False
if version_exception:
raise SaveError("The record %s:%d has been saved by others, current version is %d" % (self.tablename, self.id, _version_value))
else:
setattr(self, version_fieldname, d[version_fieldname])
if _manytomany:
for k, v in _manytomany.items():
if v is not None:
_saved = getattr(self, k).update(v) or _saved
if _saved:
for k, v in d.items():
x = self.properties[k].get_value_for_datastore(self)
if self.field_str(x) != self.field_str(v):
setattr(self, k, v)
if send_dispatch and get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'post_save', instance=self, created=created, data=old, old_data=self._old_values, signal=self.tablename)
self.set_saved()
if callable(saved):
saved(self, created, self._old_values, old)
return _saved
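# A usage sketch of the changed callback and optimistic locking described
# above; `user` and the field name are hypothetical:
#
#   def log_change(obj, created, old_data, new_data):
#       # new_data may be modified here; the modified dict is what is saved
#       new_data['remark'] = 'auto-edited'
#
#   user.save(changed=log_change, version=True)
#   # raises SaveError if another session updated the row first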
def put(self, *args, **kwargs):
warnings.simplefilter('default')
warnings.warn("put method will be deprecated in next version.", DeprecationWarning)
return self.save(*args, **kwargs)
def delete(self, manytomany=True, delete_fieldname=None, send_dispatch=True,
onetoone=True):
"""
Delete the current object
:param manytomany: whether to also clear all ManyToMany relationships
:param onetoone: whether to also delete related OneToOne objects
:param delete_fieldname: if True, the property named 'deleted' is used
for soft deletion; any other truthy value is used as the property name
"""
if get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'pre_delete', instance=self, signal=self.tablename)
if manytomany:
for k, v in self._manytomany.items():
getattr(self, k).clear()
if onetoone:
for k, v in self._onetoone.items():
row = getattr(self, k)
if row:
row.delete()
if delete_fieldname:
if delete_fieldname is True:
delete_fieldname = 'deleted'
if not hasattr(self, delete_fieldname):
raise KeyError("No property named %s exists" % delete_fieldname)
setattr(self, delete_fieldname, True)
self.save()
else:
do_(self.table.delete(self.table.c.id==self.id), self.get_session())
self.id = None
self._old_values = {}
if send_dispatch and get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'post_delete', instance=self, signal=self.tablename)
def __repr__(self):
s = []
for k, v in self._fields_list:
if not isinstance(v, ManyToMany):
t = getattr(self, k, None)
if isinstance(v, Reference) and t:
s.append('%r:<%s:%d>' % (k, v.__class__.__name__, t.id))
else:
s.append('%r:%r' % (k, t))
if self.__class__._base_class:
clsname = self.__class__._base_class.__name__
else:
clsname = self.__class__.__name__
return ('<%s {' % clsname) + ','.join(s) + '}>'
def __str__(self):
return str(self.id)
def __unicode__(self):
return str(self.id)
def get_display_value(self, field_name, value=None):
return self.properties[field_name].get_display_value(value or getattr(self, field_name))
def get_datastore_value(self, field_name):
return self.properties[field_name].get_value_for_datastore(self)
#classmethod========================================================
@classmethod
def add_property(cls, name, prop, config=True, set_property=True):
if isinstance(prop, Property):
check_reserved_word(name)
#process if there is already the same property
old_prop = cls.properties.get(name)
if old_prop:
prop.creation_counter = old_prop.creation_counter
cls.properties[name] = prop
if config:
prop.__property_config__(cls, name)
if set_property:
setattr(cls, name, prop)
if hasattr(cls, '_fields_list'):
index = -1
for i, (n, p) in enumerate(cls._fields_list):
if name == n:
index = i
break
if index >= 0:
cls._fields_list[index] = (name, prop)
else:
cls._fields_list.append((name, prop))
cls._fields_list.sort(lambda x, y: cmp(x[1].creation_counter, y[1].creation_counter))
else:
raise AttributeError("prop should be an instance of Property, but %r was found" % prop)
@classmethod
def update_property(cls, name, prop, config=True, set_property=True):
if isinstance(prop, Property):
old_prop = cls.properties[name]
prop.creation_counter = old_prop.creation_counter
cls.properties[name] = prop
if config:
prop.__property_config__(cls, name)
if set_property:
setattr(cls, name, prop)
if hasattr(cls, '_fields_list'):
index = -1
for i, (n, p) in enumerate(cls._fields_list):
if name == n:
index = i
break
if index >= 0:
cls._fields_list[index] = (name, prop)
else:
raise AttributeError("prop should be an instance of Property, but %r was found" % prop)
@classmethod
def get_collection_name(cls, from_class_name, collection_name=None, prefix=None):
"""
Get the reference collection_name. If collection_name is None,
generate one and make sure it does not conflict; if collection_name
is given, check whether it already exists and raise an exception
if it is taken by another model.
"""
if not collection_name:
collection_name = prefix + '_set'
if hasattr(cls, collection_name):
#if the xxx_set name already exists, then automatically
#create a unique collection_set id
collection_name = prefix + '_set_' + str(cls._collection_set_id)
cls._collection_set_id += 1
else:
if collection_name in cls._collection_names:
if cls._collection_names.get(collection_name) != from_class_name:
raise DuplicatePropertyError("Model %s already has property %s" % (cls.__name__, collection_name))
return collection_name
@classmethod
def Reference(cls, name, model, reference_fieldname=None, collection_name=None, **kwargs):
field_from = getattr(cls, name)
if not field_from:
raise AttributeError("Field %s can't be found in Model %s" % (name, cls.tablename))
d = field_from.get_parameters()
d.update(kwargs)
prop = ReferenceProperty(reference_class=model,
reference_fieldname=reference_fieldname,
collection_name=collection_name,
**d)
cls.update_property(name, prop)
@classmethod
def OneToOne(cls, name, model, reference_fieldname=None, collection_name=None, **kwargs):
field_from = getattr(cls, name)
if not field_from:
raise AttributeError("Field %s can't be found in Model %s" % (name, cls.tablename))
d = field_from.get_parameters()
d.update(kwargs)
prop = OneToOne(reference_class=model,
reference_fieldname=reference_fieldname,
collection_name=collection_name,
**d)
cls.update_property(name, prop)
@classmethod
def ManyToMany(cls, name, model, collection_name=None,
reference_fieldname=None, reversed_fieldname=None, required=False,
through=None,
through_reference_fieldname=None, through_reversed_fieldname=None,
**kwargs):
prop = ManyToMany(reference_class=model,
collection_name=collection_name,
reference_fieldname=reference_fieldname,
reversed_fieldname=reversed_fieldname,
through=through,
through_reference_fieldname=through_reference_fieldname,
through_reversed_fieldname=through_reversed_fieldname,
**kwargs)
cls.add_property(name, prop)
#create property, it'll create Table object
prop.create(cls)
#create real table
if __auto_create__:
engine = cls.get_engine().engine
if not prop.through and not prop.table.exists(engine):
prop.table.create(engine, checkfirst=True)
@classmethod
def _set_tablename(cls, appname=None):
if not hasattr(cls, '__tablename__'):
name = get_tablename(cls.__name__)
else:
name = cls.__tablename__
if appname:
name = appname.lower() + '_' + name
cls.tablename = name
@classmethod
def get_session(cls):
if cls._connection:
return cls._connection
return get_session(cls.get_engine_name())
@classmethod
def get_engine_name(cls):
return cls._engine_name or __default_engine__
@classmethod
def get_engine(cls):
ec = cls.get_engine_name()
return engine_manager[ec]
@classmethod
def _use(cls, ec):
"""
underlying implementation of use()
"""
# class ConnectModel(cls):
# pass
ConnectModel = type(cls.__name__, (cls,), {})
ConnectModel.tablename = cls.tablename
ConnectModel._base_class = cls
if isinstance(ec, (str, unicode)):
ConnectModel._engine_name = ec
elif isinstance(ec, Session):
ConnectModel._engine_name = ec.engine_name
ConnectModel._connection = ec
return ConnectModel
@classmethod
def use(cls, ec):
"""
use() duplicates the Model class and binds it to ec.
ec is an engine name or a Session object.
"""
if isinstance(ec, (str, unicode)):
m = get_model(cls._alias, ec, signal=False)
else:
m = cls._use(ec)
return m
@classmethod
def bind(cls, metadata=None, auto_create=False, reset=False):
cls._lock.acquire()
try:
cls.metadata = metadata or find_metadata(cls)
if cls.metadata:
cols = []
cls.manytomany = []
#add pre_create process
for k, f in cls._fields_list:
func = getattr(f, 'pre_create', None)
if func:
func(cls)
for k, f in cls._fields_list:
c = f.create(cls)
if c is not None:
cols.append(c)
if not getattr(cls, '__dynamic__', False):
#check the model_path
if cls._base_class:
model_path = cls._base_class.__module__ + '.' + cls._base_class.__name__
else:
model_path = cls.__module__ + '.' + cls.__name__
_path = __models__.get(cls.tablename, {}).get('model_path', '')
if _path and model_path != _path:
return
#check if the table is already existed
t = cls.metadata.tables.get(cls.tablename, None)
if t is not None and not __auto_set_model__ and not reset:
return
if t is not None:
cls.metadata.remove(t)
args = getattr(cls, '__table_args__', {})
args['mysql_charset'] = 'utf8'
cls.table = Table(cls.tablename, cls.metadata, *cols, **args)
#add appname to self.table
appname = cls.__module__
cls.table.__appname__ = appname[:appname.rfind('.')]
#add __mapping_only__ property to Table object
cls.table.__mapping_only__ = getattr(cls, '__mapping_only__', False)
cls.c = cls.table.c
cls.columns = cls.table.c
if hasattr(cls, 'OnInit'):
cls.OnInit()
if auto_create:
#the table will be created only when the
#metadata is bound to an engine;
#otherwise tables are created via: create_all(db)
if cls.metadata.bind:
cls.create()
set_model(cls, created=True)
else:
set_model(cls)
else:
if __auto_set_model__:
set_model(cls)
cls._bound_classname = cls._alias
finally:
cls._lock.release()
@classmethod
def create(cls):
cls._c_lock.acquire()
try:
engine = get_connection(cls.get_engine_name())
if not cls.table.exists(engine):
cls.table.create(engine, checkfirst=True)
for x in cls.manytomany:
if not x.exists(engine):
x.create(engine, checkfirst=True)
finally:
cls._c_lock.release()
@classmethod
def get(cls, id=None, condition=None, fields=None, cache=False, engine_name=None, **kwargs):
"""
Get an object of this Model. If fields is given, only those fields are
loaded into the object and the other properties stay Lazy.
If cache is True, or __cacheable__=True is defined on the Model class,
the cache is consulted first. (A usage sketch follows this method.)
"""
if id is None and condition is None:
return None
can_cacheable = (cache or getattr(cls, '__cacheable__', None)) and \
isinstance(id, (int, long, str, unicode))
if can_cacheable:
#send 'get_object' topic to get cached object
obj = dispatch.get(cls, 'get_object', id)
if obj:
return obj
if condition is not None:
_cond = condition
else:
if isinstance(id, (int, long)):
_cond = cls.c.id==id
elif isinstance(id, (str, unicode)) and id.isdigit():
_cond = cls.c.id==int(id)
else:
_cond = id
#if there is no cached object, then just fetch from database
obj = cls.filter(_cond, **kwargs).fields(*(fields or [])).one()
if obj and (cache or getattr(cls, '__cacheable__', None)):
dispatch.call(cls, 'set_object', instance=obj)
return obj
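# A small usage sketch of get() (hypothetical model and field names):
#
#   user = User.get(3)                          # by integer primary key
#   user = User.get(User.c.username == 'bob')   # by condition
#   user = User.get(3, fields=['username'])     # other columns stay Lazy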
def put_cached(self):
dispatch.call(self.__class__, 'set_object', instance=self)
@classmethod
def get_or_notfound(cls, condition=None, fields=None):
obj = cls.get(condition, fields=fields)
if not obj:
raise NotFound("Can't find the object", cls, condition)
return obj
@classmethod
def _data_prepare(cls, record):
d = {}
for k, v in record:
p = cls.properties.get(k)
if p and not isinstance(p, ManyToMany):
d[str(k)] = p.make_value_from_datastore(v)
else:
d[str(k)] = v
return d
@classmethod
def all(cls, **kwargs):
return Result(cls, **kwargs)
@classmethod
def filter(cls, *condition, **kwargs):
return Result(cls, **kwargs).filter(*condition)
@classonlymethod
def remove(cls, condition=None, **kwargs):
if isinstance(condition, (int, long)):
condition = cls.c.id==condition
elif isinstance(condition, (tuple, list)):
condition = cls.c.id.in_(condition)
do_(cls.table.delete(condition, **kwargs), cls.get_session())
@classmethod
def count(cls, condition=None, **kwargs):
count = do_(cls.table.count(condition, **kwargs), cls.get_session()).scalar()
return count
@classmethod
def any(cls, *condition, **kwargs):
return Result(cls, **kwargs).filter(*condition).any()
@classmethod
def load(cls, values, set_saved=True, from_='db'):
if isinstance(values, (list, tuple)):
d = cls._data_prepare(values)
elif isinstance(values, dict):
d = values
else:
raise BadValueError("Can't support the data type %r" % values)
# if 'id' not in d or not d['id']:
# raise BadValueError("ID property must be existed or could not be empty.")
o = cls()
o._load(d, use_delay=True, from_=from_)
if set_saved:
o.set_saved()
return o
def refresh(self, fields=None, **kwargs):
"""
Reload the instance with the current id from the database
"""
cond = self.c.id==self.id
query = self.filter(cond, **kwargs)
if not fields:
fields = list(self.table.c)
v = query.values_one(*fields)
if not v:
raise NotFound('Instance <%s:%d> can not be found' % (self.tablename, self.id))
d = self._data_prepare(v.items())
self.update(**d)
self.set_saved()
def _load(self, data, use_delay=False, from_='db'):
if not data:
return
#compound fields will be processed at the end
compounds = []
for prop in self.properties.values():
if from_ == 'db':
name = prop.fieldname
else:
name = prop.name
if name in data:
if prop.property_type == 'compound':
compounds.append(prop)
continue
value = data[name]
if from_ == 'dump':
value = prop.convert_dump(value)
else:
if prop.property_type == 'compound':
continue
# if use_delay or isinstance(prop, ManyToMany):
if use_delay:
value = Lazy
else:
if name != 'id':
value = prop.default_value()
else:
value = None
prop.__set__(self, value)
for prop in compounds:
if from_ == 'db':
name = prop.fieldname
else:
name = prop.name
if name in data:
value = data[name]
prop.__set__(self, value)
def dump(self, fields=None, exclude=None):
"""
Dump the current object to a dict, with each value converted to a string.
ManyToMany fields are not dumped automatically; they are only included
when they are given in the fields parameter.
"""
exclude = exclude or []
d = {}
if fields and 'id' not in fields:
fields = list(fields)
fields.append('id')
for k, v in self.properties.items():
if ((not fields) or (k in fields)) and (not exclude or (k not in exclude)):
if not isinstance(v, ManyToMany):
t = v.get_value_for_datastore(self)
if t is Lazy:
self.refresh()
t = v.get_value_for_datastore(self)
if isinstance(t, Model):
t = t.id
d[k] = v.to_str(t)
else:
if fields:
d[k] = ','.join([str(x) for x in getattr(self, v._lazy_value(), [])])
if d and 'id' not in d and 'id' in self.properties:
d['id'] = str(self.id)
return d
@classmethod
def migrate(cls, manytomany=True):
tables = [cls.tablename]
if manytomany:
for x in cls.manytomany:
tables.append(x.name)
migrate_tables(tables, cls.get_engine_name())
@classmethod
def clear_relation(cls):
"""
Clear the relation properties (OneToOne, Reference, ManyToMany) that
were created on the referenced Models.
"""
for k, v in cls.properties.items():
if isinstance(v, ReferenceProperty):
if hasattr(v, 'collection_name') and hasattr(v.reference_class, v.collection_name):
delattr(v.reference_class, v.collection_name)
if isinstance(v, OneToOne):
#remove from reference_class._onetoone
del v.reference_class._onetoone[v.collection_name]
@classmethod
def get_columns_info(cls):
for k, v in cls._fields_list:
yield v.to_column_info()
|
bsd-2-clause
|
guillaume-philippon/aquilon
|
lib/aquilon/worker/processes.py
|
1
|
37384
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handling of external processes for the broker happens here.
Most methods will be called as part of a callback chain, and should
expect to handle a generic result from whatever happened earlier in
the chain.
"""
import os
import re
import logging
from contextlib import contextmanager
from subprocess import Popen, PIPE
from tempfile import mkdtemp
from threading import Thread
from six import iteritems
from mako.lookup import TemplateLookup
from twisted.python import context
from twisted.python.log import callWithContext, ILogContext
from aquilon.exceptions_ import (ProcessException, AquilonError, ArgumentError,
InternalError)
from aquilon.config import Config, running_from_source
from aquilon.aqdb.model import Machine
from aquilon.utils import remove_dir
LOGGER = logging.getLogger(__name__)
class StreamLoggerThread(Thread):
"""Helper class for streaming output as it becomes available."""
def __init__(self, logger, loglevel, process, stream, filterre=None,
context=None):
self.logger = logger
self.loglevel = loglevel
self.process = process
self.stream = stream
self.filterre = filterre
self.context = context
self.buffer = []
Thread.__init__(self)
def run(self):
while True:
data = self.stream.readline()
if data == '' and (self.stream.closed or
self.process.returncode is not None):
break
if data != '':
if self.filterre and not self.filterre.search(data):
continue
self.buffer.append(data)
if self.context:
callWithContext(self.context, self.logger.log,
self.loglevel, data.rstrip())
else:
self.logger.log(self.loglevel, data.rstrip())
def run_command(args, env=None, path="/", logger=LOGGER, loglevel=logging.INFO,
stream_level=None, filterre=None, input=None):
'''Run the specified command (args should be a list corresponding to ARGV).
Returns any output (stdout only). If the command fails, then
ProcessException will be raised. To pass the output back to the client
pass in a logger and specify loglevel as CLIENT_INFO.
To reduce the captured output, pass in a compiled regular expression
with the filterre keyword argument. Any output lines on stdout will
only be kept if filterre.search() finds a match.
'''
if env:
shell_env = env.copy()
else:
shell_env = {}
# Make sure that environment is properly kerberized.
for envname, envvalue in os.environ.items():
# AQTEST<something> is used by the testsuite
if envname.startswith("KRB") or envname.startswith("AQTEST"):
shell_env[envname] = envvalue
# Add a default value for the PATH.
for envname in ["PATH"]:
if envname not in shell_env and envname in os.environ:
shell_env[envname] = os.environ[envname]
# Force any arguments to be strings... takes care of unicode from
# the database.
command_args = [str(arg) for arg in args]
# If the command was not given with an absolute path, then check if there's
# an override specified in the config file. If not, we'll rely on $PATH.
if command_args[0][0] != "/":
config = Config()
command_args[0] = config.lookup_tool(command_args[0])
simple_command = " ".join(command_args)
logger.log(loglevel, "run_command: %s (CWD: %s)", simple_command,
os.path.abspath(path))
if input:
proc_stdin = PIPE
logger.info("command `%s` stdin: %s", simple_command, input)
else:
proc_stdin = None
# The context contains the log prefix
ctx = (context.get(ILogContext) or {}).copy()
p = Popen(args=command_args, stdin=proc_stdin, stdout=PIPE, stderr=PIPE,
cwd=path, env=shell_env)
# If we want to stream the command's output back to the client while the
# command is still executing, then we have to do it ourselves. Otherwise,
# p.communicate() does everything.
if stream_level is None:
out, err = p.communicate(input=input)
if filterre:
out = "\n".join(line for line in out.splitlines()
if filterre.search(line))
else:
out_thread = StreamLoggerThread(logger, stream_level, p, p.stdout,
filterre=filterre, context=ctx)
err_thread = StreamLoggerThread(logger, stream_level, p, p.stderr, context=ctx)
out_thread.start()
err_thread.start()
if proc_stdin:
p.stdin.write(input)
p.stdin.close()
p.wait()
out_thread.join()
err_thread.join()
out = "".join(out_thread.buffer)
err = "".join(err_thread.buffer)
if p.returncode >= 0:
logger.log(loglevel, "command `%s` exited with return code %d",
simple_command, p.returncode)
retcode = p.returncode
signal_num = None
else: # pragma: no cover
logger.log(loglevel, "command `%s` exited with signal %d",
simple_command, -p.returncode)
retcode = None
signal_num = -p.returncode
if err:
logger.log(loglevel, "command `%s` stderr: %s", simple_command, err)
if p.returncode != 0:
raise ProcessException(command=simple_command, out=out, err=err,
code=retcode, signalNum=signal_num,
filtered=bool(filterre))
return out
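# A short usage sketch of run_command (the command and filter pattern are
# hypothetical): capture only the '.log' lines of `/bin/ls -l` run in /tmp;
# a non-zero exit code raises ProcessException.
def _example_run_command():
    only_logs = re.compile(r'\.log$')
    return run_command(["/bin/ls", "-l"], path="/tmp", filterre=only_logs)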
def run_git(args, env=None, path=".", logger=LOGGER, loglevel=logging.INFO,
filterre=None, stream_level=None):
config = Config()
if env:
git_env = env.copy()
else:
git_env = {}
git_env["PATH"] = git_env.get("PATH", os.environ.get("PATH", ""))
for name in ["git_author_name", "git_author_email",
"git_committer_name", "git_committer_email"]:
if not config.has_option("broker", name):
continue
value = config.get("broker", name)
git_env[name.upper()] = value
if isinstance(args, list):
git_args = args[:]
if git_args[0] != "git":
git_args.insert(0, "git")
else:
git_args = ["git", args]
return run_command(git_args, env=git_env, path=path, logger=logger,
loglevel=loglevel, filterre=filterre,
stream_level=stream_level)
def cache_version(config, logger=LOGGER):
"""Try to determine the broker version by examining the path
to this source file. If this file path matches
/aquilon/PROJ/aqd/<version>/ (likely /ms/dist) or
/aquilon/aqd/<version>/ (likely /ms/dev) then use <version>.
Otherwise, run git describe to get the most recent tag.
"""
if config.has_option("broker", "version"):
return
version_re = re.compile(r'/aquilon(?:/PROJ)?/aqd/([^/]+)/')
m = version_re.search(__file__)
if m and m.group(1) != "lib" and m.group(1) != "bin":
config.set("broker", "version", m.group(1))
return
try:
out = run_git("describe", logger=logger,
path=config.get("broker", "srcdir"))
config.set("broker", "version", out.strip())
except ProcessException as e:
logger.info("Could not run git describe to get version: %s", e)
config.set("broker", "version", "Unknown")
class GitRepo(object):
"""
Git repository wrapper
This class is not meant to be a simple wrapper around git, but rather to
implement higher level functions - even if some of those functions can be
translated to a single git command.
"""
def __init__(self, path, logger, loglevel=logging.INFO):
self.path = path
self.logger = logger
self.loglevel = loglevel
@staticmethod
def template_king(logger, loglevel=logging.INFO):
"""
Constructor for template-king
"""
config = Config()
return GitRepo(config.get("broker", "kingdir"), logger=logger,
loglevel=loglevel)
@staticmethod
def domain(domain, logger, loglevel=logging.INFO):
"""
Constructor for domains
"""
config = Config()
domainsdir = config.get('broker', 'domainsdir')
return GitRepo(os.path.join(domainsdir, domain), logger=logger,
loglevel=loglevel)
def run(self, args, filterre=None, stream_level=None):
return run_git(args, path=self.path, logger=self.logger,
loglevel=self.loglevel, filterre=filterre,
stream_level=stream_level)
def ref_contains_commit(self, commit_id, ref='HEAD'):
"""
Check if a given reference (by default, HEAD) contains a given commit ID
"""
filterre = re.compile('^' + commit_id + '$')
try:
found = self.run(['rev-list', ref], filterre=filterre)
except ProcessException as pe:
if pe.code != 128:
raise
else:
found = None
return found
def ref_commit(self, ref='HEAD', compel=True):
"""
Return the top commit of a ref, by default HEAD
"""
try:
commit = self.run(['rev-parse', '--verify', '-q', ref + '^{commit}'])
return commit.strip()
except ProcessException as pe:
if pe.code == 1:
if compel:
raise ArgumentError("Ref %s could not be translated to an "
"existing commit ID." % ref)
return None
raise
def ref_tree(self, ref='HEAD', compel=True):
"""
Return the tree ID a ref (by default, HEAD) points to
"""
try:
tree = self.run(['rev-parse', '--verify', '-q', ref + '^{tree}'])
return tree.strip()
except ProcessException as pe:
if pe.code == 1:
if compel:
raise ArgumentError("Ref %s not found.", ref)
return None
raise
@contextmanager
def temp_clone(self, branch):
"""
Create a temporary clone for working on the given branch
This function is a context manager meant to be used in a with statement.
The temporary clone is removed automatically.
"""
config = Config()
# TODO: is rundir suitable for this purpose?
rundir = config.get("broker", "rundir")
tempdir = mkdtemp(prefix="git_clone_", dir=rundir)
try:
run_git(["clone", "--shared", "--branch", branch, "--",
self.path, branch],
path=tempdir, logger=self.logger, loglevel=self.loglevel)
yield GitRepo(os.path.join(tempdir, branch), logger=self.logger,
loglevel=self.loglevel)
finally:
remove_dir(tempdir, logger=self.logger)
def push_origin(self, ref, force=False):
"""
Push a ref to the origin remote
"""
if force:
self.run(["push", "--force", "origin", ref])
else:
self.run(["push", "origin", ref])
IP_NOT_DEFINED_RE = re.compile(r"Host with IP address "
r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
r" is not defined")
BUILDING_NOT_FOUND = re.compile(r"bldg [a-zA-Z0-9]{2} doesn't exists")
CAMPUS_NOT_FOUND = re.compile(r"campus [a-zA-Z0-9]{2} doesn't exist")
DNS_DOMAIN_NOT_FOUND = re.compile(r"DNS domain ([-\w\.\d]+) doesn't exists")
DNS_DOMAIN_EXISTS = re.compile(r"DNS domain [-\w\.\d]+ already defined")
# The regexp is taken from DSDB
INVALID_NAME_RE = re.compile(r"[^A-Za-z0-9_.-]")
class DSDBRunner(object):
def __init__(self, logger=LOGGER):
config = Config()
self.logger = logger
self.dsdb_use_testdb = config.getboolean("broker", "dsdb_use_testdb")
self.location_sync = config.getboolean("broker", "dsdb_location_sync")
self.actions = []
self.rollback_list = []
def normalize_iface(self, iface):
return INVALID_NAME_RE.sub("_", iface)
def commit(self, verbose=False):
for args, rollback, error_filter, ignore_msg in self.actions:
cmd = ["dsdb"]
cmd.extend(args)
try:
if verbose:
self.logger.client_info("DSDB: %s" %
" ".join(str(a) for a in args))
run_command(cmd, env=self.getenv(), logger=self.logger)
except ProcessException as err:
if error_filter and err.out and error_filter.search(err.out):
self.logger.warning(ignore_msg)
else:
raise
if rollback:
self.rollback_list.append(rollback)
def rollback(self, verbose=False):
self.rollback_list.reverse()
rollback_failures = []
for args in self.rollback_list:
cmd = ["dsdb"]
cmd.extend(args)
try:
self.logger.client_info("DSDB: %s" %
" ".join(str(a) for a in args))
run_command(cmd, env=self.getenv(), logger=self.logger)
except ProcessException as err:
rollback_failures.append(str(err))
did_something = bool(self.rollback_list)
del self.rollback_list[:]
if rollback_failures:
raise AquilonError("DSDB rollback failed, DSDB state is "
"inconsistent: " + "\n".join(rollback_failures))
elif did_something:
self.logger.client_info("DSDB rollback completed.")
def commit_or_rollback(self, error_msg=None, verbose=False):
try:
self.commit(verbose=verbose)
except ProcessException as err:
if not error_msg:
error_msg = "DSDB update failed"
self.logger.warning(str(err))
self.rollback(verbose=verbose)
raise ArgumentError(error_msg)
def add_action(self, command_args, rollback_args, error_filter=None,
ignore_msg=False):
"""
Register an action to execute and its rollback counterpart.
command_args: the DSDB command to execute
rollback_args: the DSDB command to execute on rollback
error_filter: regexp of error messages in the output of dsdb that
should be ignored
ignore_msg: message to log if the error_filter matched
"""
if error_filter and not ignore_msg:
raise InternalError("Specifying an error filter needs the message "
"specified as well.")
self.actions.append((command_args, rollback_args, error_filter,
ignore_msg))
def getenv(self):
if self.dsdb_use_testdb:
return {"DSDB_USE_TESTDB": "true"}
return None
def add_campus(self, campus, comments):
if not self.location_sync:
return
command = ["add_campus_aq", "-campus_name", campus]
if comments:
command.extend(["-comments", comments])
rollback = ["delete_campus_aq", "-campus", campus]
self.add_action(command, rollback)
def del_campus(self, campus):
if not self.location_sync:
return
command = ["delete_campus_aq", "-campus", campus]
rollback = ["add_campus_aq", "-campus_name", campus]
self.add_action(command, rollback, CAMPUS_NOT_FOUND,
"DSDB does not have campus %s defined, proceeding.")
def add_city(self, city, country, fullname):
if not self.location_sync:
return
command = ["add_city_aq", "-city_symbol", city, "-country_symbol",
country, "-city_name", fullname]
rollback = ["delete_city_aq", "-city", city]
self.add_action(command, rollback)
def update_city(self, city, campus, prev_campus):
if not self.location_sync:
return
command = ["update_city_aq", "-city", city, "-campus", campus]
# We can't revert to an empty campus
if prev_campus:
rollback = ["update_city_aq", "-city", city, "-campus", prev_campus]
else:
rollback = None
self.add_action(command, rollback)
def del_city(self, city, old_country, old_fullname):
if not self.location_sync:
return
command = ["delete_city_aq", "-city", city]
rollback = ["add_city_aq", "-city_symbol", city, "-country_symbol",
old_country, "-city_name", old_fullname]
self.add_action(command, rollback)
def add_campus_building(self, campus, building):
if not self.location_sync:
return
command = ["add_campus_building_aq", "-campus_name", campus,
"-building_name", building]
rollback = ["delete_campus_building_aq", "-campus_name", campus,
"-building_name", building]
self.add_action(command, rollback)
def add_building(self, building, city, building_addr):
if not self.location_sync:
return
command = ["add_building_aq", "-building_name", building, "-city", city,
"-building_addr", building_addr]
rollback = ["delete_building_aq", "-building", building]
self.add_action(command, rollback)
def del_campus_building(self, campus, building):
if not self.location_sync:
return
command = ["delete_campus_building_aq", "-campus_name", campus,
"-building_name", building]
rollback = ["add_campus_building_aq", "-campus_name", campus,
"-building_name", building]
self.add_action(command, rollback)
def del_building(self, building, old_city, old_addr):
if not self.location_sync:
return
command = ["delete_building_aq", "-building", building]
rollback = ["add_building_aq", "-building_name", building,
"-city", old_city, "-building_addr", old_addr]
self.add_action(command, rollback, BUILDING_NOT_FOUND,
"DSDB does not have building %s defined, "
"proceeding." % building)
def update_building(self, building, address, old_addr):
if not self.location_sync:
return
command = ["update_building_aq", "-building_name", building,
"-building_addr", address]
rollback = ["update_building_aq", "-building_name", building,
"-building_addr", old_addr]
self.add_action(command, rollback)
def add_host_details(self, fqdn, ip, iface=None, mac=None, primary=None,
comments=None, **_):
command = ["add_host", "-host_name", fqdn,
"-ip_address", ip, "-status", "aq"]
if iface:
command.extend(["-interface_name", self.normalize_iface(iface)])
if mac:
command.extend(["-ethernet_address", mac])
if primary and str(primary) != str(fqdn):
command.extend(["-primary_host_name", primary])
if comments:
command.extend(["-comments", comments])
rollback = ["delete_host", "-ip_address", ip]
self.add_action(command, rollback)
def update_host_details(self, fqdn, iface=None, new_ip=None, new_mac=None,
new_comments=None, old_ip=None, old_mac=None,
old_comments=None, **_):
command = ["update_aqd_host", "-host_name", fqdn]
if iface:
iface = self.normalize_iface(iface)
command.extend(["-interface_name", iface])
rollback = command[:]
if new_ip and new_ip != old_ip:
command.extend(["-ip_address", new_ip])
rollback.extend(["-ip_address", old_ip])
if new_mac and new_mac != old_mac:
command.extend(["-ethernet_address", new_mac])
rollback.extend(["-ethernet_address", old_mac])
if new_comments != old_comments:
command.extend(["-comments", new_comments or ""])
rollback.extend(["-comments", old_comments or ""])
self.add_action(command, rollback)
def update_host_iface_name(self, old_fqdn, new_fqdn,
old_iface, new_iface, **_):
old_iface = self.normalize_iface(old_iface)
new_iface = self.normalize_iface(new_iface)
command = ["update_aqd_host", "-host_name", old_fqdn]
rollback = ["update_aqd_host", "-host_name", new_fqdn]
if old_fqdn != new_fqdn:
command.extend(["-new_host_name", new_fqdn])
rollback.extend(["-new_host_name", old_fqdn])
if old_iface and old_iface != new_iface:
command.extend(["-interface_name", old_iface,
"-new_interface_name", new_iface])
rollback.extend(["-interface_name", new_iface,
"-new_interface_name", old_iface])
self.add_action(command, rollback)
def delete_host_details(self, fqdn, ip, iface=None, mac=None, primary=None,
comments=None, **_):
command = ["delete_host", "-ip_address", ip]
rollback = ["add_host", "-host_name", fqdn,
"-ip_address", ip, "-status", "aq"]
if iface:
rollback.extend(["-interface_name", self.normalize_iface(iface)])
if mac:
rollback.extend(["-ethernet_address", mac])
if primary and str(primary) != str(fqdn):
rollback.extend(["-primary_host_name", primary])
if comments:
rollback.extend(["-comments", comments])
self.add_action(command, rollback, IP_NOT_DEFINED_RE,
"DSDB did not have a host with this IP address, "
"proceeding.")
@classmethod
def snapshot_hw(cls, dbhw_ent):
"""
Make a snapshot of the interface parameters.
update_host() will use this snapshot to decide what has changed and
what DSDB commands have to be executed.
Comment handling is a bit complicated, because we have more ways to
store comments in Aquilon than in DSDB. The rules are:
- If the interface has a comment, use that.
- Otherwise take the comment from the hardware entity.
Exception: management interfaces
"""
real_primary = dbhw_ent.fqdn
hwdata = {"by-ip": {},
"by-fqdn": {},
"primary": real_primary}
# For each of the addresses held by this hardware_entity we need to
# create an entry in DSDB. The following loop makes a snapshot of
# expected state of the information in DSDB.
for addr in dbhw_ent.all_addresses():
# Do not propagate to DSDB if the network is not internal,
# there are no FQDNs associated with this address, or
# the address is shared with other devices.
if not addr.network.is_internal:
continue
if not addr.fqdns:
continue
if addr.is_shared:
continue
# In AQDB there may be multiple domain names associated with
# an address; in DSDB there can only be one. Thus we pick
# the first DNS record to propagate.
dns_record = addr.dns_records[0]
# By default we take the comments from the hardware_entity,
# if an interface comment exists then this will be taken
# in preference. Management interfaces are added as stand-alone
# entries, therefore we do not take the hardware_entity comment
# but allow the following code to take it from the interface.
if addr.interface.interface_type != 'management':
comments = dbhw_ent.comments
else:
comments = None
iface = addr.logical_name
if addr.interface.comments and not \
addr.interface.comments.startswith("Created automatically"):
comments = addr.interface.comments
# Determine if we need to specify a primary name to DSDB. By
# doing so we are associating this record with another.
# Note, the existence of a primary hostname affects the order
# in which entries are processed in update_host()
if addr.interface.interface_type == "management":
# Do not use -primary_host_name for the management address
# as we do not wish to associate them with the host currently
# on the machine (which may change).
primary = None
elif str(dns_record.fqdn) == real_primary:
# Avoid circular dependency - do not set the 'primary' key for
# the real primary name
primary = None
elif not isinstance(dbhw_ent, Machine):
# Not a machine - we don't care about srvloc
primary = real_primary
elif dns_record.reverse_ptr and str(dns_record.reverse_ptr.fqdn) == real_primary:
# If the reverse PTR record points to the primary name in AQDB,
# then pass the -primary_name flag to DSDB
primary = real_primary
else:
# Avoid using -primary_name, to please srvloc
primary = None
# Exclude the MAC address for aliases
if addr.label:
mac = None
else:
mac = addr.interface.mac
ifdata = {'iface': iface,
'ip': addr.ip,
'mac': mac,
'fqdn': str(dns_record.fqdn),
'primary': primary,
'comments': comments}
hwdata["by-ip"][ifdata["ip"]] = ifdata
hwdata["by-fqdn"][ifdata["fqdn"]] = ifdata
# The primary address of Zebra hosts needs extra care. Here, we cheat a
# bit - we do not check if the primary name is a service address, but
# instead check if it has an IP address and it was not handled above.
if dbhw_ent.primary_ip and \
str(dbhw_ent.primary_name.fqdn) not in hwdata["by-fqdn"]:
ifdata = {'iface': "vip",
'ip': dbhw_ent.primary_ip,
'mac': None,
'fqdn': str(dbhw_ent.primary_name),
'primary': None,
'comments': None}
hwdata["by-ip"][ifdata["ip"]] = ifdata
hwdata["by-fqdn"][ifdata["fqdn"]] = ifdata
return hwdata
def update_host(self, dbhw_ent, old_hwdata):
"""Update a dsdb host entry.
The calling code (the aq update_interface command) treats the
hostname and interface name (except for zebra hosts!) as unchanging.
There is an update_host dsdb command that lets the mac address,
ip address (and comments, if we kept them) change.
Any other changes have to be done by removing the old DSDB
entry and adding a new one.
Please note that in case of zebra interfaces adding a new ip address
to the same interface may result in adding/removing DSDB entries.
"""
if dbhw_ent:
new_hwdata = self.snapshot_hw(dbhw_ent)
else:
new_hwdata = {"by-ip": {},
"by-fqdn": {},
"primary": None}
if not old_hwdata:
old_hwdata = {"by-ip": {},
"by-fqdn": {},
"primary": None}
deletes = []
adds = []
# Host/interface names cannot be updated simultaneously with IP/MAC
# addresses or comments
addr_updates = []
name_updates = []
# Run through all of the entries in the old snapshot and attempt
# to match them to their corresponding new entry.
for fqdn, old_ifdata in old_hwdata["by-fqdn"].items():
# Locate the new information about this address by either
# its FQDN or IP address.
if fqdn in new_hwdata["by-fqdn"]:
new_ifdata = new_hwdata["by-fqdn"][fqdn]
elif old_ifdata["ip"] in new_hwdata["by-ip"]:
new_ifdata = new_hwdata["by-ip"][old_ifdata["ip"]]
else:
new_ifdata = None
# If either the old or the new entry is bound to a primary name but
# the other is not, then we have to delete & re-add it. Note this
# will be re-added in the following loop as we did not delete the
# entry from new_hwdata.
if new_ifdata and bool(old_ifdata["primary"]) != bool(new_ifdata["primary"]):
new_ifdata = None
# If there is no new data then record a delete (note above).
if not new_ifdata:
deletes.append(old_ifdata)
continue
# Create a dict with entries in old_ifdata prefixed with 'old_'
# and entries in new_ifdata prefixed with 'new_'
kwargs = {p + k: v
for (p, d) in [('old_', old_ifdata),
('new_', new_ifdata)]
for k, v in iteritems(d)}
if old_ifdata['ip'] != new_ifdata['ip'] or \
old_ifdata['mac'] != new_ifdata['mac'] or \
old_ifdata['comments'] != new_ifdata['comments']:
addr_updates.append(kwargs)
if old_ifdata['fqdn'] != new_ifdata['fqdn'] or \
old_ifdata['iface'] != new_ifdata['iface']:
name_updates.append(kwargs)
# Delete the entries from new_hwdata. We have recorded an
# update. The contents of new_hwdata is used in the following
# loop to record additions.
del new_hwdata["by-fqdn"][new_ifdata["fqdn"]]
del new_hwdata["by-ip"][new_ifdata["ip"]]
# For all of the records remaining in new_hwdata (see above),
# record an addition operation.
adds = new_hwdata["by-fqdn"].values()
# Add the primary address first, and delete it last. The primary address
# is identified by having an empty ['primary'] key (this is true for the
# management address as well, but that does not matter).
sort_by_primary = lambda x: x['primary'] or ""
adds.sort(key=sort_by_primary)
deletes.sort(key=sort_by_primary, reverse=True)
for attrs in deletes:
self.delete_host_details(**attrs)
for kwargs in addr_updates:
# The old FQDN and interface name are the fixed point
self.update_host_details(fqdn=kwargs['old_fqdn'],
iface=kwargs['old_iface'],
**kwargs)
for kwargs in name_updates:
self.update_host_iface_name(**kwargs)
for attrs in adds:
self.add_host_details(**attrs)
def add_dns_domain(self, dns_domain, comments):
if not comments:
# DSDB requires the comments field, even if it is empty
comments = ""
command = ["add_dns_domain", "-domain_name", dns_domain,
"-comments", comments]
rollback = ["delete_dns_domain", "-domain_name", dns_domain]
self.add_action(command, rollback, DNS_DOMAIN_EXISTS,
"The DNS domain %s already exists in DSDB, "
"proceeding." % dns_domain)
def delete_dns_domain(self, dns_domain, old_comments):
command = ["delete_dns_domain", "-domain_name", dns_domain]
rollback = ["add_dns_domain", "-domain_name", dns_domain,
"-comments", old_comments]
self.add_action(command, rollback, DNS_DOMAIN_NOT_FOUND,
"The DNS domain %s does not exist in DSDB, "
"proceeding." % dns_domain)
rack_row_re = re.compile(r'^\s*Row:\s*\b([-\w]+)\b$', re.M)
rack_col_re = re.compile(r'^\s*Column:\s*\b([-\w]+)\b$', re.M)
def show_rack(self, rackname):
out = run_command(["dsdb", "show_rack", "-rack_name", rackname],
env=self.getenv())
rack_row = self.rack_row_re.search(out)
rack_col = self.rack_col_re.search(out)
fields = {}
fields["rack_row"] = rack_row and rack_row.group(1) or None
fields["rack_col"] = rack_col and rack_col.group(1) or None
if not fields["rack_row"] or not fields["rack_col"]:
raise ValueError("Rack %s is missing row and/or col data" % rackname)
return fields
primary_re = re.compile(r'^\s*Primary Name:\s*\b([-\w]+)\b$', re.M)
node_re = re.compile(r'^\s*Node:\s*\b([-\w]+)\b$', re.M)
dns_re = re.compile(r'^\s*DNS Domain:\s*\b([-\w\.]+)\b$', re.M)
state_re = re.compile(r'^\s*State:\s*\b(\d+)\b$', re.M)
def show_host(self, hostname):
(short, dot, dns_domain) = hostname.partition(".")
fields = {}
if not dot:
fields["fqdn"] = short + ".ms.com"
fields["dsdb_lookup"] = short
elif not dns_domain:
fields["fqdn"] = short + "ms.com"
fields["dsdb_lookup"] = short
elif dns_domain != "ms.com":
fields["fqdn"] = hostname
fields["dsdb_lookup"] = hostname
else:
fields["fqdn"] = hostname
fields["dsdb_lookup"] = short
out = run_command(["dsdb", "show_host",
"-host_name", fields["dsdb_lookup"]],
env=self.getenv())
primary = self.primary_re.search(out)
node = self.node_re.search(out)
dns = self.dns_re.search(out)
state = self.state_re.search(out)
fields["primary_name"] = primary and primary.group(1) or None
fields["node"] = node and node.group(1) or None
fields["dns"] = dns and dns.group(1) or None
if state:
fields["state"] = int(state.group(1))
else:
fields["state"] = None
return fields
def add_alias(self, alias, target, comments):
command = ["add_host_alias", "-host_name", target,
"-alias_name", alias]
if comments:
command.extend(["-comments", comments])
rollback = ["delete_host_alias", "-alias_name", alias]
self.add_action(command, rollback)
def del_alias(self, alias, old_target, old_comments):
command = ["delete_host_alias", "-alias_name", alias]
rollback = ["add_host_alias", "-host_name", old_target,
"-alias_name", alias]
if old_comments:
rollback.extend(["-comments", old_comments])
self.add_action(command, rollback)
def update_alias(self, alias, target, comments, old_target, old_comments):
command = ["update_host_alias", "-alias", alias,
"-new_host", target]
rollback = ["update_host_alias", "-alias", alias,
"-new_host", old_target]
if comments != old_comments:
command.extend(["-new_comments", comments or ""])
rollback.extend(["-new_comments", old_comments or ""])
self.add_action(command, rollback)
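# A usage sketch of the DSDBRunner action queue (the host data is
# hypothetical), assuming a configured broker: actions only run on commit,
# and are rolled back in reverse order if any of them fails.
def _example_dsdb_flow(logger):
    dsdb = DSDBRunner(logger=logger)
    dsdb.add_host_details("foo.ms.com", "10.0.0.1", iface="eth0",
                          mac="00:11:22:33:44:55")
    dsdb.commit_or_rollback("Could not add host foo.ms.com to DSDB")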
def build_mako_lookup(config, kind, **kwargs):
# This duplicates the logic from lookup_file_path(), but we don't want to
# move the mako dependency to aquilon.config
srcdir = config.get("broker", "srcdir")
srcpath = os.path.join(srcdir, "etc", "mako", kind)
directories = []
if running_from_source():
# If we're running from the source, then ignore any installed files
directories.append(srcpath)
else:
directories.append(os.path.join("/etc", "aquilon", "mako", kind))
directories.append(os.path.join("/usr", "share", "aquilon", "mako", kind))
if os.path.exists(srcpath):
directories.append(srcpath)
return TemplateLookup(directories=directories, **kwargs)
|
apache-2.0
|
kofkings/RSA_python
|
RSA.py
|
1
|
5017
|
import random
from fractions import gcd
def RSA(plainText):
# Generate keys
# p, q are two random large primes (512 bits each)
p = generateLargePrime(512)
q = generateLargePrime(512)
while p == q:
q = generateLargePrime(512)
n = p * q
totientN = (p - 1) * (q - 1)
# publicKey = random in [2, totientN - 1) with gcd(publicKey, totientN) = 1
# privateKey * publicKey = 1 (mod totientN)
publicKey = random.randrange(2, totientN - 1)
while not (gcd(publicKey, totientN) == 1):
publicKey = random.randrange(2, totientN - 1)
privateKey = bezout(publicKey, totientN)
while privateKey < 0:
privateKey += totientN
print "Key:"
print " N:", n
print " Public Key:", publicKey
print " Private Key", privateKey
# Encrypt: cipherTextNumber = plainTextNumber ^ publicKey % n
# Divide the text into blocks so that each plainTextNumber < n
blockSize = n.bit_length() / 8
print 'Plain Text :', plainText
# Get PlainTextNumber
arrayPlainTextNumber = []
for i in xrange(len(plainText) / blockSize + 1):
arrayPlainTextNumber.append(textToNum(plainText[:blockSize]))
plainText = plainText[blockSize:]
print "Plain Text Number : ", arrayPlainTextNumber
arrayCipherTextNumber = []
for i in arrayPlainTextNumber:
arrayCipherTextNumber.append(pow(i, publicKey, n))
print "Cipher Text Number : ", arrayCipherTextNumber
# Decrypt: decryptedTextNumber = cipherTextNumber ^ privateKey % n
decryptedText = ''
arrayDecryptedTextNumber = []
for i in arrayCipherTextNumber:
decryptedNumber = pow(i, privateKey, n)
arrayDecryptedTextNumber.append(decryptedNumber)
decryptedText += numToText(decryptedNumber)
print "Decrypted Text Number :", arrayDecryptedTextNumber
print "Decrypted Text :", decryptedText
def isPrime(n):
# Probabilistic primality test
# Small primes for a quick trial-division pre-test
lowPrimes = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89,
97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181,
191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397,
401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619,
631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997]
if (n > 3):
# Reject even numbers
if (n & 1 != 0):
# Trial division by the small primes above
for p in lowPrimes:
if (n % p == 0):
return False
# Miller-Rabin
# write n - 1 = 2 ^ s * m with m odd
m, s = n - 1, 0
while m & 1 == 0:
m, s = m >> 1, s + 1
# Loop k = 50 times (error bound 4 ^ -k)
for i in xrange(50):
a = random.randrange(2, n - 2)
if not strong_pseudoprime(n, a, s, m):
return False
return True
return False
def strong_pseudoprime(n, a, s, m):
# An odd number n with n - 1 = m * 2 ^ s is a strong probable prime to
# base a when one of the following conditions holds:
# a ^ m % n = 1
# or a ^ (m * 2 ^ r) % n = n - 1 for some 0 <= r < s
b = pow(a, m, n)
if b == 1:
return True
for i in xrange(s):
if b == n - 1:
return True
b = b * b % n
return False
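# A worked check of strong_pseudoprime with the classic n = 221 example:
# 221 = 13 * 17 is composite and 220 = 2 ** 2 * 55, so s = 2 and m = 55.
# Base 174 is a strong liar (174 ** 110 % 221 == 220 == n - 1), while
# base 137 is a witness that exposes 221 as composite.
def exampleStrongPseudoprime():
    assert strong_pseudoprime(221, 174, 2, 55)
    assert not strong_pseudoprime(221, 137, 2, 55)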
def generateLargePrime(k):
# Random number then check primality
n = random.randrange(2 ** (k - 1), 2 ** (k))
while not isPrime(n):
n = random.randrange(2 ** (k - 1), 2 ** (k))
return n
def bezout(a, b):
# Bezout (extended Euclidean algorithm): given a, b, find x, y such that
# x * a + y * b = gcd(a, b)
# In RSA:
# a <=> publicKey, b <=> totientN, x <=> privateKey
x1, y1 = 1, 0
x2, y2 = 0, 1
while b:
temp = a // b
a, b = b, a % b
x1, y1, x2, y2 = x2, y2, x1 - temp * x2, y1 - temp * y2
return x1
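# Worked example (illustrative, textbook RSA numbers): with publicKey = 17 and
# totientN = 3120, bezout(17, 3120) returns -367; adding totientN gives 2753,
# and indeed 17 * 2753 % 3120 == 1, so 2753 is the private exponent.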
def textToNum(textString):
# Pack the ASCII bytes into a single big-endian base-256 integer
number = 0
for character in textString:
number = (number << 8) + ord(character)
return number
def numToText(number):
# Unpack a big-endian base-256 integer back into its ASCII string
textString = ''
while number:
textString = chr(number % 256) + textString
number >>= 8
return textString
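# Round-trip example (illustrative): textToNum('Hi') == (72 << 8) + 105 == 18537,
# and numToText(18537) == 'Hi', so blocks survive the conversion unchanged.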
if __name__ == '__main__':
message = raw_input("Type a message to test:")
RSA(message)
raw_input("Press a key to exit")
# Sorry about my English
|
gpl-3.0
|
CuriosoInformatico/HoneyCheck
|
dhcp_watchmen.py
|
1
|
3960
|
import logging.config
logging.getLogger('scapy.runtime').setLevel(logging.ERROR)
from threading import Thread
from scapy.all import *
import time
logger = logging.getLogger(name='elchicodepython.honeycheck')
def exec_array(array, **kwargs):
for obj, method in array:
method(obj, **kwargs) # equivalent to obj.method(**kwargs)
class DHCPServer:
def __init__(self, ip, hw):
self.ip = ip
self.hw = hw
def __repr__(self):
return '<DHCPServer Object (ip = %s, hw = %s)>' % (self.ip, self.hw)
def __str__(self):
return '<DHCPServer Object (ip = %s, hw = %s)>' % (self.ip, self.hw)
class Status:
OK = 1
ROGUE_DETECTED = 2
class DHCPWatchmen:
def __init__(self, iface, fail_test, pass_test, final_exec, whitelist):
'''
:param iface: interface to watch
:param fail_test: action to trigger if a rogue dhcp server is detected
:param pass_test: action to trigger if there are no rogue dhcp servers detected
:param final_exec: action to trigger always after fail_test or pass_test
:param whitelist: list of IPs of verified DHCP servers to ignore.
'''
self.iface = iface
self.hw = get_if_hwaddr(iface)
self.fail_test = fail_test
self.pass_test = pass_test
self.final_exec = final_exec
self.whitelist = whitelist
self.dhcp_servers = {}
self.last_status = Status.OK
def check_dhcp_servers(self, number_allowed):
'''
Check if the number of DHCP Servers detected is allowed
and trigger the corresponding action to each situation
:param number_allowed: number of dhcp_servers allowed
'''
if len(self.dhcp_servers) > number_allowed:
if self.last_status != Status.ROGUE_DETECTED:
logger.warning('MORE DHCP SERVERS THAN ALLOWED: ')
self.last_status = Status.ROGUE_DETECTED
exec_array(self.fail_test, watchmen = self)
self.dhcp_servers = {}
else:
if self.last_status != Status.OK:
logger.info('All seems right')
self.last_status = Status.OK
exec_array(self.pass_test, watchmen = self)
exec_array(self.final_exec, watchmen=self)
def check_packet(self, packet):
if packet.payload.op == 2:
if self.whitelist:
if (packet.payload.src not in self.whitelist):
self.dhcp_servers[packet.payload.src] = DHCPServer(packet.payload.src, packet.src)
else:
self.dhcp_servers[packet.payload.src] = DHCPServer(packet.payload.src, packet.src)
def send_dhcp_discovery(self):
dhcp_discover = Ether(dst="ff:ff:ff:ff:ff:ff") / IP(src="0.0.0.0", dst="255.255.255.255") / UDP(sport=68, dport=67) / BOOTP(chaddr=self.hw, flags = 0x8000) / DHCP(options=[("message-type", "discover"), "end"])
sendp(dhcp_discover, verbose = 0)
logger.debug('DHCP DISCOVER SENT')
def dhcp_discovery_daemon(self, timeout):
if self.whitelist:
# No DHCP server outside the whitelist is expected to answer
logger.info('Whitelist enabled for ' + self.iface)
max_servers_allowed = 0
else:
# At least one DHCP server is expected on the network
logger.info('Executing HoneyCheck in %s without Whitelist' % self.iface)
max_servers_allowed = 1
while True:
self.send_dhcp_discovery()
time.sleep(timeout)
self.check_dhcp_servers(max_servers_allowed)
def sniff_dhcp(self):
sniff(iface = self.iface, filter='udp port 68', prn = self.check_packet)
def __repr__(self):
return '<DHCPSWatchmen Object (iface = %s)>' % (self.iface)
def __str__(self):
return '<DHCPSWatchmen Object (iface = %s)>' % (self.iface)
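# Illustrative usage sketch (interface name and whitelist are examples only;
# the actual runner is not part of this file):
# watchmen = DHCPWatchmen('eth0', fail_test=[], pass_test=[], final_exec=[],
# whitelist=['192.168.1.1'])
# Thread(target=watchmen.sniff_dhcp).start() # collect DHCP offers in the background
# watchmen.dhcp_discovery_daemon(30) # probe every 30 seconds and evaluate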
|
mit
|
MotorolaMobilityLLC/external-chromium_org
|
third_party/gtk+/gtk/compose-parse.py
|
149
|
34346
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# compose-parse.py, version 1.3
#
# multifunction script that helps manage the compose sequence table in GTK+ (gtk/gtkimcontextsimple.c)
# the script produces statistics and information about the whole process; run with --help for more.
#
# You may need to switch your python installation to utf-8, if you get 'ascii' codec errors.
#
# Complain to Simos Xenitellis ([email protected], http://simos.info/blog) for this craft.
from re import findall, match, split, sub
from string import atoi
from unicodedata import normalize
from urllib import urlretrieve
from os.path import isfile, getsize
from copy import copy
import sys
import getopt
# We grab files off the web, left and right.
URL_COMPOSE = 'http://gitweb.freedesktop.org/?p=xorg/lib/libX11.git;a=blob_plain;f=nls/en_US.UTF-8/Compose.pre'
URL_KEYSYMSTXT = "http://www.cl.cam.ac.uk/~mgk25/ucs/keysyms.txt"
URL_GDKKEYSYMSH = "http://git.gnome.org/browse/gtk%2B/plain/gdk/gdkkeysyms.h"
URL_UNICODEDATATXT = 'http://www.unicode.org/Public/5.2.0/ucd/UnicodeData.txt'
FILENAME_COMPOSE_SUPPLEMENTARY = 'gtk-compose-lookaside.txt'
# We currently support keysyms of size 2; once upstream xorg gets sorted,
# we might produce some tables with size 2 and some with size 4.
SIZEOFINT = 2
# Current max compose sequence length; in case it gets increased.
WIDTHOFCOMPOSETABLE = 5
keysymdatabase = {}
keysymunicodedatabase = {}
unicodedatabase = {}
headerfile_start = """/* GTK - The GIMP Tool Kit
* Copyright (C) 2007, 2008 GNOME Foundation
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/*
* File auto-generated from script found at http://bugzilla.gnome.org/show_bug.cgi?id=321896
* using the input files
* Input : http://gitweb.freedesktop.org/?p=xorg/lib/libX11.git;a=blob_plain;f=nls/en_US.UTF-8/Compose.pre
* Input : http://www.cl.cam.ac.uk/~mgk25/ucs/keysyms.txt
* Input : http://www.unicode.org/Public/UNIDATA/UnicodeData.txt
*
* This table is optimised for space and requires special handling to access the content.
* This table is used solely by http://svn.gnome.org/viewcvs/gtk%2B/trunk/gtk/gtkimcontextsimple.c
*
* The resulting file is placed at http://svn.gnome.org/viewcvs/gtk%2B/trunk/gtk/gtkimcontextsimpleseqs.h
* This file is described in bug report http://bugzilla.gnome.org/show_bug.cgi?id=321896
*/
/*
* Modified by the GTK+ Team and others 2007, 2008. See the AUTHORS
* file for a list of people on the GTK+ Team. See the ChangeLog
* files for a list of changes. These files are distributed with
* GTK+ at ftp://ftp.gtk.org/pub/gtk/.
*/
#ifndef __GTK_IM_CONTEXT_SIMPLE_SEQS_H__
#define __GTK_IM_CONTEXT_SIMPLE_SEQS_H__
/* === These are the original comments of the file; we keep for historical purposes ===
*
* The following table was generated from the X compose tables include with
* XFree86 4.0 using a set of Perl scripts. Contact Owen Taylor <[email protected]>
* to obtain the relevant perl scripts.
*
* The following compose letter letter sequences confliced
* Dstroke/dstroke and ETH/eth; resolved to Dstroke (Croation, Vietnamese, Lappish), over
* ETH (Icelandic, Faroese, old English, IPA) [ D- -D d- -d ]
* Amacron/amacron and ordfeminine; resolved to ordfeminine [ _A A_ a_ _a ]
* Amacron/amacron and Atilde/atilde; resolved to atilde [ -A A- a- -a ]
* Omacron/Omacron and masculine; resolved to masculine [ _O O_ o_ _o ]
* Omacron/omacron and Otilde/atilde; resolved to otilde [ -O O- o- -o ]
*
* [ Amacron and Omacron are in Latin-4 (Baltic). ordfeminine and masculine are used for
* spanish. atilde and otilde are used at least for Portuguese ]
*
* at and Aring; resolved to Aring [ AA ]
* guillemotleft and caron; resolved to guillemotleft [ << ]
* ogonek and cedilla; resolved to cedilla [ ,, ]
*
* This probably should be resolved by first checking an additional set of compose tables
* that depend on the locale or selected input method.
*/
static const guint16 gtk_compose_seqs_compact[] = {"""
headerfile_end = """};
#endif /* __GTK_IM_CONTEXT_SIMPLE_SEQS_H__ */
"""
def stringtohex(s): return atoi(s, 16)
def factorial(n):
if n <= 1:
return 1
else:
return n * factorial(n-1)
def uniq(*args):
""" Performs a uniq operation on a list or lists """
theInputList = []
for theList in args:
theInputList += theList
theFinalList = []
for elem in theInputList:
if elem not in theFinalList:
theFinalList.append(elem)
return theFinalList
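# e.g. uniq([1, 2, 2], [2, 3]) returns [1, 2, 3]; first-appearance order is kept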
def all_permutations(seq):
""" Borrowed from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252178 """
""" Produces all permutations of the items of a list """
if len(seq) <=1:
yield seq
else:
for perm in all_permutations(seq[1:]):
for i in range(len(perm)+1):
#nb str[0:1] works in both string and list contexts
yield perm[:i] + seq[0:1] + perm[i:]
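# e.g. list(all_permutations('ab')) yields ['ab', 'ba']; n items yield all n! orderings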
def usage():
print """compose-parse available parameters:
-h, --help this craft
-s, --statistics show overall statistics (both algorithmic, non-algorithmic)
-a, --algorithmic show sequences saved with algorithmic optimisation
-g, --gtk show entries that go to GTK+
-u, --unicodedatatxt show compose sequences derived from UnicodeData.txt (from unicode.org)
-v, --verbose show verbose output
-p, --plane1 show plane1 compose sequences
-n, --numeric when used with --gtk, create file with numeric values only
-e, --gtk-expanded when used with --gtk, create file that repeats first column; not usable in GTK+
--all-sequences when used with --gtk, create file with entries rejected by default
Default is to show statistics.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "pvgashune", ["help", "algorithmic", "statistics", "unicodedatatxt",
"stats", "gtk", "verbose", "plane1", "numeric", "gtk-expanded", "all-sequences"])
except:
usage()
sys.exit(2)
opt_statistics = False
opt_algorithmic = False
opt_gtk = False
opt_unicodedatatxt = False
opt_verbose = False
opt_plane1 = False
opt_numeric = False
opt_gtkexpanded = False
opt_allsequences = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-s", "--statistics"):
opt_statistics = True
if o in ("-a", "--algorithmic"):
opt_algorithmic = True
if o in ("-g", "--gtk"):
opt_gtk = True
if o in ("-u", "--unicodedatatxt"):
opt_unicodedatatxt = True
if o in ("-v", "--verbose"):
opt_verbose = True
if o in ("-p", "--plane1"):
opt_plane1 = True
if o in ("-n", "--numeric"):
opt_numeric = True
if o in ("-e", "--gtk-expanded"):
opt_gtkexpanded = True
if o == "--all-sequences":
opt_allsequences = True
if not opt_algorithmic and not opt_gtk and not opt_unicodedatatxt:
opt_statistics = True
def download_hook(blocks_transferred, block_size, file_size):
""" A download hook to provide some feedback when downloading """
if blocks_transferred == 0:
if file_size > 0:
if opt_verbose:
print "Downloading", file_size, "bytes: ",
else:
if opt_verbose:
print "Downloading: ",
sys.stdout.write('#')
sys.stdout.flush()
def download_file(url):
""" Downloads a file provided a URL. Returns the filename. """
""" Borks on failure """
localfilename = url.split('/')[-1]
if not isfile(localfilename) or getsize(localfilename) <= 0:
if opt_verbose:
print "Downloading ", url, "..."
try:
urlretrieve(url, localfilename, download_hook)
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
print " done."
else:
if opt_verbose:
print "Using cached file for ", url
return localfilename
def process_gdkkeysymsh():
""" Opens the gdkkeysyms.h file from GTK+/gdk/gdkkeysyms.h """
""" Fills up keysymdb with contents """
filename_gdkkeysymsh = download_file(URL_GDKKEYSYMSH)
try:
gdkkeysymsh = open(filename_gdkkeysymsh, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the gdkkeysyms.h file and place contents in keysymdb """
linenum_gdkkeysymsh = 0
keysymdb = {}
for line in gdkkeysymsh.readlines():
linenum_gdkkeysymsh += 1
line = line.strip()
if line == "" or not match('^#define GDK_KEY_', line):
continue
components = split('\s+', line)
if len(components) < 3:
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting 3 items in the line"
sys.exit(-1)
if not match('^GDK_KEY_', components[1]):
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting a keysym starting with GDK_KEY_"
sys.exit(-1)
if match('^0x[0-9a-fA-F]+$', components[2]):
unival = long(components[2][2:], 16)
if unival == 0:
continue
keysymdb[components[1][8:]] = unival
else:
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting a hexadecimal number at the end of the line"
sys.exit(-1)
gdkkeysymsh.close()
""" Patch up the keysymdb with some of our own stuff """
""" This is for a missing keysym from the currently upstream file """
#keysymdb['dead_stroke'] = 0x338
""" This is for a missing keysym from the currently upstream file """
###keysymdb['dead_belowring'] = 0x323
###keysymdb['dead_belowmacron'] = 0x331
###keysymdb['dead_belowcircumflex'] = 0x32d
###keysymdb['dead_belowtilde'] = 0x330
###keysymdb['dead_belowbreve'] = 0x32e
###keysymdb['dead_belowdiaeresis'] = 0x324
""" This is^Wwas preferential treatment for Greek """
# keysymdb['dead_tilde'] = 0x342
""" This is^was preferential treatment for Greek """
#keysymdb['combining_tilde'] = 0x342
""" Fixing VoidSymbol """
keysymdb['VoidSymbol'] = 0xFFFF
return keysymdb
def process_keysymstxt():
""" Grabs and opens the keysyms.txt file that Markus Kuhn maintains """
""" This file keeps a record between keysyms <-> unicode chars """
filename_keysymstxt = download_file(URL_KEYSYMSTXT)
try:
keysymstxt = open(filename_keysymstxt, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the keysyms.txt file and place content in keysymdb """
linenum_keysymstxt = 0
keysymdb = {}
for line in keysymstxt.readlines():
linenum_keysymstxt += 1
line = line.strip()
if line == "" or match('^#', line):
continue
components = split('\s+', line)
if len(components) < 5:
print "Invalid line %(linenum)d in %(filename)s: %(line)s'"\
% {'linenum': linenum_keysymstxt, 'filename': filename_keysymstxt, 'line': line}
print "Was expecting 5 items in the line"
sys.exit(-1)
if match('^U[0-9a-fA-F]+$', components[1]):
unival = long(components[1][1:], 16)
if unival == 0:
continue
keysymdb[components[4]] = unival
keysymstxt.close()
""" Patch up the keysymdb with some of our own stuff """
""" This is for a missing keysym from the currently upstream file """
###keysymdb['dead_belowring'] = 0x323
###keysymdb['dead_belowmacron'] = 0x331
###keysymdb['dead_belowcircumflex'] = 0x32d
###keysymdb['dead_belowtilde'] = 0x330
###keysymdb['dead_belowbreve'] = 0x32e
###keysymdb['dead_belowdiaeresis'] = 0x324
""" This is preferential treatment for Greek """
""" => we get more savings if used for Greek """
# keysymdb['dead_tilde'] = 0x342
""" This is preferential treatment for Greek """
# keysymdb['combining_tilde'] = 0x342
""" This is for a missing keysym from Markus Kuhn's db """
keysymdb['dead_stroke'] = 0x338
""" This is for a missing keysym from Markus Kuhn's db """
keysymdb['Oslash'] = 0x0d8
""" This is for a missing keysym from Markus Kuhn's db """
keysymdb['Ssharp'] = 0x1e9e
""" This is for a missing (recently added) keysym """
keysymdb['dead_psili'] = 0x313
""" This is for a missing (recently added) keysym """
keysymdb['dead_dasia'] = 0x314
""" Allows to import Multi_key sequences """
keysymdb['Multi_key'] = 0xff20
keysymdb['zerosubscript'] = 0x2080
keysymdb['onesubscript'] = 0x2081
keysymdb['twosubscript'] = 0x2082
keysymdb['threesubscript'] = 0x2083
keysymdb['foursubscript'] = 0x2084
keysymdb['fivesubscript'] = 0x2085
keysymdb['sixsubscript'] = 0x2086
keysymdb['sevensubscript'] = 0x2087
keysymdb['eightsubscript'] = 0x2088
keysymdb['ninesubscript'] = 0x2089
keysymdb['dead_doublegrave'] = 0x030F
keysymdb['dead_invertedbreve'] = 0x0311
return keysymdb
def keysymvalue(keysym, file = "n/a", linenum = 0):
""" Extracts a value from the keysym """
""" Find the value of keysym, using the data from keysyms """
""" Use file and linenum to when reporting errors """
if keysym == "":
return 0
if keysymdatabase.has_key(keysym):
return keysymdatabase[keysym]
elif keysym[0] == 'U' and match('[0-9a-fA-F]+$', keysym[1:]):
return atoi(keysym[1:], 16)
elif keysym[:2] == '0x' and match('[0-9a-fA-F]+$', keysym[2:]):
return atoi(keysym[2:], 16)
else:
print 'keysymvalue: UNKNOWN{%(keysym)s}' % { "keysym": keysym }
#return -1
sys.exit(-1)
def keysymunicodevalue(keysym, file = "n/a", linenum = 0):
""" Extracts a value from the keysym """
""" Find the value of keysym, using the data from keysyms """
""" Use file and linenum to when reporting errors """
if keysym == "":
return 0
if keysymunicodedatabase.has_key(keysym):
return keysymunicodedatabase[keysym]
elif keysym[0] == 'U' and match('[0-9a-fA-F]+$', keysym[1:]):
return atoi(keysym[1:], 16)
elif keysym[:2] == '0x' and match('[0-9a-fA-F]+$', keysym[2:]):
return atoi(keysym[2:], 16)
else:
print 'keysymunicodevalue: UNKNOWN{%(keysym)s}' % { "keysym": keysym }
sys.exit(-1)
def rename_combining(seq):
filtered_sequence = []
for ks in seq:
if findall('^combining_', ks):
ks = sub('^combining_', 'dead_', ks)
if ks == 'dead_double_grave':
ks = 'dead_doublegrave'
if ks == 'dead_inverted_breve':
ks = 'dead_invertedbreve'
filtered_sequence.append(ks)
return filtered_sequence
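# e.g. rename_combining(['combining_tilde', 'A']) returns ['dead_tilde', 'A'],
# folding combining_* keysym names into the dead_* spelling this script uses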
keysymunicodedatabase = process_keysymstxt()
keysymdatabase = process_gdkkeysymsh()
""" Grab and open the compose file from upstream """
filename_compose = download_file(URL_COMPOSE)
try:
composefile = open(filename_compose, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Look if there is a lookaside (supplementary) compose file in the current
directory, and if so, open, then merge with upstream Compose file.
"""
xorg_compose_sequences_raw = []
for seq in composefile.readlines():
xorg_compose_sequences_raw.append(seq)
try:
composefile_lookaside = open(FILENAME_COMPOSE_SUPPLEMENTARY, 'r')
for seq in composefile_lookaside.readlines():
xorg_compose_sequences_raw.append(seq)
except IOError, (errno, strerror):
if opt_verbose:
print "I/O error(%s): %s" % (errno, strerror)
print "Did not find lookaside compose file. Continuing..."
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the compose file in xorg_compose_sequences"""
xorg_compose_sequences = []
xorg_compose_sequences_algorithmic = []
linenum_compose = 0
comment_nest_depth = 0
for line in xorg_compose_sequences_raw:
linenum_compose += 1
line = line.strip()
if match("^XCOMM", line) or match("^#", line):
continue
line = sub(r"\/\*([^\*]*|[\*][^/])\*\/", "", line)
comment_start = line.find("/*")
if comment_start >= 0:
if comment_nest_depth == 0:
line = line[:comment_start]
else:
line = ""
comment_nest_depth += 1
else:
comment_end = line.find("*/")
if comment_end >= 0:
comment_nest_depth -= 1
if comment_nest_depth < 0:
print "Invalid comment %(linenum_compose)d in %(filename)s: \
Closing '*/' without opening '/*'" % { "linenum_compose": linenum_compose, "filename": filename_compose }
exit(-1)
if comment_nest_depth > 0:
line = ""
else:
line = line[comment_end + 2:]
if line is "":
continue
#line = line[:-1]
components = split(':', line)
if len(components) != 2:
print "Invalid line %(linenum_compose)d in %(filename)s: No sequence\
/value pair found" % { "linenum_compose": linenum_compose, "filename": filename_compose }
exit(-1)
(seq, val ) = split(':', line)
seq = seq.strip()
val = val.strip()
raw_sequence = findall('\w+', seq)
values = split('\s+', val)
unichar_temp = split('"', values[0])
unichar = unichar_temp[1]
if len(values) == 1:
continue
codepointstr = values[1]
if values[1] == '#':
# No codepoints that are >1 characters yet.
continue
if raw_sequence[0][0] == 'U' and match('[0-9a-fA-F]+$', raw_sequence[0][1:]):
raw_sequence[0] = '0x' + raw_sequence[0][1:]
if match('^U[0-9a-fA-F]+$', codepointstr):
codepoint = long(codepointstr[1:], 16)
elif keysymunicodedatabase.has_key(codepointstr):
#if keysymdatabase[codepointstr] != keysymunicodedatabase[codepointstr]:
#print "DIFFERENCE: 0x%(a)X 0x%(b)X" % { "a": keysymdatabase[codepointstr], "b": keysymunicodedatabase[codepointstr]},
#print raw_sequence, codepointstr
codepoint = keysymunicodedatabase[codepointstr]
else:
print
print "Invalid codepoint at line %(linenum_compose)d in %(filename)s:\
%(line)s" % { "linenum_compose": linenum_compose, "filename": filename_compose, "line": line }
exit(-1)
sequence = rename_combining(raw_sequence)
reject_this = False
for i in sequence:
if keysymvalue(i) > 0xFFFF:
reject_this = True
if opt_plane1:
print sequence
break
if keysymvalue(i) < 0:
reject_this = True
break
if reject_this:
continue
if "U0342" in sequence or \
"U0313" in sequence or \
"U0314" in sequence or \
"0x0313" in sequence or \
"0x0342" in sequence or \
"0x0314" in sequence:
continue
if "dead_belowring" in sequence or\
"dead_currency" in sequence or\
"dead_belowcomma" in sequence or\
"dead_belowmacron" in sequence or\
"dead_belowtilde" in sequence or\
"dead_belowbreve" in sequence or\
"dead_belowdiaeresis" in sequence or\
"dead_belowcircumflex" in sequence:
continue
#for i in range(len(sequence)):
# if sequence[i] == "0x0342":
# sequence[i] = "dead_tilde"
if "Multi_key" not in sequence:
""" Ignore for now >0xFFFF keysyms """
if codepoint < 0xFFFF:
original_sequence = copy(sequence)
stats_sequence = copy(sequence)
base = sequence.pop()
basechar = keysymvalue(base, filename_compose, linenum_compose)
if basechar < 0xFFFF:
counter = 1
unisequence = []
not_normalised = True
skipping_this = False
for i in range(0, len(sequence)):
""" If the sequence has dead_tilde and is for Greek, we don't do algorithmically
because of lack of dead_perispomeni (i.e. conflict)
"""
bc = basechar
"""if sequence[-1] == "dead_tilde" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_horn" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_ogonek" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_psili":
sequence[i] = "dead_horn"
if sequence[-1] == "dead_dasia":
sequence[-1] = "dead_ogonek"
"""
unisequence.append(unichr(keysymunicodevalue(sequence.pop(), filename_compose, linenum_compose)))
if skipping_this:
unisequence = []
for perm in all_permutations(unisequence):
# print counter, original_sequence, unichr(basechar) + "".join(perm)
# print counter, map(unichr, perm)
normalized = normalize('NFC', unichr(basechar) + "".join(perm))
if len(normalized) == 1:
# print 'Base: %(base)s [%(basechar)s], produces [%(unichar)s] (0x%(codepoint)04X)' \
# % { "base": base, "basechar": unichr(basechar), "unichar": unichar, "codepoint": codepoint },
# print "Normalized: [%(normalized)s] SUCCESS %(c)d" % { "normalized": normalized, "c": counter }
stats_sequence_data = map(keysymunicodevalue, stats_sequence)
stats_sequence_data.append(normalized)
xorg_compose_sequences_algorithmic.append(stats_sequence_data)
not_normalised = False
break
counter += 1
if not_normalised or opt_allsequences:
original_sequence.append(codepoint)
xorg_compose_sequences.append(original_sequence)
""" print xorg_compose_sequences[-1] """
else:
print "Error in base char !?!"
exit(-2)
else:
print "OVER", sequence
exit(-1)
else:
sequence.append(codepoint)
xorg_compose_sequences.append(sequence)
""" print xorg_compose_sequences[-1] """
def sequence_cmp(x, y):
if keysymvalue(x[0]) > keysymvalue(y[0]):
return 1
elif keysymvalue(x[0]) < keysymvalue(y[0]):
return -1
elif len(x) > len(y):
return 1
elif len(x) < len(y):
return -1
elif keysymvalue(x[1]) > keysymvalue(y[1]):
return 1
elif keysymvalue(x[1]) < keysymvalue(y[1]):
return -1
elif len(x) < 4:
return 0
elif keysymvalue(x[2]) > keysymvalue(y[2]):
return 1
elif keysymvalue(x[2]) < keysymvalue(y[2]):
return -1
elif len(x) < 5:
return 0
elif keysymvalue(x[3]) > keysymvalue(y[3]):
return 1
elif keysymvalue(x[3]) < keysymvalue(y[3]):
return -1
elif len(x) < 6:
return 0
elif keysymvalue(x[4]) > keysymvalue(y[4]):
return 1
elif keysymvalue(x[4]) < keysymvalue(y[4]):
return -1
else:
return 0
def sequence_unicode_cmp(x, y):
if keysymunicodevalue(x[0]) > keysymunicodevalue(y[0]):
return 1
elif keysymunicodevalue(x[0]) < keysymunicodevalue(y[0]):
return -1
elif len(x) > len(y):
return 1
elif len(x) < len(y):
return -1
elif keysymunicodevalue(x[1]) > keysymunicodevalue(y[1]):
return 1
elif keysymunicodevalue(x[1]) < keysymunicodevalue(y[1]):
return -1
elif len(x) < 4:
return 0
elif keysymunicodevalue(x[2]) > keysymunicodevalue(y[2]):
return 1
elif keysymunicodevalue(x[2]) < keysymunicodevalue(y[2]):
return -1
elif len(x) < 5:
return 0
elif keysymunicodevalue(x[3]) > keysymunicodevalue(y[3]):
return 1
elif keysymunicodevalue(x[3]) < keysymunicodevalue(y[3]):
return -1
elif len(x) < 6:
return 0
elif keysymunicodevalue(x[4]) > keysymunicodevalue(y[4]):
return 1
elif keysymunicodevalue(x[4]) < keysymunicodevalue(y[4]):
return -1
else:
return 0
def sequence_algorithmic_cmp(x, y):
if len(x) < len(y):
return -1
elif len(x) > len(y):
return 1
else:
for i in range(len(x)):
if x[i] < y[i]:
return -1
elif x[i] > y[i]:
return 1
return 0
xorg_compose_sequences.sort(sequence_cmp)
xorg_compose_sequences_uniqued = []
first_time = True
item = None
for next_item in xorg_compose_sequences:
if first_time:
first_time = False
item = next_item
if sequence_unicode_cmp(item, next_item) != 0:
xorg_compose_sequences_uniqued.append(item)
item = next_item
xorg_compose_sequences = copy(xorg_compose_sequences_uniqued)
counter_multikey = 0
for item in xorg_compose_sequences:
if findall('Multi_key', "".join(item[:-1])) != []:
counter_multikey += 1
xorg_compose_sequences_algorithmic.sort(sequence_algorithmic_cmp)
xorg_compose_sequences_algorithmic_uniqued = uniq(xorg_compose_sequences_algorithmic)
firstitem = ""
num_first_keysyms = 0
zeroes = 0
num_entries = 0
num_algorithmic_greek = 0
for sequence in xorg_compose_sequences:
if keysymvalue(firstitem) != keysymvalue(sequence[0]):
firstitem = sequence[0]
num_first_keysyms += 1
zeroes += 6 - len(sequence) + 1
num_entries += 1
for sequence in xorg_compose_sequences_algorithmic_uniqued:
ch = ord(sequence[-1:][0])
if ch >= 0x370 and ch <= 0x3ff or ch >= 0x1f00 and ch <= 0x1fff:
num_algorithmic_greek += 1
if opt_algorithmic:
for sequence in xorg_compose_sequences_algorithmic_uniqued:
letter = "".join(sequence[-1:])
print '0x%(cp)04X, %(uni)s, seq: [ <0x%(base)04X>,' % { 'cp': ord(unicode(letter)), 'uni': letter.encode('utf-8'), 'base': sequence[-2] },
for elem in sequence[:-2]:
print "<0x%(keysym)04X>," % { 'keysym': elem },
""" Yeah, verified... We just want to keep the output similar to -u, so we can compare/sort easily """
print "], recomposed as", letter.encode('utf-8'), "verified"
def num_of_keysyms(seq):
return len(seq) - 1
def convert_UnotationToHex(arg):
if isinstance(arg, str):
if match('^U[0-9A-F][0-9A-F][0-9A-F][0-9A-F]$', arg):
return sub('^U', '0x', arg)
return arg
def addprefix_GDK(arg):
if match('^0x', arg):
return '%(arg)s, ' % { 'arg': arg }
else:
return 'GDK_KEY_%(arg)s, ' % { 'arg': arg }
if opt_gtk:
first_keysym = ""
sequence = []
compose_table = []
ct_second_part = []
ct_sequence_width = 2
start_offset = num_first_keysyms * (WIDTHOFCOMPOSETABLE+1)
we_finished = False
counter = 0
sequence_iterator = iter(xorg_compose_sequences)
sequence = sequence_iterator.next()
while True:
first_keysym = sequence[0] # Set the first keysym
compose_table.append([first_keysym, 0, 0, 0, 0, 0])
while sequence[0] == first_keysym:
compose_table[counter][num_of_keysyms(sequence)-1] += 1
try:
sequence = sequence_iterator.next()
except StopIteration:
we_finished = True
break
if we_finished:
break
counter += 1
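# The loop below turns the per-length occurrence counts collected above into
# cumulative start offsets into the second part of the table: column i+1 becomes
# the index where that row's sequences of i+2 keysyms begin, and ct_index
# advances by occurences * (i + 2) slots to skip over them.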
ct_index = start_offset
for line_num in range(len(compose_table)):
for i in range(WIDTHOFCOMPOSETABLE):
occurences = compose_table[line_num][i+1]
compose_table[line_num][i+1] = ct_index
ct_index += occurences * (i+2)
for sequence in xorg_compose_sequences:
ct_second_part.append(map(convert_UnotationToHex, sequence))
print headerfile_start
for i in compose_table:
if opt_gtkexpanded:
print "0x%(ks)04X," % { "ks": keysymvalue(i[0]) },
print '%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i[1:])) }
elif not match('^0x', i[0]):
print 'GDK_KEY_%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i)) }
else:
print '%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i)) }
for i in ct_second_part:
if opt_numeric:
for ks in i[1:][:-1]:
print '0x%(seq)04X, ' % { 'seq': keysymvalue(ks) },
print '0x%(cp)04X, ' % { 'cp':i[-1] }
"""
for ks in i[:-1]:
print '0x%(seq)04X, ' % { 'seq': keysymvalue(ks) },
print '0x%(cp)04X, ' % { 'cp':i[-1] }
"""
elif opt_gtkexpanded:
print '%(seq)s0x%(cp)04X, ' % { 'seq': "".join(map(addprefix_GDK, i[:-1])), 'cp':i[-1] }
else:
print '%(seq)s0x%(cp)04X, ' % { 'seq': "".join(map(addprefix_GDK, i[:-1][1:])), 'cp':i[-1] }
print headerfile_end
def redecompose(codepoint):
(name, decomposition, combiningclass) = unicodedatabase[codepoint]
if decomposition[0] == '' or decomposition[0] == '0':
return [codepoint]
if match('<\w+>', decomposition[0]):
numdecomposition = map(stringtohex, decomposition[1:])
return map(redecompose, numdecomposition)
numdecomposition = map(stringtohex, decomposition)
return map(redecompose, numdecomposition)
def process_unicodedata_file(verbose = False):
""" Grab from wget http://www.unicode.org/Public/UNIDATA/UnicodeData.txt """
filename_unicodedatatxt = download_file(URL_UNICODEDATATXT)
try:
unicodedatatxt = open(filename_unicodedatatxt, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
for line in unicodedatatxt.readlines():
if line[0] == "" or line[0] == '#':
continue
line = line[:-1]
uniproperties = split(';', line)
codepoint = stringtohex(uniproperties[0])
""" We don't do Plane 1 or CJK blocks. The latter require reading additional files. """
if codepoint > 0xFFFF or (codepoint >= 0x4E00 and codepoint <= 0x9FFF) or (codepoint >= 0xF900 and codepoint <= 0xFAFF):
continue
name = uniproperties[1]
category = uniproperties[2]
combiningclass = uniproperties[3]
decomposition = uniproperties[5]
unicodedatabase[codepoint] = [name, split('\s+', decomposition), combiningclass]
counter_combinations = 0
counter_combinations_greek = 0
counter_entries = 0
counter_entries_greek = 0
for item in unicodedatabase.keys():
(name, decomposition, combiningclass) = unicodedatabase[item]
if decomposition[0] == '':
continue
elif match('<\w+>', decomposition[0]):
continue
else:
sequence = map(stringtohex, decomposition)
chrsequence = map(unichr, sequence)
normalized = normalize('NFC', "".join(chrsequence))
""" print name, sequence, "Combining: ", "".join(chrsequence), normalized, len(normalized), """
decomposedsequence = []
for subseq in map(redecompose, sequence):
for seqitem in subseq:
if isinstance(seqitem, list):
for i in seqitem:
if isinstance(i, list):
for j in i:
decomposedsequence.append(j)
else:
decomposedsequence.append(i)
else:
decomposedsequence.append(seqitem)
recomposedchar = normalize('NFC', "".join(map(unichr, decomposedsequence)))
if len(recomposedchar) == 1 and len(decomposedsequence) > 1:
counter_entries += 1
counter_combinations += factorial(len(decomposedsequence)-1)
ch = item
if ch >= 0x370 and ch <= 0x3ff or ch >= 0x1f00 and ch <= 0x1fff:
counter_entries_greek += 1
counter_combinations_greek += factorial(len(decomposedsequence)-1)
if verbose:
print "0x%(cp)04X, %(uni)c, seq:" % { 'cp':item, 'uni':unichr(item) },
print "[",
for elem in decomposedsequence:
print '<0x%(hex)04X>,' % { 'hex': elem },
print "], recomposed as", recomposedchar,
if unichr(item) == recomposedchar:
print "verified"
if verbose == False:
print "Unicode statistics from UnicodeData.txt"
print "Number of entries that can be algorithmically produced :", counter_entries
print " of which are for Greek :", counter_entries_greek
print "Number of compose sequence combinations requiring :", counter_combinations
print " of which are for Greek :", counter_combinations_greek
print "Note: We do not include partial compositions, "
print "thus the slight discrepancy in the figures"
print
if opt_unicodedatatxt:
process_unicodedata_file(True)
if opt_statistics:
print
print "Total number of compose sequences (from file) :", len(xorg_compose_sequences) + len(xorg_compose_sequences_algorithmic)
print " of which can be expressed algorithmically :", len(xorg_compose_sequences_algorithmic)
print " of which cannot be expressed algorithmically :", len(xorg_compose_sequences)
print " of which have Multi_key :", counter_multikey
print
print "Algorithmic (stats for Xorg Compose file)"
print "Number of sequences off due to algo from file (len(array)) :", len(xorg_compose_sequences_algorithmic)
print "Number of sequences off due to algo (uniq(sort(array))) :", len(xorg_compose_sequences_algorithmic_uniqued)
print " of which are for Greek :", num_algorithmic_greek
print
process_unicodedata_file()
print "Not algorithmic (stats from Xorg Compose file)"
print "Number of sequences :", len(xorg_compose_sequences)
print "Flat array looks like :", len(xorg_compose_sequences), "rows of 6 integers (2 bytes per int, or 12 bytes per row)"
print "Flat array would have taken up (in bytes) :", num_entries * 2 * 6, "bytes from the GTK+ library"
print "Number of items in flat array :", len(xorg_compose_sequences) * 6
print " of which are zeroes :", zeroes, "or ", (100 * zeroes) / (len(xorg_compose_sequences) * 6), " per cent"
print "Number of different first items :", num_first_keysyms
print "Number of max bytes (if using flat array) :", num_entries * 2 * 6
print "Number of savings :", zeroes * 2 - num_first_keysyms * 2 * 5
print
print "Memory needs if both algorithmic+optimised table in latest Xorg compose file"
print " :", num_entries * 2 * 6 - zeroes * 2 + num_first_keysyms * 2 * 5
print
print "Existing (old) implementation in GTK+"
print "Number of sequences in old gtkimcontextsimple.c :", 691
print "The existing (old) implementation in GTK+ takes up :", 691 * 2 * 12, "bytes"
|
bsd-3-clause
|
byndcivilization/toy-infrastructure
|
flask-app/venv/lib/python3.6/site-packages/pip/utils/ui.py
|
490
|
11597
|
from __future__ import absolute_import
from __future__ import division
import itertools
import sys
from signal import signal, SIGINT, default_int_handler
import time
import contextlib
import logging
from pip.compat import WINDOWS
from pip.utils import format_size
from pip.utils.logging import get_indentation
from pip._vendor import six
from pip._vendor.progress.bar import Bar, IncrementalBar
from pip._vendor.progress.helpers import (WritelnMixin,
HIDE_CURSOR, SHOW_CURSOR)
from pip._vendor.progress.spinner import Spinner
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
logger = logging.getLogger(__name__)
def _select_progress_class(preferred, fallback):
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", six.text_type()),
getattr(preferred, "fill", six.text_type()),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
six.text_type().join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
_BaseBar = _select_progress_class(IncrementalBar, Bar)
class InterruptibleMixin(object):
"""
Helper to ensure that self.finish() gets called on keyboard interrupt.
This allows downloads to be interrupted without leaving temporary state
(like hidden cursors) behind.
This class is similar to the progress library's existing SigIntMixin
helper, but as of version 1.2, that helper has the following problems:
1. It calls sys.exit().
2. It discards the existing SIGINT handler completely.
3. It leaves its own handler in place even after an uninterrupted finish,
which will have unexpected delayed effects if the user triggers an
unrelated keyboard interrupt some time after a progress-displaying
download has already completed, for example.
"""
def __init__(self, *args, **kwargs):
"""
Save the original SIGINT handler for later.
"""
super(InterruptibleMixin, self).__init__(*args, **kwargs)
self.original_handler = signal(SIGINT, self.handle_sigint)
# If signal() returns None, the previous handler was not installed from
# Python, and we cannot restore it. This probably should not happen,
# but if it does, we must restore something sensible instead, at least.
# The least bad option should be Python's default SIGINT handler, which
# just raises KeyboardInterrupt.
if self.original_handler is None:
self.original_handler = default_int_handler
def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler)
def handle_sigint(self, signum, frame):
"""
Call self.finish() before delegating to the original SIGINT handler.
This handler should only be in place while the progress display is
active.
"""
self.finish()
self.original_handler(signum, frame)
class DownloadProgressMixin(object):
def __init__(self, *args, **kwargs):
super(DownloadProgressMixin, self).__init__(*args, **kwargs)
self.message = (" " * (get_indentation() + 2)) + self.message
@property
def downloaded(self):
return format_size(self.index)
@property
def download_speed(self):
# Avoid zero division errors...
if self.avg == 0.0:
return "..."
return format_size(1 / self.avg) + "/s"
@property
def pretty_eta(self):
if self.eta:
return "eta %s" % self.eta_td
return ""
def iter(self, it, n=1):
for x in it:
yield x
self.next(n)
self.finish()
class WindowsMixin(object):
def __init__(self, *args, **kwargs):
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor:
self.hide_cursor = False
super(WindowsMixin, self).__init__(*args, **kwargs)
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file)
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
class DownloadProgressBar(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, _BaseBar):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, WritelnMixin, Spinner):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
def next_phase(self):
if not hasattr(self, "_phaser"):
self._phaser = itertools.cycle(self.phases)
return next(self._phaser)
def update(self):
message = self.message % self
phase = self.next_phase()
suffix = self.suffix % self
line = ''.join([
message,
" " if message else "",
phase,
" " if suffix else "",
suffix,
])
self.writeln(line)
################################################################
# Generic "something is happening" spinners
#
# We don't even try using progress.spinner.Spinner here because it's actually
# simpler to reimplement from scratch than to coerce their code into doing
# what we need.
################################################################
@contextlib.contextmanager
def hidden_cursor(file):
# The Windows terminal does not support the hide/show cursor ANSI codes,
# even via colorama. So don't even try.
if WINDOWS:
yield
# We don't want to clutter the output with control characters if we're
# writing to a file, or if the user is running with --quiet.
# See https://github.com/pypa/pip/issues/3418
elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO:
yield
else:
file.write(HIDE_CURSOR)
try:
yield
finally:
file.write(SHOW_CURSOR)
class RateLimiter(object):
def __init__(self, min_update_interval_seconds):
self._min_update_interval_seconds = min_update_interval_seconds
self._last_update = 0
def ready(self):
now = time.time()
delta = now - self._last_update
return delta >= self._min_update_interval_seconds
def reset(self):
self._last_update = time.time()
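# Illustrative use (mirrors how the spinner classes below consume it):
# limiter = RateLimiter(0.125)
# if limiter.ready():
#     redraw()  # hypothetical callback; at most one call per 0.125 s
#     limiter.reset()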
class InteractiveSpinner(object):
def __init__(self, message, file=None, spin_chars="-\\|/",
# Empirically, 8 updates/second looks nice
min_update_interval_seconds=0.125):
self._message = message
if file is None:
file = sys.stdout
self._file = file
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._finished = False
self._spin_cycle = itertools.cycle(spin_chars)
self._file.write(" " * get_indentation() + self._message + " ... ")
self._width = 0
def _write(self, status):
assert not self._finished
# Erase what we wrote before by backspacing to the beginning, writing
# spaces to overwrite the old text, and then backspacing again
backup = "\b" * self._width
self._file.write(backup + " " * self._width + backup)
# Now we have a blank slate to add our status
self._file.write(status)
self._width = len(status)
self._file.flush()
self._rate_limiter.reset()
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._write(next(self._spin_cycle))
def finish(self, final_status):
if self._finished:
return
self._write(final_status)
self._file.write("\n")
self._file.flush()
self._finished = True
# Used for dumb terminals, non-interactive installs (no tty), etc.
# We still print updates occasionally (once every 60 seconds by default) to
# act as a keep-alive for systems like Travis-CI that take lack-of-output as
# an indication that a task has frozen.
class NonInteractiveSpinner(object):
def __init__(self, message, min_update_interval_seconds=60):
self._message = message
self._finished = False
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._update("started")
def _update(self, status):
assert not self._finished
self._rate_limiter.reset()
logger.info("%s: %s", self._message, status)
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._update("still running...")
def finish(self, final_status):
if self._finished:
return
self._update("finished with status '%s'" % (final_status,))
self._finished = True
@contextlib.contextmanager
def open_spinner(message):
# Interactive spinner goes directly to sys.stdout rather than being routed
# through the logging system, but it acts like it has level INFO,
# i.e. it's only displayed if we're at level INFO or better.
# Non-interactive spinner goes through the logging system, so it is always
# in sync with logging configuration.
if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
spinner = InteractiveSpinner(message)
else:
spinner = NonInteractiveSpinner(message)
try:
with hidden_cursor(sys.stdout):
yield spinner
except KeyboardInterrupt:
spinner.finish("canceled")
raise
except Exception:
spinner.finish("error")
raise
else:
spinner.finish("done")
|
gpl-3.0
|
DavidNorman/tensorflow
|
tensorflow/python/kernel_tests/proto/proto_op_test_base.py
|
22
|
16705
|
# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test case base for testing proto operations."""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes as ct
import os
from tensorflow.core.framework import types_pb2
from tensorflow.python.kernel_tests.proto import test_example_pb2
from tensorflow.python.platform import test
class ProtoOpTestBase(test.TestCase):
"""Base class for testing proto decoding and encoding ops."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(ProtoOpTestBase, self).__init__(methodName)
lib = os.path.join(os.path.dirname(__file__), "libtestexample.so")
if os.path.isfile(lib):
ct.cdll.LoadLibrary(lib)
@staticmethod
def named_parameters(extension=True):
parameters = [("defaults", ProtoOpTestBase.defaults_test_case()),
("minmax", ProtoOpTestBase.minmax_test_case()),
("nested", ProtoOpTestBase.nested_test_case()),
("optional", ProtoOpTestBase.optional_test_case()),
("promote", ProtoOpTestBase.promote_test_case()),
("ragged", ProtoOpTestBase.ragged_test_case()),
("shaped_batch", ProtoOpTestBase.shaped_batch_test_case()),
("simple", ProtoOpTestBase.simple_test_case())]
if extension:
parameters.append(("extension", ProtoOpTestBase.extension_test_case()))
return parameters
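# Illustrative use in a subclass (assumes absl's parameterized test helper,
# which TensorFlow proto op tests commonly pair with this base class):
# @parameterized.named_parameters(*ProtoOpTestBase.named_parameters())
# def test_decode(self, case):
#     ...  # decode case.values and compare against case.fields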
@staticmethod
def defaults_test_case():
test_case = test_example_pb2.TestCase()
test_case.values.add() # No fields specified, so we get all defaults.
test_case.shapes.append(1)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "double_value_with_default"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(1.0)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "float_value_with_default"
field.dtype = types_pb2.DT_FLOAT
field.value.float_value.append(2.0)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "int64_value_with_default"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(3)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sfixed64_value_with_default"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(11)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sint64_value_with_default"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(13)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "uint64_value_with_default"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(4)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "fixed64_value_with_default"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(6)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "int32_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(5)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sfixed32_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(10)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "sint32_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(12)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "uint32_value_with_default"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(9)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "fixed32_value_with_default"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(7)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "bool_value_with_default"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "string_value_with_default"
field.dtype = types_pb2.DT_STRING
field.value.string_value.append("a")
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "bytes_value_with_default"
field.dtype = types_pb2.DT_STRING
field.value.string_value.append("a longer default string")
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "enum_value_with_default"
field.dtype = types_pb2.DT_INT32
field.value.enum_value.append(test_example_pb2.Color.GREEN)
return test_case
@staticmethod
def minmax_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(-1.7976931348623158e+308)
value.double_value.append(2.2250738585072014e-308)
value.double_value.append(1.7976931348623158e+308)
value.float_value.append(-3.402823466e+38)
value.float_value.append(1.175494351e-38)
value.float_value.append(3.402823466e+38)
value.int64_value.append(-9223372036854775808)
value.int64_value.append(9223372036854775807)
value.sfixed64_value.append(-9223372036854775808)
value.sfixed64_value.append(9223372036854775807)
value.sint64_value.append(-9223372036854775808)
value.sint64_value.append(9223372036854775807)
value.uint64_value.append(0)
value.uint64_value.append(18446744073709551615)
value.fixed64_value.append(0)
value.fixed64_value.append(18446744073709551615)
value.int32_value.append(-2147483648)
value.int32_value.append(2147483647)
value.sfixed32_value.append(-2147483648)
value.sfixed32_value.append(2147483647)
value.sint32_value.append(-2147483648)
value.sint32_value.append(2147483647)
value.uint32_value.append(0)
value.uint32_value.append(4294967295)
value.fixed32_value.append(0)
value.fixed32_value.append(4294967295)
value.bool_value.append(False)
value.bool_value.append(True)
value.string_value.append("")
value.string_value.append("I refer to the infinite.")
test_case.shapes.append(1)
test_case.sizes.append(3)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(-1.7976931348623158e+308)
field.value.double_value.append(2.2250738585072014e-308)
field.value.double_value.append(1.7976931348623158e+308)
test_case.sizes.append(3)
field = test_case.fields.add()
field.name = "float_value"
field.dtype = types_pb2.DT_FLOAT
field.value.float_value.append(-3.402823466e+38)
field.value.float_value.append(1.175494351e-38)
field.value.float_value.append(3.402823466e+38)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "int64_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(-9223372036854775808)
field.value.int64_value.append(9223372036854775807)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sfixed64_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(-9223372036854775808)
field.value.int64_value.append(9223372036854775807)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sint64_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(-9223372036854775808)
field.value.int64_value.append(9223372036854775807)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "uint64_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(0)
field.value.uint64_value.append(18446744073709551615)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "fixed64_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(0)
field.value.uint64_value.append(18446744073709551615)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "int32_value"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(-2147483648)
field.value.int32_value.append(2147483647)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sfixed32_value"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(-2147483648)
field.value.int32_value.append(2147483647)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "sint32_value"
field.dtype = types_pb2.DT_INT32
field.value.int32_value.append(-2147483648)
field.value.int32_value.append(2147483647)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "uint32_value"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(0)
field.value.uint32_value.append(4294967295)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "fixed32_value"
field.dtype = types_pb2.DT_UINT32
field.value.uint32_value.append(0)
field.value.uint32_value.append(4294967295)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(False)
field.value.bool_value.append(True)
test_case.sizes.append(2)
field = test_case.fields.add()
field.name = "string_value"
field.dtype = types_pb2.DT_STRING
field.value.string_value.append("")
field.value.string_value.append("I refer to the infinite.")
return test_case
@staticmethod
def nested_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
message_value = value.message_value.add()
message_value.double_value = 23.5
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "message_value"
field.dtype = types_pb2.DT_STRING
message_value = field.value.message_value.add()
message_value.double_value = 23.5
return test_case
@staticmethod
def optional_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.bool_value.append(True)
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
test_case.sizes.append(0)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(0.0)
return test_case
@staticmethod
def promote_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.sint32_value.append(2147483647)
value.sfixed32_value.append(2147483647)
value.int32_value.append(2147483647)
value.fixed32_value.append(4294967295)
value.uint32_value.append(4294967295)
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "sint32_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(2147483647)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "sfixed32_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(2147483647)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "int32_value"
field.dtype = types_pb2.DT_INT64
field.value.int64_value.append(2147483647)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "fixed32_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(4294967295)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "uint32_value"
field.dtype = types_pb2.DT_UINT64
field.value.uint64_value.append(4294967295)
return test_case
@staticmethod
def ragged_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(23.5)
value.double_value.append(123.0)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(3.1)
value.bool_value.append(False)
test_case.shapes.append(2)
test_case.sizes.append(2)
test_case.sizes.append(1)
test_case.sizes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(23.5)
field.value.double_value.append(123.0)
field.value.double_value.append(3.1)
field.value.double_value.append(0.0)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
field.value.bool_value.append(False)
return test_case
@staticmethod
def shaped_batch_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(23.5)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(44.0)
value.bool_value.append(False)
value = test_case.values.add()
value.double_value.append(3.14159)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(1.414)
value.bool_value.append(True)
value = test_case.values.add()
value.double_value.append(-32.2)
value.bool_value.append(False)
value = test_case.values.add()
value.double_value.append(0.0001)
value.bool_value.append(True)
test_case.shapes.append(3)
test_case.shapes.append(2)
for _ in range(12):
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(23.5)
field.value.double_value.append(44.0)
field.value.double_value.append(3.14159)
field.value.double_value.append(1.414)
field.value.double_value.append(-32.2)
field.value.double_value.append(0.0001)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
field.value.bool_value.append(False)
field.value.bool_value.append(True)
field.value.bool_value.append(True)
field.value.bool_value.append(False)
field.value.bool_value.append(True)
return test_case
@staticmethod
def extension_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
message_value = value.Extensions[test_example_pb2.ext_value].add()
message_value.double_value = 23.5
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = test_example_pb2.ext_value.full_name
field.dtype = types_pb2.DT_STRING
message_value = field.value.Extensions[test_example_pb2.ext_value].add()
message_value.double_value = 23.5
return test_case
@staticmethod
def simple_test_case():
test_case = test_example_pb2.TestCase()
value = test_case.values.add()
value.double_value.append(23.5)
value.bool_value.append(True)
value.enum_value.append(test_example_pb2.Color.INDIGO)
test_case.shapes.append(1)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "double_value"
field.dtype = types_pb2.DT_DOUBLE
field.value.double_value.append(23.5)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "bool_value"
field.dtype = types_pb2.DT_BOOL
field.value.bool_value.append(True)
test_case.sizes.append(1)
field = test_case.fields.add()
field.name = "enum_value"
field.dtype = types_pb2.DT_INT32
field.value.enum_value.append(test_example_pb2.Color.INDIGO)
return test_case
|
apache-2.0
|
WPMedia/dd-agent
|
utils/proxy.py
|
8
|
2497
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import logging
import os
from urllib import getproxies
from urlparse import urlparse
log = logging.getLogger(__name__)
def set_no_proxy_settings():
"""
Starting with Agent 5.0.0, there should always be a local forwarder
running and all payloads should go through it. So we should make sure
    that we set the no_proxy environment variable that will be used by requests.
    See: https://github.com/kennethreitz/requests/pull/945
"""
to_add = ["127.0.0.1", "localhost", "169.254.169.254"]
no_proxy = os.environ.get("no_proxy", "")
if not no_proxy.strip():
no_proxy = []
else:
no_proxy = no_proxy.split(',')
for host in to_add:
if host not in no_proxy:
no_proxy.append(host)
os.environ['no_proxy'] = ','.join(no_proxy)
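# Hedged usage sketch (hypothetical pre-existing value): the forwarder hosts
# are appended to whatever no_proxy already contains, e.g.
#   os.environ["no_proxy"] = "corp.example.com"
#   set_no_proxy_settings()
#   os.environ["no_proxy"]  # -> "corp.example.com,127.0.0.1,localhost,169.254.169.254"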
def get_proxy(agentConfig):
proxy_settings = {}
# First we read the proxy configuration from datadog.conf
proxy_host = agentConfig.get('proxy_host')
if proxy_host is not None:
proxy_settings['host'] = proxy_host
try:
proxy_settings['port'] = int(agentConfig.get('proxy_port', 3128))
except ValueError:
        log.error('Proxy port must be an integer. Defaulting to 3128')
proxy_settings['port'] = 3128
proxy_settings['user'] = agentConfig.get('proxy_user')
proxy_settings['password'] = agentConfig.get('proxy_password')
log.debug("Proxy Settings: %s:*****@%s:%s", proxy_settings['user'],
proxy_settings['host'], proxy_settings['port'])
return proxy_settings
# If no proxy configuration was specified in datadog.conf
# We try to read it from the system settings
try:
proxy = getproxies().get('https')
if proxy is not None:
parse = urlparse(proxy)
proxy_settings['host'] = parse.hostname
proxy_settings['port'] = int(parse.port)
proxy_settings['user'] = parse.username
proxy_settings['password'] = parse.password
log.debug("Proxy Settings: %s:*****@%s:%s", proxy_settings['user'],
proxy_settings['host'], proxy_settings['port'])
return proxy_settings
except Exception as e:
log.debug("Error while trying to fetch proxy settings using urllib %s."
"Proxy is probably not set", str(e))
return None
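# Hedged example (hypothetical agentConfig values): proxy settings read from
# datadog.conf take precedence over system-wide proxies, e.g.
#   get_proxy({'proxy_host': 'proxy.example.com', 'proxy_port': '3128',
#              'proxy_user': 'dd', 'proxy_password': 's3cret'})
#   # -> {'host': 'proxy.example.com', 'port': 3128,
#   #     'user': 'dd', 'password': 's3cret'}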
|
bsd-3-clause
|
LinusU/ansible
|
lib/ansible/cli/vault.py
|
52
|
5972
|
# (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
import os
import sys
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.parsing import DataLoader
from ansible.parsing.vault import VaultEditor
from ansible.cli import CLI
from ansible.utils.display import Display
class VaultCLI(CLI):
""" Vault command line class """
VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
def __init__(self, args, display=None):
        self.vault_pass = None
        self.new_vault_pass = None  # only overwritten in run() when --new-vault-password-file is given
        super(VaultCLI, self).__init__(args, display)
def parse(self):
self.parser = CLI.base_parser(
vault_opts=True,
usage = "usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# options specific to self.actions
if self.action == "create":
self.parser.set_usage("usage: %prog create [options] file_name")
elif self.action == "decrypt":
self.parser.set_usage("usage: %prog decrypt [options] file_name")
elif self.action == "edit":
self.parser.set_usage("usage: %prog edit [options] file_name")
elif self.action == "view":
self.parser.set_usage("usage: %prog view [options] file_name")
elif self.action == "encrypt":
self.parser.set_usage("usage: %prog encrypt [options] file_name")
elif self.action == "rekey":
self.parser.set_usage("usage: %prog rekey [options] file_name")
self.options, self.args = self.parser.parse_args()
self.display.verbosity = self.options.verbosity
can_output = ['encrypt', 'decrypt']
if self.action not in can_output:
if self.options.output_file:
raise AnsibleOptionsError("The --output option can be used only with ansible-vault %s" % '/'.join(can_output))
if len(self.args) == 0:
raise AnsibleOptionsError("Vault requires at least one filename as a parameter")
else:
            # This restriction should remain in place until it's possible to
            # load multiple YAML records from a single file; otherwise it's too
            # easy to create an encrypted file that can't be read back in. In
            # the meantime, "cat a b c|ansible-vault encrypt --output x" is
            # a workaround.
if self.options.output_file and len(self.args) > 1:
raise AnsibleOptionsError("At most one input file may be used with the --output option")
def run(self):
super(VaultCLI, self).run()
loader = DataLoader()
if self.options.vault_password_file:
# read vault_pass from a file
self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader)
else:
            self.vault_pass, _ = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)
if self.options.new_vault_password_file:
# for rekey only
self.new_vault_pass = CLI.read_vault_password_file(self.options.new_vault_password_file, loader)
if not self.vault_pass:
raise AnsibleOptionsError("A password is required to use Ansible's Vault")
self.editor = VaultEditor(self.vault_pass)
self.execute()
def execute_encrypt(self):
if len(self.args) == 0 and sys.stdin.isatty():
self.display.display("Reading plaintext input from stdin", stderr=True)
for f in self.args or ['-']:
self.editor.encrypt_file(f, output_file=self.options.output_file)
if sys.stdout.isatty():
self.display.display("Encryption successful", stderr=True)
def execute_decrypt(self):
if len(self.args) == 0 and sys.stdin.isatty():
self.display.display("Reading ciphertext input from stdin", stderr=True)
for f in self.args or ['-']:
self.editor.decrypt_file(f, output_file=self.options.output_file)
if sys.stdout.isatty():
self.display.display("Decryption successful", stderr=True)
def execute_create(self):
if len(self.args) > 1:
raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
self.editor.create_file(self.args[0])
def execute_edit(self):
for f in self.args:
self.editor.edit_file(f)
def execute_view(self):
for f in self.args:
self.editor.view_file(f)
def execute_rekey(self):
for f in self.args:
if not (os.path.isfile(f)):
raise AnsibleError(f + " does not exist")
if self.new_vault_pass:
new_password = self.new_vault_pass
else:
__, new_password = self.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
for f in self.args:
self.editor.rekey_file(f, new_password)
self.display.display("Rekey successful", stderr=True)
|
gpl-3.0
|
knuevena/americorps-backend
|
orgmember.py
|
1
|
4018
|
from user import User
from db import Base, Session
from sqlalchemy import *
from sqlalchemy.orm import relation, sessionmaker
from datetime import datetime, date
from attendee import Attendee
from werkzeug.security import generate_password_hash, check_password_hash
from flask import json
from sqlalchemy import exc
from event import Event
import organization
class OrgMember(User):
__tablename__ = "orgmembers"
__mapper_args__ = {'polymorphic_identity': 'orgmember'}
id = Column(Integer, ForeignKey('users.id'), primary_key=True, nullable=False)
# the OrgMember will have all User fields
org = Column(Integer, ForeignKey('organizations.id'), nullable=False) # object or id?
poc = Column(Boolean, nullable=False)
@classmethod
def fromdict(cls, d):
allowed = ('name', 'email', 'passwordhash', 'phone', 'last_active', 'birthdate',
'bio', 'gender', 'org', 'poc')
df = {k: v for k, v in d.items() if k in allowed}
return cls(**df)
def asdict(self):
dict_ = {}
for key in self.__mapper__.c.keys():
result = getattr(self, key)
if isinstance(result, date):
dict_[key] = str(result)
else:
dict_[key] = result
return dict_
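    # Hedged round-trip sketch (hypothetical payload dict): asdict() mirrors
    # fromdict(), stringifying date columns so the result is JSON-serializable.
    #   m = OrgMember.fromdict(payload)
    #   blob = json.dumps(m.asdict())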
def __init__(self, name, email, passwordhash, phone, poc, org, birthdate=None,
bio=None, gender=None):
self.name = name
self.email = email
self.set_password(passwordhash)
        if len(phone) > 15:
            raise ValueError("phone number is too long")
        elif len(phone) < 10:
            raise ValueError("phone number is too short")
        elif not phone.isdigit():
            raise ValueError("phone number must be a string of digits")
        else:
            self.phone = phone
self.poc = poc
self.last_activity = datetime.now()
self.birthdate = birthdate
self.bio = bio
self.gender = gender
self.org = org
def set_password(self, password):
self.passwordhash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.passwordhash, password)
    # fetch an existing OrgMember by primary key
def getOrgMember(self, id):
s = Session()
content = s.query(OrgMember).filter_by(id=id).first()
s.close()
if content:
return content
else:
raise ValueError("user does not exist")
def confirmAttendee(self, event, user):
s = Session()
        # filter_by() takes keyword arguments; this assumes Attendee has
        # 'event' and 'user' columns matching the passed values
        attendee = s.query(Attendee).filter_by(event=event, user=user).first()
if attendee:
attendee.confirmed = True
s.commit()
s.close()
return True
else:
return False
def validateHour(self, event, user):
s = Session()
        # same keyword-argument fix and column-name assumption as confirmAttendee
        attendee = s.query(Attendee).filter_by(event=event, user=user).first()
if attendee:
attendee.hoursValidated = True
s.commit()
s.close()
return True
else:
return False
    def deleteSelf(self, session):
        s = session
        try:
            s.delete(self)
        except Exception:
            raise exc.SQLAlchemyError("failed to delete orgMember " + str(self.id))
def link_org(orgmember):
s = Session()
o2_org = orgmember.org
org_m = s.query(OrgMember).filter_by(email=orgmember.email).first()
s.close()
if org_m:
org_id = org_m.id
    else:
        print(exc.InvalidRequestError("query failed"))
return False
json2 = json.dumps({'poc': org_id})
organization.updateOrg(o2_org, json2)
return True
def createMember(json):
o = OrgMember.fromdict(json)
s = Session()
try:
s.add(o)
s.commit()
except:
return False
finally:
s.close()
o2 = OrgMember.fromdict(json)
if link_org(o2):
return True
else:
return False
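# Hedged usage sketch (hypothetical field values): createMember() expects the
# same keys fromdict() whitelists; note that __init__ re-hashes the
# 'passwordhash' value via set_password().
#   createMember({'name': 'Ada', 'email': '[email protected]',
#                 'passwordhash': 'hunter2', 'phone': '5551234567',
#                 'poc': True, 'org': 1})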
|
mit
|
linkedin/indextank-service
|
api/boto/ec2/__init__.py
|
10
|
2038
|
# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
service from AWS.
"""
from boto.ec2.connection import EC2Connection
def regions(**kw_params):
"""
Get all available regions for the EC2 service.
You may pass any of the arguments accepted by the EC2Connection
object's constructor as keyword arguments and they will be
passed along to the EC2Connection object.
:rtype: list
:return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
"""
c = EC2Connection(**kw_params)
return c.get_all_regions()
def connect_to_region(region_name, **kw_params):
for region in regions(**kw_params):
if region.name == region_name:
return region.connect(**kw_params)
return None
def get_region(region_name, **kw_params):
for region in regions(**kw_params):
if region.name == region_name:
return region
return None
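# Hedged usage example (credentials elided; the region name must exist for
# the account):
#   conn = connect_to_region('us-east-1',
#                            aws_access_key_id=KEY_ID,       # hypothetical
#                            aws_secret_access_key=SECRET)   # hypothetical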
|
apache-2.0
|
apagac/cfme_tests
|
cfme/tests/infrastructure/test_vm_power_control.py
|
1
|
27862
|
# -*- coding: utf-8 -*-
import random
import time
import pytest
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.base.login import BaseLoggedInPage
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.markers.env_markers.provider import ONE_PER_TYPE
from cfme.rest.gen_data import users as _users
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.wait import TimedOutError
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.long_running,
pytest.mark.tier(2),
pytest.mark.usefixtures('setup_provider'),
test_requirements.power,
pytest.mark.provider([InfraProvider], scope='class'),
]
@pytest.fixture(scope='function')
def vm_name():
return random_vm_name('pwr-c')
@pytest.fixture(scope="function")
def testing_vm(appliance, provider, vm_name):
"""Fixture to provision vm to the provider being tested"""
vm = appliance.collections.infra_vms.instantiate(vm_name, provider)
if not provider.mgmt.does_vm_exist(vm.name):
logger.info("deploying %s on provider %s", vm.name, provider.key)
vm.create_on_provider(allow_skip="default", find_in_cfme=True)
yield vm
vm.cleanup_on_provider()
if_scvmm_refresh_provider(provider)
@pytest.fixture(scope="function")
def archived_vm(testing_vm):
"""Fixture to archive testing VM"""
testing_vm.mgmt.delete()
testing_vm.wait_for_vm_state_change(desired_state='archived', timeout=720,
from_details=False, from_any_provider=True)
@pytest.fixture(scope="function")
def orphaned_vm(provider, testing_vm):
"""Fixture to orphane VM by removing provider from CFME"""
provider.delete_if_exists(cancel=False)
testing_vm.wait_for_vm_state_change(desired_state='orphaned', timeout=720,
from_details=False, from_any_provider=True)
@pytest.fixture(scope="function")
def testing_vm_tools(appliance, provider, vm_name, full_template):
"""Fixture to provision vm with preinstalled tools to the provider being tested"""
vm = appliance.collections.infra_vms.instantiate(vm_name, provider, full_template.name)
if not provider.mgmt.does_vm_exist(vm.name):
logger.info("deploying %s on provider %s", vm.name, provider.key)
vm.create_on_provider(allow_skip="default", find_in_cfme=True)
yield vm
vm.cleanup_on_provider()
if_scvmm_refresh_provider(provider)
def if_scvmm_refresh_provider(provider):
# No eventing from SCVMM so force a relationship refresh
if provider.one_of(SCVMMProvider):
provider.refresh_provider_relationships()
def check_power_options(provider, soft_assert, vm, power_state):
must_be_available = {
'on': [vm.POWER_OFF, vm.SUSPEND, vm.RESET],
'off': [vm.POWER_ON]
}
mustnt_be_available = {
'on': [vm.POWER_ON],
'off': [vm.POWER_OFF, vm.SUSPEND, vm.RESET]
}
# VMware and RHEVM have extended power options
if not provider.one_of(SCVMMProvider):
mustnt_be_available['off'].extend([vm.GUEST_RESTART, vm.GUEST_SHUTDOWN])
if not provider.one_of(SCVMMProvider, RHEVMProvider):
mustnt_be_available['on'].extend([vm.GUEST_RESTART, vm.GUEST_SHUTDOWN])
if provider.one_of(RHEVMProvider):
must_be_available['on'].remove(vm.RESET)
view = navigate_to(vm, 'Details')
power_dropdown = view.toolbar.power
for pwr_option in must_be_available[power_state]:
soft_assert(power_dropdown.item_enabled(pwr_option),
"'{}' must be available in current power state - '{}' ".format(pwr_option,
power_state))
for pwr_option in mustnt_be_available[power_state]:
pwr_state = power_dropdown.has_item(pwr_option) and power_dropdown.item_enabled(pwr_option)
soft_assert(not pwr_state,
"'{}' must not be available in current power state - '{}' ".format(pwr_option,
power_state))
def wait_for_last_boot_timestamp_refresh(vm, boot_time, timeout=300):
"""Timestamp update doesn't happen with state change so need a longer
wait when expecting a last boot timestamp change"""
view = navigate_to(vm, "Details")
def _wait_for_timestamp_refresh():
cur_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
return boot_time != cur_boot_time
try:
wait_for(_wait_for_timestamp_refresh, num_sec=timeout, delay=30,
fail_func=view.toolbar.reload.click)
return True
except TimedOutError:
return False
def ensure_state_changed_on_unchanged(vm, state_changed_on):
"""Returns True if current value of State Changed On in the Power Management
is the same as the supplied (original) value."""
view = navigate_to(vm, "Details")
new_state_changed_on = view.entities.summary("Power Management").get_text_of("State Changed On")
return state_changed_on == new_state_changed_on
def wait_for_vm_tools(vm, timeout=300):
"""Sometimes test opens VM details before it gets loaded and can't verify if vmtools are
installed"""
view = navigate_to(vm, "Details")
def _wait_for_tools_ok():
return view.entities.summary("Properties").get_text_of("Platform Tools") == 'toolsOk'
    try:
        wait_for(_wait_for_tools_ok, num_sec=timeout, delay=10, fail_func=view.toolbar.reload.click)
        return True
    except TimedOutError:
        return False
class TestControlOnQuadicons(object):
@pytest.mark.rhv3
def test_power_off_cancel(self, testing_vm, ensure_vm_running, soft_assert):
"""Tests power off cancel
Metadata:
test_flag: power_control, provision
Polarion:
assignee: ghubale
casecomponent: Infra
initialEstimate: 1/10h
"""
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=720)
testing_vm.power_control_from_cfme(option=testing_vm.POWER_OFF, cancel=True)
if_scvmm_refresh_provider(testing_vm.provider)
# TODO: assert no event.
time.sleep(60)
vm_state = testing_vm.find_quadicon().data['state']
soft_assert(vm_state == 'on')
soft_assert(
testing_vm.mgmt.is_running, "vm not running")
@pytest.mark.rhv1
def test_power_off(self, appliance, testing_vm, ensure_vm_running, soft_assert):
"""Tests power off
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
"""
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=720)
testing_vm.power_control_from_cfme(option=testing_vm.POWER_OFF, cancel=False)
view = appliance.browser.create_view(BaseLoggedInPage)
view.flash.assert_success_message(text='Stop initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_OFF, timeout=900)
vm_state = testing_vm.find_quadicon().data['state']
soft_assert(vm_state == 'off')
soft_assert(not testing_vm.mgmt.is_running, "vm running")
@pytest.mark.rhv3
def test_power_on_cancel(self, testing_vm, ensure_vm_stopped, soft_assert):
"""Tests power on cancel
Polarion:
assignee: ghubale
initialEstimate: 1/4h
casecomponent: Infra
caseimportance: high
tags: power
"""
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_OFF, timeout=720)
testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=True)
if_scvmm_refresh_provider(testing_vm.provider)
time.sleep(60)
vm_state = testing_vm.find_quadicon().data['state']
soft_assert(vm_state == 'off')
soft_assert(not testing_vm.mgmt.is_running, "vm running")
@pytest.mark.rhv1
@pytest.mark.tier(1)
def test_power_on(self, appliance, testing_vm, ensure_vm_stopped, soft_assert):
"""Tests power on
Metadata:
test_flag: power_control, provision
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
"""
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_OFF, timeout=720)
testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False)
view = appliance.browser.create_view(BaseLoggedInPage)
view.flash.assert_success_message(text='Start initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=900)
vm_state = testing_vm.find_quadicon().data['state']
soft_assert(vm_state == 'on')
soft_assert(testing_vm.mgmt.is_running, "vm not running")
class TestVmDetailsPowerControlPerProvider(object):
@pytest.mark.rhv3
def test_power_off(self, appliance, testing_vm, ensure_vm_running, soft_assert):
"""Tests power off
Metadata:
test_flag: power_control, provision
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
"""
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
view = navigate_to(testing_vm, "Details")
last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
testing_vm.power_control_from_cfme(option=testing_vm.POWER_OFF, cancel=False,
from_details=True)
view.flash.assert_success_message(text='Stop initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_OFF, timeout=720, from_details=True)
soft_assert(not testing_vm.mgmt.is_running, "vm running")
# BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
if not testing_vm.provider.one_of(RHEVMProvider):
new_last_boot_time = view.entities.summary("Power Management").get_text_of(
"Last Boot Time")
soft_assert(new_last_boot_time == last_boot_time,
"ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
@pytest.mark.rhv3
def test_power_on(self, appliance, testing_vm, ensure_vm_stopped, soft_assert):
"""Tests power on
Metadata:
test_flag: power_control, provision
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
"""
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_OFF, timeout=720, from_details=True)
testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False,
from_details=True)
view = appliance.browser.create_view(BaseLoggedInPage)
view.flash.assert_success_message(text='Start initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
soft_assert(testing_vm.mgmt.is_running, "vm not running")
@pytest.mark.rhv3
@pytest.mark.meta(automates=[BZ(1174858)])
def test_suspend(self, appliance, testing_vm, ensure_vm_running, soft_assert):
"""Tests suspend
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
Bugzilla:
1174858
"""
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
view = navigate_to(testing_vm, "Details")
last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
testing_vm.power_control_from_cfme(option=testing_vm.SUSPEND,
cancel=False,
from_details=True)
view.flash.assert_success_message(text='Suspend initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_SUSPENDED,
timeout=450,
from_details=True)
soft_assert(testing_vm.mgmt.is_suspended, "vm not suspended")
if not testing_vm.provider.one_of(RHEVMProvider):
new_last_boot_time = view.entities.summary("Power Management").get_text_of(
"Last Boot Time")
soft_assert(new_last_boot_time == last_boot_time,
"ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
@pytest.mark.rhv1
def test_start_from_suspend(self, appliance, testing_vm, ensure_vm_suspended, soft_assert):
"""Tests start from suspend
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
caseimportance: high
tags: power
"""
try:
testing_vm.provider.refresh_provider_relationships()
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True)
except TimedOutError:
if testing_vm.provider.one_of(RHEVMProvider):
logger.warning('working around bz1174858, ignoring timeout')
else:
raise
view = navigate_to(testing_vm, "Details")
last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False,
from_details=True)
view.flash.assert_success_message(text='Start initiated', partial=True)
if_scvmm_refresh_provider(testing_vm.provider)
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
wait_for_last_boot_timestamp_refresh(testing_vm, last_boot_time, timeout=600)
soft_assert(testing_vm.mgmt.is_running, "vm not running")
@pytest.mark.rhv3
def test_no_template_power_control(provider, soft_assert):
""" Ensures that no power button is displayed for templates.
Polarion:
assignee: ghubale
casecomponent: Infra
initialEstimate: 1/10h
setup:
1. An infra provider that has some templates.
testSteps:
1. Open the view of all templates of the provider
2. Verify the Power toolbar button is not visible
3. Select some template using the checkbox
4. Verify the Power toolbar button is not visible
5. Click on some template to get into the details page
6. Verify the Power toolbar button is not visible
Bugzilla:
1496383
1634713
"""
view = navigate_to(provider, 'ProviderTemplates')
view.toolbar.view_selector.select('Grid View')
soft_assert(not view.toolbar.power.is_displayed, "Power displayed in template grid view!")
# Ensure selecting a template doesn't cause power menu to appear
templates = view.entities.all_entity_names
template_name = random.choice(templates)
selected_template = provider.appliance.collections.infra_templates.instantiate(template_name,
provider)
    # Check the power button by selecting the template's quadicon
view = navigate_to(selected_template, 'AllForProvider', use_resetter=False)
entity = view.entities.get_entity(name=selected_template.name, surf_pages=True)
entity.check()
for action in view.toolbar.power.items:
# Performing power actions on template
view.toolbar.power.item_select(action, handle_alert=True)
if action == 'Power On':
action = 'Start'
elif action == 'Power Off':
action = 'Stop'
view.flash.assert_message('{} action does not apply to selected items'.format(action))
view.flash.dismiss()
# Ensure there isn't a power button on the details page
entity.click()
soft_assert(not view.toolbar.power.is_displayed, "Power displayed in template details!")
@pytest.mark.rhv3
@pytest.mark.meta(
blockers=[
BZ(
1723805,
unblock=lambda provider: not provider.one_of(SCVMMProvider),
)
]
)
def test_no_power_controls_on_archived_vm(appliance, testing_vm, archived_vm, soft_assert):
""" Ensures that no power button is displayed from details view of archived vm
Polarion:
assignee: ghubale
casecomponent: Infra
initialEstimate: 1/10h
setup:
1. Archived VM should be available
testSteps:
1. Open the view of VM Details
2. Verify the Power toolbar button is not visible
Bugzilla:
1520489
1659340
"""
view = navigate_to(testing_vm, 'AnyProviderDetails', use_resetter=False)
    assert not view.toolbar.power.is_enabled, "Power displayed in archived VM's details!"
@pytest.mark.rhv3
def test_archived_vm_status(testing_vm, archived_vm):
"""Tests archived vm status
Metadata:
test_flag: inventory
Polarion:
assignee: ghubale
casecomponent: Infra
caseimportance: high
initialEstimate: 1/8h
tags: power
"""
vm_state = testing_vm.find_quadicon(from_any_provider=True).data['state']
assert (vm_state == 'archived')
@pytest.mark.rhv3
def test_orphaned_vm_status(testing_vm, orphaned_vm):
"""Tests orphaned vm status
Polarion:
assignee: ghubale
initialEstimate: 1/10h
casecomponent: Infra
tags: power
"""
vm_state = testing_vm.find_quadicon(from_any_provider=True).data['state']
assert (vm_state == 'orphaned')
@pytest.mark.rhv1
def test_vm_power_options_from_on(provider, soft_assert, testing_vm, ensure_vm_running):
"""Tests vm power options from on
Metadata:
test_flag: power_control
Polarion:
assignee: ghubale
casecomponent: Infra
initialEstimate: 1/4h
"""
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
check_power_options(provider, soft_assert, testing_vm, testing_vm.STATE_ON)
@pytest.mark.rhv3
@pytest.mark.meta(automates=[BZ(1724062)])
def test_vm_power_options_from_off(provider, soft_assert, testing_vm, ensure_vm_stopped):
"""Tests vm power options from off
Metadata:
test_flag: power_control
Polarion:
assignee: ghubale
casecomponent: Infra
initialEstimate: 1/4h
Bugzilla:
1724062
"""
# TODO([email protected]): Update this test case with power options(shutdown and restart guest)
# for scvmm provider
testing_vm.wait_for_vm_state_change(
desired_state=testing_vm.STATE_OFF, timeout=720, from_details=True)
check_power_options(provider, soft_assert, testing_vm, testing_vm.STATE_OFF)
@pytest.mark.provider([VMwareProvider, RHEVMProvider], override=True, scope='function')
@pytest.mark.meta(automates=[1571830, 1650506])
def test_guest_os_reset(appliance, provider, testing_vm_tools, ensure_vm_running, soft_assert):
"""Tests vm guest os reset
Metadata:
test_flag: power_control
Polarion:
assignee: ghubale
initialEstimate: 1/6h
casecomponent: Infra
tags: power
Bugzilla:
1571830
1650506
"""
# TODO([email protected]): Update this test case for power operation(restart guest) for scvmm
wait_for_vm_tools(testing_vm_tools)
view = navigate_to(testing_vm_tools, "Details")
last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
state_changed_on = view.entities.summary("Power Management").get_text_of("State Changed On")
testing_vm_tools.power_control_from_cfme(
option=testing_vm_tools.GUEST_RESTART, cancel=False, from_details=True)
view.flash.assert_success_message(text='Restart Guest initiated', partial=True)
if not (provider.one_of(RHEVMProvider) and BZ(1571830, forced_streams=["5.10", "5.11"]).blocks):
soft_assert(
wait_for_last_boot_timestamp_refresh(testing_vm_tools, last_boot_time),
"Last Boot Time value has not been refreshed",
)
soft_assert(
ensure_state_changed_on_unchanged(testing_vm_tools, state_changed_on),
"Value of 'State Changed On' has changed after guest restart",
)
soft_assert(testing_vm_tools.mgmt.is_running, "vm not running")
@pytest.mark.meta(automates=[1723485, 1571895, 1650506])
@pytest.mark.provider([VMwareProvider, RHEVMProvider], override=True)
@pytest.mark.meta(blockers=[BZ(1723485, forced_streams=["5.11"],
unblock=lambda provider: not (provider.one_of(RHEVMProvider)
and not provider.version < 4.3))])
def test_guest_os_shutdown(appliance, provider, testing_vm_tools, ensure_vm_running, soft_assert):
"""Tests vm guest os reset
Polarion:
assignee: ghubale
initialEstimate: 1/6h
caseimportance: high
casecomponent: Infra
tags: power
Bugzilla:
1723485
1571895
1650506
"""
# TODO([email protected]): Update this test case for power operation(shutdown guest) for scvmm
testing_vm_tools.wait_for_vm_state_change(
desired_state=testing_vm_tools.STATE_ON, timeout=720, from_details=True)
wait_for_vm_tools(testing_vm_tools)
view = navigate_to(testing_vm_tools, "Details")
last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
testing_vm_tools.power_control_from_cfme(
option=testing_vm_tools.GUEST_SHUTDOWN, cancel=False, from_details=True)
view.flash.assert_success_message(text='Shutdown Guest initiated', partial=True)
testing_vm_tools.wait_for_vm_state_change(
desired_state=testing_vm_tools.STATE_OFF, timeout=720, from_details=True)
soft_assert(
not testing_vm_tools.mgmt.is_running, "vm running")
# Blocking this assertion for RHEV providers because of BZ(1571895) not fixed yet
if not (BZ(1571895, forced_streams=["5.10", "5.11"]).blocks and provider.one_of(RHEVMProvider)):
new_last_boot_time = view.entities.summary("Power Management").get_text_of("Last Boot Time")
soft_assert(new_last_boot_time == last_boot_time,
"ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
@pytest.fixture(scope="function")
def new_user(request, appliance):
user, user_data = _users(request, appliance, group="EvmGroup-vm_user")
yield appliance.collections.users.instantiate(
name=user[0].name,
credential=Credential(principal=user_data[0]["userid"], secret=user_data[0]["password"]),
)
if user[0].exists:
user[0].action.delete()
@pytest.mark.tier(1)
@pytest.mark.meta(automates=[1687597])
@pytest.mark.provider([VMwareProvider], selector=ONE_PER_TYPE, override=True)
def test_retire_vm_with_vm_user_role(new_user, appliance, testing_vm):
"""
Bugzilla:
1687597
Polarion:
assignee: ghubale
initialEstimate: 1/8h
caseposneg: positive
startsin: 5.10
casecomponent: Automate
setup:
1. Provision vm
testSteps:
1. Create custom user with 'EvmRole_vm-user' role
2. Retire VM by log-in to custom user
"""
# Log in with new user to retire the vm
with new_user:
view = navigate_to(testing_vm.parent, "All")
view.entities.get_entity(name=testing_vm.name, surf_pages=True).check()
assert view.toolbar.lifecycle.item_enabled("Retire selected items")
testing_vm.retire()
assert testing_vm.wait_for_vm_state_change(desired_state="retired", timeout=720,
from_details=True)
@pytest.fixture(params=['archived', 'orphaned'])
def archive_orphan_vm(request, provider, testing_vm):
"""This fixture is used to create archived or orphaned VM"""
if request.param == "archived":
        # Archive the VM by deleting it from the provider
testing_vm.mgmt.delete()
testing_vm.wait_for_vm_state_change(desired_state='archived', timeout=720,
from_details=False, from_any_provider=True)
else:
# Orphan VM by removing provider from CFME
provider.delete_if_exists(cancel=False)
testing_vm.wait_for_vm_state_change(desired_state='orphaned', timeout=720,
from_details=False, from_any_provider=True)
yield request.param, testing_vm
@pytest.mark.meta(automates=[1655477, 1686015])
def test_power_options_on_archived_orphaned_vms_all_page(appliance, archive_orphan_vm):
"""This test case is to check Power option drop-down button is disabled on archived and orphaned
VMs all page. Also it performs the power operations on vm and checked expected flash messages.
Bugzilla:
1655477
1686015
Polarion:
assignee: ghubale
initialEstimate: 1/2h
caseimportance: low
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: Control
tags: power
testSteps:
1. Add infrastructure provider
2. Navigate to Archived or orphaned VMs all page
3. Select any VM and click on power option drop-down
"""
infra_vms = appliance.collections.infra_vms
state, testing_vm = archive_orphan_vm
if state == "archived":
view = navigate_to(infra_vms, 'ArchivedAll')
# Selecting particular archived vm
testing_vm.find_quadicon(from_archived_all=True).check()
else:
view = navigate_to(infra_vms, 'OrphanedAll')
# Selecting particular orphaned vm
testing_vm.find_quadicon(from_orphaned_all=True).check()
# After selecting particular archived/orphaned vm; 'Power' drop down gets enabled.
# Reading all the options available in 'power' drop down
for action in view.toolbar.power.items:
# Performing power actions on archived/orphaned vm
view.toolbar.power.item_select(action, handle_alert=True)
if action == 'Power On':
action = 'Start'
elif action == 'Power Off':
action = 'Stop'
view.flash.assert_message(f'{action} action does not apply to selected items')
view.flash.dismiss()
|
gpl-2.0
|
NeovaHealth/odoo
|
addons/stock_account/wizard/stock_return_picking.py
|
342
|
2715
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_return_picking(osv.osv_memory):
_inherit = 'stock.return.picking'
_columns = {
        'invoice_state': fields.selection([('2binvoiced', 'To be refunded/invoiced'), ('none', 'No invoicing')], 'Invoicing', required=True),
}
def default_get(self, cr, uid, fields, context=None):
res = super(stock_return_picking, self).default_get(cr, uid, fields, context=context)
record_id = context and context.get('active_id', False) or False
pick_obj = self.pool.get('stock.picking')
pick = pick_obj.browse(cr, uid, record_id, context=context)
if pick:
if 'invoice_state' in fields:
                if pick.invoice_state == 'invoiced':
res.update({'invoice_state': '2binvoiced'})
else:
res.update({'invoice_state': 'none'})
return res
def _create_returns(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.browse(cr, uid, ids[0], context=context)
new_picking, picking_type_id = super(stock_return_picking, self)._create_returns(cr, uid, ids, context=context)
if data.invoice_state == '2binvoiced':
pick_obj = self.pool.get("stock.picking")
move_obj = self.pool.get("stock.move")
move_ids = [x.id for x in pick_obj.browse(cr, uid, new_picking, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'invoice_state': '2binvoiced'})
return new_picking, picking_type_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
aperigault/ansible
|
lib/ansible/modules/storage/netapp/na_ontap_cluster.py
|
26
|
10668
|
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_cluster
short_description: NetApp ONTAP cluster - create, join, add license
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Create or join or apply licenses to ONTAP clusters
- Cluster join can be performed using only one of the parameters, either cluster_name or cluster_ip_address
options:
state:
description:
- Whether the specified cluster should exist or not.
choices: ['present']
default: present
cluster_name:
description:
- The name of the cluster to manage.
cluster_ip_address:
description:
- IP address of cluster to be joined
license_code:
description:
- License code to be applied to the cluster
license_package:
description:
- License package name of the license to be removed
node_serial_number:
description:
- Serial number of the cluster node
'''
EXAMPLES = """
- name: Create cluster
na_ontap_cluster:
state: present
cluster_name: new_cluster
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Add license to cluster
na_ontap_cluster:
state: present
cluster_name: FPaaS-A300-01
license_code: SGHLQDBBVAAAAAAAAAAAAAAAAAAA
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Join cluster
na_ontap_cluster:
state: present
cluster_ip_address: 10.61.184.181
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Join cluster
na_ontap_cluster:
state: present
cluster_name: FPaaS-A300-01
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
def local_cmp(a, b):
"""
    compares values only, not keys; keys must be the same in both dicts
    :param a: dict 1
    :param b: dict 2
    :return: number of differing values between the two dicts
"""
diff = [key for key in a if a[key] != b[key]]
return len(diff)
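# Hedged worked example: only values are compared, so with identical keys
#   local_cmp({'nfs': 'license', 'cifs': 'none'},
#             {'nfs': 'license', 'cifs': 'site'})  # -> 1 (one differing value)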
class NetAppONTAPCluster(object):
"""
object initialize and class methods
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present'], default='present'),
cluster_name=dict(required=False, type='str'),
cluster_ip_address=dict(required=False, type='str'),
license_code=dict(required=False, type='str'),
license_package=dict(required=False, type='str'),
node_serial_number=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True,
required_together=[
['license_package', 'node_serial_number']
]
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_licensing_status(self):
"""
Check licensing status
:return: package (key) and licensing status (value)
:rtype: dict
"""
license_status = netapp_utils.zapi.NaElement(
'license-v2-status-list-info')
try:
result = self.server.invoke_successfully(license_status,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error checking license status: %s" %
to_native(error), exception=traceback.format_exc())
return_dictionary = {}
license_v2_status = result.get_child_by_name('license-v2-status')
if license_v2_status:
for license_v2_status_info in license_v2_status.get_children():
package = license_v2_status_info.get_child_content('package')
status = license_v2_status_info.get_child_content('method')
return_dictionary[package] = status
return return_dictionary
def create_cluster(self):
"""
Create a cluster
"""
cluster_create = netapp_utils.zapi.NaElement.create_node_with_children(
'cluster-create', **{'cluster-name': self.parameters['cluster_name']})
try:
self.server.invoke_successfully(cluster_create,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
            # Error 36503 means the node is already in use.
if to_native(error.code) == "36503":
return False
else:
self.module.fail_json(msg='Error creating cluster %s: %s'
% (self.parameters['cluster_name'], to_native(error)),
exception=traceback.format_exc())
return True
def cluster_join(self):
"""
Add a node to an existing cluster
"""
if self.parameters.get('cluster_ip_address') is not None:
cluster_add_node = netapp_utils.zapi.NaElement.create_node_with_children(
'cluster-join', **{'cluster-ip-address': self.parameters['cluster_ip_address']})
for_fail_attribute = self.parameters.get('cluster_ip_address')
elif self.parameters.get('cluster_name') is not None:
cluster_add_node = netapp_utils.zapi.NaElement.create_node_with_children(
'cluster-join', **{'cluster-name': self.parameters['cluster_name']})
for_fail_attribute = self.parameters.get('cluster_name')
else:
return False
try:
self.server.invoke_successfully(cluster_add_node, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
            # Error 36503 means the node is already in use.
if to_native(error.code) == "36503":
return False
else:
self.module.fail_json(msg='Error adding node to cluster %s: %s'
% (for_fail_attribute, to_native(error)),
exception=traceback.format_exc())
return True
def license_v2_add(self):
"""
Apply a license to cluster
"""
license_add = netapp_utils.zapi.NaElement.create_node_with_children('license-v2-add')
license_add.add_node_with_children('codes', **{'license-code-v2': self.parameters['license_code']})
try:
self.server.invoke_successfully(license_add, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error adding license %s: %s'
% (self.parameters['license_code'], to_native(error)),
exception=traceback.format_exc())
def license_v2_delete(self):
"""
Delete license from cluster
"""
license_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'license-v2-delete', **{'package': self.parameters['license_package'],
'serial-number': self.parameters['node_serial_number']})
try:
self.server.invoke_successfully(license_delete, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error deleting license: %s' % to_native(error),
exception=traceback.format_exc())
def autosupport_log(self):
"""
Autosupport log for cluster
:return:
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_cluster", cserver)
def apply(self):
"""
Apply action to cluster
"""
property_changed = False
create_flag = False
join_flag = False
self.autosupport_log()
license_status = self.get_licensing_status()
if self.module.check_mode:
pass
else:
if self.parameters.get('state') == 'present':
if self.parameters.get('cluster_name') is not None:
create_flag = self.create_cluster()
if not create_flag:
join_flag = self.cluster_join()
if self.parameters.get('license_code') is not None:
self.license_v2_add()
property_changed = True
if self.parameters.get('license_package') is not None and\
self.parameters.get('node_serial_number') is not None:
if license_status.get(str(self.parameters.get('license_package')).lower()) != 'none':
self.license_v2_delete()
property_changed = True
if property_changed:
new_license_status = self.get_licensing_status()
if local_cmp(license_status, new_license_status) == 0:
property_changed = False
changed = property_changed or create_flag or join_flag
self.module.exit_json(changed=changed)
def main():
"""
Create object and call apply
"""
cluster_obj = NetAppONTAPCluster()
cluster_obj.apply()
if __name__ == '__main__':
main()
|
gpl-3.0
|
linked67/p2pool-lire
|
wstools/TimeoutSocket.py
|
293
|
5293
|
"""Based on code from timeout_socket.py, with some tweaks for compatibility.
These tweaks should really be rolled back into timeout_socket, but it's
not totally clear who is maintaining it at this point. In the meantime,
we'll use a different module name for our tweaked version to avoid any
confusion.
The original timeout_socket is by:
Scott Cotton <[email protected]>
Lloyd Zusman <[email protected]>
Phil Mayes <[email protected]>
Piers Lauder <[email protected]>
Radovan Garabik <[email protected]>
"""
ident = "$Id$"
import string, socket, select, errno
WSAEINVAL = getattr(errno, 'WSAEINVAL', 10022)
class TimeoutSocket:
"""A socket imposter that supports timeout limits."""
def __init__(self, timeout=20, sock=None):
self.timeout = float(timeout)
self.inbuf = ''
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock = sock
self.sock.setblocking(0)
self._rbuf = ''
self._wbuf = ''
def __getattr__(self, name):
# Delegate to real socket attributes.
return getattr(self.sock, name)
def connect(self, *addr):
timeout = self.timeout
sock = self.sock
try:
# Non-blocking mode
sock.setblocking(0)
apply(sock.connect, addr)
sock.setblocking(timeout != 0)
return 1
        except socket.error, why:
if not timeout:
raise
sock.setblocking(1)
if len(why.args) == 1:
code = 0
else:
code, why = why
if code not in (
errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK
):
raise
r,w,e = select.select([],[sock],[],timeout)
if w:
try:
apply(sock.connect, addr)
return 1
            except socket.error, why:
if len(why.args) == 1:
code = 0
else:
code, why = why
if code in (errno.EISCONN, WSAEINVAL):
return 1
raise
raise TimeoutError('socket connect() timeout.')
def send(self, data, flags=0):
total = len(data)
next = 0
while 1:
r, w, e = select.select([],[self.sock], [], self.timeout)
if w:
buff = data[next:next + 8192]
sent = self.sock.send(buff, flags)
next = next + sent
if next == total:
return total
continue
raise TimeoutError('socket send() timeout.')
def recv(self, amt, flags=0):
if select.select([self.sock], [], [], self.timeout)[0]:
return self.sock.recv(amt, flags)
raise TimeoutError('socket recv() timeout.')
buffsize = 4096
handles = 1
def makefile(self, mode="r", buffsize=-1):
self.handles = self.handles + 1
self.mode = mode
return self
def close(self):
self.handles = self.handles - 1
if self.handles == 0 and self.sock.fileno() >= 0:
self.sock.close()
def read(self, n=-1):
if not isinstance(n, type(1)):
n = -1
if n >= 0:
k = len(self._rbuf)
if n <= k:
data = self._rbuf[:n]
self._rbuf = self._rbuf[n:]
return data
n = n - k
L = [self._rbuf]
self._rbuf = ""
while n > 0:
new = self.recv(max(n, self.buffsize))
if not new: break
k = len(new)
if k > n:
L.append(new[:n])
self._rbuf = new[n:]
break
L.append(new)
n = n - k
return "".join(L)
k = max(4096, self.buffsize)
L = [self._rbuf]
self._rbuf = ""
while 1:
new = self.recv(k)
if not new: break
L.append(new)
k = min(k*2, 1024**2)
return "".join(L)
def readline(self, limit=-1):
data = ""
i = self._rbuf.find('\n')
while i < 0 and not (0 < limit <= len(self._rbuf)):
new = self.recv(self.buffsize)
if not new: break
i = new.find('\n')
if i >= 0: i = i + len(self._rbuf)
self._rbuf = self._rbuf + new
if i < 0: i = len(self._rbuf)
else: i = i+1
if 0 <= limit < len(self._rbuf): i = limit
data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
return data
def readlines(self, sizehint = 0):
total = 0
list = []
while 1:
line = self.readline()
if not line: break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
def writelines(self, list):
self.send(''.join(list))
def write(self, data):
self.send(data)
def flush(self):
pass
class TimeoutError(Exception):
pass
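# Hedged usage sketch (hypothetical host): TimeoutSocket raises TimeoutError
# instead of blocking indefinitely on slow peers.
#   s = TimeoutSocket(timeout=5)
#   s.connect(('example.com', 80))
#   s.send('GET / HTTP/1.0\r\n\r\n')
#   data = s.read()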
|
gpl-3.0
|
sugartom/tensorflow-alien
|
tensorflow/contrib/bayesflow/__init__.py
|
57
|
1871
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for representing Bayesian computation.
## This package provides classes for Bayesian computation with TensorFlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.contrib.bayesflow.python.ops import entropy
from tensorflow.contrib.bayesflow.python.ops import monte_carlo
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators
from tensorflow.contrib.bayesflow.python.ops import stochastic_graph
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor
from tensorflow.contrib.bayesflow.python.ops import stochastic_variables
from tensorflow.contrib.bayesflow.python.ops import variational_inference
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['entropy', 'monte_carlo',
'special_math', 'stochastic_gradient_estimators',
'stochastic_graph', 'stochastic_tensor',
'stochastic_variables', 'variational_inference']
remove_undocumented(__name__, _allowed_symbols)
|
apache-2.0
|
NEricN/RobotCSimulator
|
Python/App/Lib/site-packages/pip/backwardcompat/__init__.py
|
394
|
3756
|
"""Stuff that differs in different Python versions and platform
distributions."""
import os
import imp
import sys
import site
__all__ = ['WindowsError']
uses_pycache = hasattr(imp, 'cache_from_source')
class NeverUsedException(Exception):
"""this exception should never be raised"""
try:
WindowsError = WindowsError
except NameError:
WindowsError = NeverUsedException
try:
#new in Python 3.3
PermissionError = PermissionError
except NameError:
PermissionError = NeverUsedException
console_encoding = sys.__stdout__.encoding
if sys.version_info >= (3,):
from io import StringIO, BytesIO
from functools import reduce
from urllib.error import URLError, HTTPError
from queue import Queue, Empty
from urllib.request import url2pathname, urlretrieve, pathname2url
from email import message as emailmessage
import urllib.parse as urllib
import urllib.request as urllib2
import configparser as ConfigParser
import xmlrpc.client as xmlrpclib
import urllib.parse as urlparse
import http.client as httplib
def cmp(a, b):
return (a > b) - (a < b)
def b(s):
return s.encode('utf-8')
def u(s):
return s.decode('utf-8')
def console_to_str(s):
try:
return s.decode(console_encoding)
except UnicodeDecodeError:
return s.decode('utf_8')
def get_http_message_param(http_message, param, default_value):
return http_message.get_param(param, default_value)
bytes = bytes
string_types = (str,)
raw_input = input
else:
from cStringIO import StringIO
from urllib2 import URLError, HTTPError
from Queue import Queue, Empty
from urllib import url2pathname, urlretrieve, pathname2url
from email import Message as emailmessage
import urllib
import urllib2
import urlparse
import ConfigParser
import xmlrpclib
import httplib
def b(s):
return s
def u(s):
return s
def console_to_str(s):
return s
def get_http_message_param(http_message, param, default_value):
result = http_message.getparam(param)
return result or default_value
bytes = str
string_types = (basestring,)
reduce = reduce
cmp = cmp
raw_input = raw_input
BytesIO = StringIO
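# Doctest-style illustration (not part of pip): the aliases above give call
# sites a single spelling on both 2.x and 3.x.
#
#   >>> u(b('pip')) == 'pip'
#   True
#   >>> cmp(1, 2), cmp(2, 2), cmp(3, 2)
#   (-1, 0, 1)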
from distutils.sysconfig import get_python_lib, get_python_version
#site.USER_SITE was created in py2.6
user_site = getattr(site, 'USER_SITE', None)
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
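# Example (mirrors itertools.product, which this reimplements):
#   >>> list(product('AB', 'xy'))
#   [('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y')]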
def get_path_uid(path):
"""
Return path's uid.
Does not follow symlinks: https://github.com/pypa/pip/pull/935#discussion_r5307003
Placed this function in backwardcompat due to differences on AIX and Jython,
that should eventually go away.
:raises OSError: When path is a symlink or can't be read.
"""
if hasattr(os, 'O_NOFOLLOW'):
fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
file_uid = os.fstat(fd).st_uid
os.close(fd)
else: # AIX and Jython
        # WARNING: time-of-check/time-of-use (TOCTOU) vulnerability, but best we can do w/o NOFOLLOW
if not os.path.islink(path):
# older versions of Jython don't have `os.fstat`
file_uid = os.stat(path).st_uid
else:
# raise OSError for parity with os.O_NOFOLLOW above
raise OSError("%s is a symlink; Will not return uid for symlinks" % path)
return file_uid
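# Illustrative use (POSIX only, since os.getuid does not exist on Windows):
#   if get_path_uid('/tmp/somefile') == os.getuid():
#       ...  # the current user owns the path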
|
apache-2.0
|
localu/metagoofil
|
hachoir_parser/container/swf.py
|
84
|
16477
|
"""
SWF (Macromedia/Adobe Flash) file parser.
Documentation:
- Alexis' SWF Reference:
http://www.m2osw.com/swf_alexref.html
- http://www.half-serious.com/swf/format/
- http://www.anotherbigidea.com/javaswf/
- http://www.gnu.org/software/gnash/
Author: Victor Stinner
Creation date: 29 October 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, UInt8, UInt16, Int32, UInt32, Int64, CString, Enum,
Bytes, RawBytes, NullBits, String, SubFile)
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_core.text_handler import textHandler, filesizeHandler
from hachoir_core.tools import paddingSize, humanFrequency
from hachoir_parser.image.common import RGB
from hachoir_parser.image.jpeg import JpegChunk, JpegFile
from hachoir_core.stream import StringInputStream, ConcatStream
from hachoir_parser.common.deflate import Deflate, has_deflate
from hachoir_parser.container.action_script import parseActionScript, parseABC
import math
# Maximum file size (50 MB)
MAX_FILE_SIZE = 50 * 1024 * 1024
TWIPS = 20  # SWF coordinate unit: 20 twips per pixel
class RECT(FieldSet):
endian = BIG_ENDIAN
def createFields(self):
yield Bits(self, "nbits", 5)
nbits = self["nbits"].value
if not nbits:
raise ParserError("SWF parser: Invalid RECT field size (0)")
yield Bits(self, "xmin", nbits, "X minimum in twips")
yield Bits(self, "xmax", nbits, "X maximum in twips")
yield Bits(self, "ymin", nbits, "Y minimum in twips")
yield Bits(self, "ymax", nbits, "Y maximum in twips")
size = paddingSize(self.current_size, 8)
if size:
yield NullBits(self, "padding", size)
def getWidth(self):
return math.ceil(float(self["xmax"].value) / TWIPS)
def getHeight(self):
return math.ceil(float(self["ymax"].value) / TWIPS)
def createDescription(self):
return "Rectangle: %ux%u" % (self.getWidth(), self.getHeight())
class FixedFloat16(FieldSet):
def createFields(self):
yield UInt8(self, "float_part")
yield UInt8(self, "int_part")
def createValue(self):
return self["int_part"].value + float(self["float_part"].value) / 256
def parseBackgroundColor(parent, size):
yield RGB(parent, "color")
def bit2hertz(field):
return humanFrequency(5512.5 * (2 ** field.value))
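# The 2-bit rate fields thus decode as 5512.5 Hz * 2**n:
#   n=0 -> 5512.5 Hz, n=1 -> 11025 Hz, n=2 -> 22050 Hz, n=3 -> 44100 Hz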
SOUND_CODEC_MP3 = 2
SOUND_CODEC = {
0: "RAW",
1: "ADPCM",
SOUND_CODEC_MP3: "MP3",
3: "Uncompressed",
6: "Nellymoser",
}
class SoundEnvelope(FieldSet):
def createFields(self):
yield UInt8(self, "count")
for index in xrange(self["count"].value):
yield UInt32(self, "mark44[]")
yield UInt16(self, "level0[]")
yield UInt16(self, "level1[]")
def parseSoundBlock(parent, size):
    # TODO: determine the codec; requires tracking the most recent "def_sound[]"/sound header tag
# if not (...)sound_header:
# raise ParserError("Sound block without header")
if True: #sound_header == SOUND_CODEC_MP3:
yield UInt16(parent, "samples")
yield UInt16(parent, "left")
size = (parent.size - parent.current_size) // 8
if size:
yield RawBytes(parent, "music_data", size)
def parseStartSound(parent, size):
yield UInt16(parent, "sound_id")
yield Bit(parent, "has_in_point")
yield Bit(parent, "has_out_point")
yield Bit(parent, "has_loops")
yield Bit(parent, "has_envelope")
yield Bit(parent, "no_multiple")
yield Bit(parent, "stop_playback")
yield NullBits(parent, "reserved", 2)
if parent["has_in_point"].value:
yield UInt32(parent, "in_point")
if parent["has_out_point"].value:
yield UInt32(parent, "out_point")
if parent["has_loops"].value:
yield UInt16(parent, "loop_count")
if parent["has_envelope"].value:
yield SoundEnvelope(parent, "envelope")
def parseDefineSound(parent, size):
yield UInt16(parent, "sound_id")
yield Bit(parent, "is_stereo")
yield Bit(parent, "is_16bit")
yield textHandler(Bits(parent, "rate", 2), bit2hertz)
yield Enum(Bits(parent, "codec", 4), SOUND_CODEC)
yield UInt32(parent, "sample_count")
if parent["codec"].value == SOUND_CODEC_MP3:
yield UInt16(parent, "len")
size = (parent.size - parent.current_size) // 8
if size:
yield RawBytes(parent, "music_data", size)
def parseSoundHeader(parent, size):
yield Bit(parent, "playback_is_stereo")
yield Bit(parent, "playback_is_16bit")
yield textHandler(Bits(parent, "playback_rate", 2), bit2hertz)
yield NullBits(parent, "reserved", 4)
yield Bit(parent, "sound_is_stereo")
yield Bit(parent, "sound_is_16bit")
yield textHandler(Bits(parent, "sound_rate", 2), bit2hertz)
yield Enum(Bits(parent, "codec", 4), SOUND_CODEC)
yield UInt16(parent, "sample_count")
if parent["codec"].value == 2:
yield UInt16(parent, "latency_seek")
class JpegHeader(FieldSet):
endian = BIG_ENDIAN
def createFields(self):
count = 1
while True:
chunk = JpegChunk(self, "jpeg_chunk[]")
yield chunk
if 1 < count and chunk["type"].value in (JpegChunk.TAG_SOI, JpegChunk.TAG_EOI):
break
count += 1
def parseJpeg(parent, size):
yield UInt16(parent, "char_id", "Character identifier")
size -= 2
code = parent["code"].value
if code != Tag.TAG_BITS:
if code == Tag.TAG_BITS_JPEG3:
yield UInt32(parent, "alpha_offset", "Character identifier")
size -= 4
addr = parent.absolute_address + parent.current_size + 16
if parent.stream.readBytes(addr, 2) in ("\xff\xdb", "\xff\xd8"):
header = JpegHeader(parent, "jpeg_header")
yield header
hdr_size = header.size // 8
size -= hdr_size
else:
hdr_size = 0
if code == Tag.TAG_BITS_JPEG3:
img_size = parent["alpha_offset"].value - hdr_size
else:
img_size = size
else:
img_size = size
yield SubFile(parent, "image", img_size, "JPEG picture", parser=JpegFile)
if code == Tag.TAG_BITS_JPEG3:
size = (parent.size - parent.current_size) // 8
yield RawBytes(parent, "alpha", size, "Image data")
def parseVideoFrame(parent, size):
yield UInt16(parent, "stream_id")
yield UInt16(parent, "frame_num")
if 4 < size:
yield RawBytes(parent, "video_data", size-4)
class Export(FieldSet):
def createFields(self):
yield UInt16(self, "object_id")
yield CString(self, "name")
def parseExport(parent, size):
yield UInt16(parent, "count")
for index in xrange(parent["count"].value):
yield Export(parent, "export[]")
def parseProductInfo(parent, size):
yield Int32(parent, "product_id")
yield Int32(parent, "edition")
yield UInt8(parent, "major_version")
yield UInt8(parent, "minor_version")
yield Int64(parent, "build_number")
yield Int64(parent, "compilation_date")
def parseScriptLimits(parent, size):
yield UInt16(parent, "max_recursion_limit")
yield UInt16(parent, "timeout_seconds", "Seconds of processing until the SWF is considered 'stuck'")
def parseSymbolClass(parent, size):
yield UInt16(parent, "count")
for index in xrange(parent["count"].value):
yield UInt16(parent, "symbol_id[]")
yield CString(parent, "symbol_name[]")
def parseBinaryData(parent, size):
yield UInt16(parent, "data_id")
yield UInt32(parent, "reserved")
if size > 6:
yield RawBytes(parent, "data", size-6)
class Tag(FieldSet):
TAG_BITS = 6
TAG_BITS_JPEG2 = 32
TAG_BITS_JPEG3 = 35
TAG_DO_ABC_DEFINE = 82
TAG_INFO = {
# SWF version 1.0
0: ("end[]", "End", None),
1: ("show_frame[]", "Show frame", None),
2: ("def_shape[]", "Define shape", None),
3: ("free_char[]", "Free character", None),
4: ("place_obj[]", "Place object", None),
5: ("remove_obj[]", "Remove object", None),
6: ("def_bits[]", "Define bits", parseJpeg),
7: ("def_but[]", "Define button", None),
8: ("jpg_table", "JPEG tables", None),
9: ("bkgd_color[]", "Set background color", parseBackgroundColor),
10: ("def_font[]", "Define font", None),
11: ("def_text[]", "Define text", None),
12: ("action[]", "Action script", parseActionScript),
13: ("def_font_info[]", "Define font info", None),
# SWF version 2.0
14: ("def_sound[]", "Define sound", parseDefineSound),
15: ("start_sound[]", "Start sound", parseStartSound),
16: ("stop_sound[]", "Stop sound", None),
17: ("def_but_sound[]", "Define button sound", None),
18: ("sound_hdr", "Sound stream header", parseSoundHeader),
19: ("sound_blk[]", "Sound stream block", parseSoundBlock),
20: ("def_bits_lossless[]", "Define bits lossless", None),
21: ("def_bits_jpeg2[]", "Define bits JPEG 2", parseJpeg),
22: ("def_shape2[]", "Define shape 2", None),
23: ("def_but_cxform[]", "Define button CXFORM", None),
24: ("protect", "File is protected", None),
# SWF version 3.0
25: ("path_are_ps[]", "Paths are Postscript", None),
26: ("place_obj2[]", "Place object 2", None),
28: ("remove_obj2[]", "Remove object 2", None),
29: ("sync_frame[]", "Synchronize frame", None),
31: ("free_all[]", "Free all", None),
32: ("def_shape3[]", "Define shape 3", None),
33: ("def_text2[]", "Define text 2", None),
34: ("def_but2[]", "Define button2", None),
35: ("def_bits_jpeg3[]", "Define bits JPEG 3", parseJpeg),
36: ("def_bits_lossless2[]", "Define bits lossless 2", None),
39: ("def_sprite[]", "Define sprite", None),
40: ("name_character[]", "Name character", None),
41: ("product_info", "Generator product info", parseProductInfo),
42: ("generator_text[]", "Generator text", None),
43: ("frame_label[]", "Frame label", None),
45: ("sound_hdr2[]", "Sound stream header2", parseSoundHeader),
46: ("def_morph_shape[]", "Define morph shape", None),
47: ("gen_frame[]", "Generate frame", None),
48: ("def_font2[]", "Define font 2", None),
49: ("tpl_command[]", "Template command", None),
# SWF version 4.0
37: ("def_text_field[]", "Define text field", None),
38: ("def_quicktime_movie[]", "Define QuickTime movie", None),
# SWF version 5.0
50: ("def_cmd_obj[]", "Define command object", None),
51: ("flash_generator", "Flash generator", None),
52: ("gen_ext_font[]", "Gen external font", None),
56: ("export[]", "Export", parseExport),
57: ("import[]", "Import", None),
58: ("ebnable_debug", "Enable debug", None),
# SWF version 6.0
59: ("do_init_action[]", "Do init action", None),
60: ("video_str[]", "Video stream", None),
61: ("video_frame[]", "Video frame", parseVideoFrame),
62: ("def_font_info2[]", "Define font info 2", None),
63: ("mx4[]", "MX4", None),
64: ("enable_debug2", "Enable debugger 2", None),
# SWF version 7.0
65: ("script_limits[]", "Script limits", parseScriptLimits),
66: ("tab_index[]", "Set tab index", None),
# SWF version 8.0
69: ("file_attr[]", "File attributes", None),
70: ("place_obj3[]", "Place object 3", None),
71: ("import2[]", "Import a definition list from another movie", None),
73: ("def_font_align[]", "Define font alignment zones", None),
74: ("csm_txt_set[]", "CSM text settings", None),
75: ("def_font3[]", "Define font text 3", None),
77: ("metadata[]", "XML code describing the movie", None),
78: ("def_scale_grid[]", "Define scaling factors", None),
83: ("def_shape4[]", "Define shape 4", None),
84: ("def_morph2[]", "Define a morphing shape 2", None),
# SWF version 9.0
72: ("do_abc[]", "SWF 9 ActionScript container; actions only", parseABC),
76: ("symbol_class[]", "Instantiate objects from a set of classes", parseSymbolClass),
82: ("do_abc_define[]", "SWF 9 ActionScript container; identifier, name, actions", parseABC),
86: ("def_scene_frame[]", "Define raw data for scenes and frames", None),
87: ("def_binary_data[]", "Defines a buffer of any size with any binary user data", parseBinaryData),
88: ("def_font_name[]", "Define the legal font name and copyright", None),
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
size = self["length"].value
if self[0].name == "length_ext":
self._size = (6+size) * 8
else:
self._size = (2+size) * 8
code = self["code"].value
if code in self.TAG_INFO:
self._name, self._description, self.parser = self.TAG_INFO[code]
else:
self.parser = None
def createFields(self):
if self.stream.readBits(self.absolute_address, 6, self.endian) == 63:
yield Bits(self, "length_ext", 6)
yield Bits(self, "code", 10)
yield filesizeHandler(UInt32(self, "length"))
else:
yield filesizeHandler(Bits(self, "length", 6))
yield Bits(self, "code", 10)
size = self["length"].value
if 0 < size:
if self.parser:
for field in self.parser(self, size):
yield field
else:
yield RawBytes(self, "data", size)
def createDescription(self):
return "Tag: %s (%s)" % (self["code"].display, self["length"].display)
class SwfFile(Parser):
VALID_VERSIONS = set(xrange(1, 10+1))
PARSER_TAGS = {
"id": "swf",
"category": "container",
"file_ext": ["swf"],
"mime": (u"application/x-shockwave-flash",),
"min_size": 64,
"description": u"Macromedia Flash data"
}
PARSER_TAGS["magic"] = []
for version in VALID_VERSIONS:
PARSER_TAGS["magic"].append(("FWS%c" % version, 0))
PARSER_TAGS["magic"].append(("CWS%c" % version, 0))
endian = LITTLE_ENDIAN
SWF_SCALE_FACTOR = 1.0 / 20
def validate(self):
if self.stream.readBytes(0, 3) not in ("FWS", "CWS"):
return "Wrong file signature"
if self["version"].value not in self.VALID_VERSIONS:
return "Unknown version"
if MAX_FILE_SIZE < self["filesize"].value:
return "File too big (%u)" % self["filesize"].value
if self["signature"].value == "FWS":
if self["rect/padding"].value != 0:
return "Unknown rectangle padding value"
return True
def createFields(self):
yield String(self, "signature", 3, "SWF format signature", charset="ASCII")
yield UInt8(self, "version")
yield filesizeHandler(UInt32(self, "filesize"))
if self["signature"].value != "CWS":
yield RECT(self, "rect")
yield FixedFloat16(self, "frame_rate")
yield UInt16(self, "frame_count")
while not self.eof:
yield Tag(self, "tag[]")
else:
size = (self.size - self.current_size) // 8
if has_deflate:
data = Deflate(Bytes(self, "compressed_data", size), False)
def createInputStream(cis, source=None, **args):
stream = cis(source=source)
header = StringInputStream("FWS" + self.stream.readBytes(3*8, 5))
args.setdefault("tags",[]).append(("class", SwfFile))
return ConcatStream((header, stream), source=stream.source, **args)
data.setSubIStream(createInputStream)
yield data
else:
yield Bytes(self, "compressed_data", size)
def createDescription(self):
desc = ["version %u" % self["version"].value]
if self["signature"].value == "CWS":
desc.append("compressed")
return u"Macromedia Flash data: %s" % (", ".join(desc))
def createContentSize(self):
if self["signature"].value == "FWS":
return self["filesize"].value * 8
else:
# TODO: Size of compressed Flash?
return None
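# A minimal sketch (not used by the parser) of the fixed 8-byte header that
# validate() checks above: 3-byte signature, version byte, and the declared
# uncompressed size as a little-endian UInt32.
def read_swf_header(data):
    import struct
    signature, version = data[:3], ord(data[3:4])
    (filesize,) = struct.unpack('<I', data[4:8])
    assert signature in ("FWS", "CWS"), "not a SWF stream"
    return signature, version, filesize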
|
gpl-2.0
|
chen0031/Dato-Core
|
src/unity/python/doc/scripts/doxypy-0.4.2.py
|
15
|
14005
|
#!/usr/bin/env python
__applicationName__ = "doxypy"
__blurb__ = """
doxypy is an input filter for Doxygen. It preprocesses python
files so that docstrings of classes and functions are reformatted
into Doxygen-conform documentation blocks.
"""
__doc__ = __blurb__ + \
"""
In order to make Doxygen preprocess files through doxypy, simply
add the following lines to your Doxyfile:
FILTER_SOURCE_FILES = YES
INPUT_FILTER = "python /path/to/doxypy.py"
"""
__version__ = "0.4.2"
__date__ = "14th October 2009"
__website__ = "http://code.foosel.org/doxypy"
__author__ = (
"Philippe 'demod' Neumann (doxypy at demod dot org)",
"Gina 'foosel' Haeussge (gina at foosel dot net)"
)
__licenseName__ = "GPL v2"
__license__ = """This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import re
from optparse import OptionParser, OptionGroup
class FSM(object):
"""Implements a finite state machine.
Transitions are given as 4-tuples, consisting of an origin state, a target
state, a condition for the transition (given as a reference to a function
which gets called with a given piece of input) and a pointer to a function
to be called upon the execution of the given transition.
"""
"""
@var transitions holds the transitions
@var current_state holds the current state
@var current_input holds the current input
@var current_transition hold the currently active transition
"""
    def __init__(self, start_state=None, transitions=None):
        # Use None as the default to avoid sharing one mutable list
        # between FSM instances.
        self.transitions = transitions if transitions is not None else []
self.current_state = start_state
self.current_input = None
self.current_transition = None
def setStartState(self, state):
self.current_state = state
def addTransition(self, from_state, to_state, condition, callback):
self.transitions.append([from_state, to_state, condition, callback])
def makeTransition(self, input):
"""Makes a transition based on the given input.
@param input input to parse by the FSM
"""
for transition in self.transitions:
[from_state, to_state, condition, callback] = transition
if from_state == self.current_state:
match = condition(input)
if match:
self.current_state = to_state
self.current_input = input
self.current_transition = transition
if options.debug:
print >>sys.stderr, "# FSM: executing (%s -> %s) for line '%s'" % (from_state, to_state, input)
callback(match)
return
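# Illustrative only (hypothetical states/conditions; note that makeTransition
# consults the module-level `options` set up in optParse, so it must exist):
#
#   fsm = FSM("IDLE", [])
#   fsm.addTransition("IDLE", "RUN", lambda line: line == "go",
#                     lambda match: None)
#   fsm.makeTransition("go")   # fsm.current_state is now "RUN"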
class Doxypy(object):
def __init__(self):
string_prefixes = "[uU]?[rR]?"
self.start_single_comment_re = re.compile("^\s*%s(''')" % string_prefixes)
self.end_single_comment_re = re.compile("(''')\s*$")
self.start_double_comment_re = re.compile("^\s*%s(\"\"\")" % string_prefixes)
self.end_double_comment_re = re.compile("(\"\"\")\s*$")
self.single_comment_re = re.compile("^\s*%s(''').*(''')\s*$" % string_prefixes)
self.double_comment_re = re.compile("^\s*%s(\"\"\").*(\"\"\")\s*$" % string_prefixes)
self.defclass_re = re.compile("^(\s*)(def .+:|class .+:)")
self.empty_re = re.compile("^\s*$")
self.hashline_re = re.compile("^\s*#.*$")
self.importline_re = re.compile("^\s*(import |from .+ import)")
self.multiline_defclass_start_re = re.compile("^(\s*)(def|class)(\s.*)?$")
self.multiline_defclass_end_re = re.compile(":\s*$")
## Transition list format
# ["FROM", "TO", condition, action]
transitions = [
### FILEHEAD
# single line comments
["FILEHEAD", "FILEHEAD", self.single_comment_re.search, self.appendCommentLine],
["FILEHEAD", "FILEHEAD", self.double_comment_re.search, self.appendCommentLine],
# multiline comments
["FILEHEAD", "FILEHEAD_COMMENT_SINGLE", self.start_single_comment_re.search, self.appendCommentLine],
["FILEHEAD_COMMENT_SINGLE", "FILEHEAD", self.end_single_comment_re.search, self.appendCommentLine],
["FILEHEAD_COMMENT_SINGLE", "FILEHEAD_COMMENT_SINGLE", self.catchall, self.appendCommentLine],
["FILEHEAD", "FILEHEAD_COMMENT_DOUBLE", self.start_double_comment_re.search, self.appendCommentLine],
["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD", self.end_double_comment_re.search, self.appendCommentLine],
["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD_COMMENT_DOUBLE", self.catchall, self.appendCommentLine],
# other lines
["FILEHEAD", "FILEHEAD", self.empty_re.search, self.appendFileheadLine],
["FILEHEAD", "FILEHEAD", self.hashline_re.search, self.appendFileheadLine],
["FILEHEAD", "FILEHEAD", self.importline_re.search, self.appendFileheadLine],
["FILEHEAD", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch],
["FILEHEAD", "DEFCLASS_MULTI", self.multiline_defclass_start_re.search, self.resetCommentSearch],
["FILEHEAD", "DEFCLASS_BODY", self.catchall, self.appendFileheadLine],
### DEFCLASS
# single line comments
["DEFCLASS", "DEFCLASS_BODY", self.single_comment_re.search, self.appendCommentLine],
["DEFCLASS", "DEFCLASS_BODY", self.double_comment_re.search, self.appendCommentLine],
# multiline comments
["DEFCLASS", "COMMENT_SINGLE", self.start_single_comment_re.search, self.appendCommentLine],
["COMMENT_SINGLE", "DEFCLASS_BODY", self.end_single_comment_re.search, self.appendCommentLine],
["COMMENT_SINGLE", "COMMENT_SINGLE", self.catchall, self.appendCommentLine],
["DEFCLASS", "COMMENT_DOUBLE", self.start_double_comment_re.search, self.appendCommentLine],
["COMMENT_DOUBLE", "DEFCLASS_BODY", self.end_double_comment_re.search, self.appendCommentLine],
["COMMENT_DOUBLE", "COMMENT_DOUBLE", self.catchall, self.appendCommentLine],
# other lines
["DEFCLASS", "DEFCLASS", self.empty_re.search, self.appendDefclassLine],
["DEFCLASS", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch],
["DEFCLASS", "DEFCLASS_MULTI", self.multiline_defclass_start_re.search, self.resetCommentSearch],
["DEFCLASS", "DEFCLASS_BODY", self.catchall, self.stopCommentSearch],
### DEFCLASS_BODY
["DEFCLASS_BODY", "DEFCLASS", self.defclass_re.search, self.startCommentSearch],
["DEFCLASS_BODY", "DEFCLASS_MULTI", self.multiline_defclass_start_re.search, self.startCommentSearch],
["DEFCLASS_BODY", "DEFCLASS_BODY", self.catchall, self.appendNormalLine],
### DEFCLASS_MULTI
["DEFCLASS_MULTI", "DEFCLASS", self.multiline_defclass_end_re.search, self.appendDefclassLine],
["DEFCLASS_MULTI", "DEFCLASS_MULTI", self.catchall, self.appendDefclassLine],
]
self.fsm = FSM("FILEHEAD", transitions)
self.outstream = sys.stdout
self.output = []
self.comment = []
self.filehead = []
self.defclass = []
self.indent = ""
def __closeComment(self):
"""Appends any open comment block and triggering block to the output."""
if options.autobrief:
if len(self.comment) == 1 \
or (len(self.comment) > 2 and self.comment[1].strip() == ''):
self.comment[0] = self.__docstringSummaryToBrief(self.comment[0])
if self.comment:
block = self.makeCommentBlock()
self.output.extend(block)
if self.defclass:
self.output.extend(self.defclass)
def __docstringSummaryToBrief(self, line):
"""Adds \\brief to the docstrings summary line.
A \\brief is prepended, provided no other doxygen command is at the
start of the line.
"""
stripped = line.strip()
if stripped and not stripped[0] in ('@', '\\'):
return "\\brief " + line
else:
return line
def __flushBuffer(self):
"""Flushes the current outputbuffer to the outstream."""
if self.output:
try:
if options.debug:
print >>sys.stderr, "# OUTPUT: ", self.output
print >>self.outstream, "\n".join(self.output)
self.outstream.flush()
except IOError:
# Fix for FS#33. Catches "broken pipe" when doxygen closes
# stdout prematurely upon usage of INPUT_FILTER, INLINE_SOURCES
# and FILTER_SOURCE_FILES.
pass
self.output = []
def catchall(self, input):
"""The catchall-condition, always returns true."""
return True
def resetCommentSearch(self, match):
"""Restarts a new comment search for a different triggering line.
Closes the current commentblock and starts a new comment search.
"""
if options.debug:
print >>sys.stderr, "# CALLBACK: resetCommentSearch"
self.__closeComment()
self.startCommentSearch(match)
def startCommentSearch(self, match):
"""Starts a new comment search.
Saves the triggering line, resets the current comment and saves
the current indentation.
"""
if options.debug:
print >>sys.stderr, "# CALLBACK: startCommentSearch"
self.defclass = [self.fsm.current_input]
self.comment = []
self.indent = match.group(1)
def stopCommentSearch(self, match):
"""Stops a comment search.
Closes the current commentblock, resets the triggering line and
appends the current line to the output.
"""
if options.debug:
print >>sys.stderr, "# CALLBACK: stopCommentSearch"
self.__closeComment()
self.defclass = []
self.output.append(self.fsm.current_input)
def appendFileheadLine(self, match):
"""Appends a line in the FILEHEAD state.
Closes the open comment block, resets it and appends the current line.
"""
if options.debug:
print >>sys.stderr, "# CALLBACK: appendFileheadLine"
self.__closeComment()
self.comment = []
self.output.append(self.fsm.current_input)
def appendCommentLine(self, match):
"""Appends a comment line.
        The comment delimiter is stripped from multiline starts and ends as
        well as from single-line comments.
"""
if options.debug:
print >>sys.stderr, "# CALLBACK: appendCommentLine"
(from_state, to_state, condition, callback) = self.fsm.current_transition
# single line comment
if (from_state == "DEFCLASS" and to_state == "DEFCLASS_BODY") \
or (from_state == "FILEHEAD" and to_state == "FILEHEAD"):
# remove comment delimiter from begin and end of the line
activeCommentDelim = match.group(1)
line = self.fsm.current_input
self.comment.append(line[line.find(activeCommentDelim)+len(activeCommentDelim):line.rfind(activeCommentDelim)])
if (to_state == "DEFCLASS_BODY"):
self.__closeComment()
self.defclass = []
# multiline start
elif from_state == "DEFCLASS" or from_state == "FILEHEAD":
# remove comment delimiter from begin of the line
activeCommentDelim = match.group(1)
line = self.fsm.current_input
self.comment.append(line[line.find(activeCommentDelim)+len(activeCommentDelim):])
# multiline end
elif to_state == "DEFCLASS_BODY" or to_state == "FILEHEAD":
# remove comment delimiter from end of the line
activeCommentDelim = match.group(1)
line = self.fsm.current_input
self.comment.append(line[0:line.rfind(activeCommentDelim)])
if (to_state == "DEFCLASS_BODY"):
self.__closeComment()
self.defclass = []
# in multiline comment
else:
# just append the comment line
self.comment.append(self.fsm.current_input)
def appendNormalLine(self, match):
"""Appends a line to the output."""
if options.debug:
print >>sys.stderr, "# CALLBACK: appendNormalLine"
self.output.append(self.fsm.current_input)
def appendDefclassLine(self, match):
"""Appends a line to the triggering block."""
if options.debug:
print >>sys.stderr, "# CALLBACK: appendDefclassLine"
self.defclass.append(self.fsm.current_input)
def makeCommentBlock(self):
"""Indents the current comment block with respect to the current
indentation level.
@returns a list of indented comment lines
"""
doxyStart = "##"
commentLines = self.comment
commentLines = map(lambda x: "%s# %s" % (self.indent, x), commentLines)
l = [self.indent + doxyStart]
l.extend(commentLines)
return l
def parse(self, input):
"""Parses a python file given as input string and returns the doxygen-
compatible representation.
@param input the python code to parse
@returns the modified python code
"""
lines = input.split("\n")
for line in lines:
self.fsm.makeTransition(line)
if self.fsm.current_state == "DEFCLASS":
self.__closeComment()
return "\n".join(self.output)
def parseFile(self, filename):
"""Parses a python file given as input string and returns the doxygen-
compatible representation.
@param input the python code to parse
@returns the modified python code
"""
f = open(filename, 'r')
for line in f:
self.parseLine(line.rstrip('\r\n'))
if self.fsm.current_state == "DEFCLASS":
self.__closeComment()
self.__flushBuffer()
f.close()
def parseLine(self, line):
"""Parse one line of python and flush the resulting output to the
outstream.
@param line the python code line to parse
"""
self.fsm.makeTransition(line)
self.__flushBuffer()
def optParse():
"""Parses commandline options."""
parser = OptionParser(prog=__applicationName__, version="%prog " + __version__)
parser.set_usage("%prog [options] filename")
parser.add_option("--autobrief",
action="store_true", dest="autobrief",
help="use the docstring summary line as \\brief description"
)
parser.add_option("--debug",
action="store_true", dest="debug",
help="enable debug output on stderr"
)
## parse options
global options
(options, filename) = parser.parse_args()
if not filename:
print >>sys.stderr, "No filename given."
sys.exit(-1)
return filename[0]
def main():
"""Starts the parser on the file given by the filename as the first
argument on the commandline.
"""
filename = optParse()
fsm = Doxypy()
fsm.parseFile(filename)
if __name__ == "__main__":
main()
|
agpl-3.0
|
rplevka/selenium
|
py/test/selenium/webdriver/common/rendered_webelement_tests.py
|
63
|
3233
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
from selenium.webdriver.common.by import By
class RenderedWebElementTests(unittest.TestCase):
@pytest.mark.ignore_chrome
def testShouldPickUpStyleOfAnElement(self):
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="green-parent")
backgroundColour = element.value_of_css_property("background-color")
self.assertEqual("rgba(0, 128, 0, 1)", backgroundColour)
element = self.driver.find_element(by=By.ID, value="red-item")
backgroundColour = element.value_of_css_property("background-color")
self.assertEqual("rgba(255, 0, 0, 1)", backgroundColour)
@pytest.mark.ignore_chrome
def testShouldAllowInheritedStylesToBeUsed(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs has an issue with getting the right value for background-color")
self._loadPage("javascriptPage")
element = self.driver.find_element(by=By.ID, value="green-item")
backgroundColour = element.value_of_css_property("background-color")
self.assertEqual("transparent", backgroundColour)
def testShouldCorrectlyIdentifyThatAnElementHasWidth(self):
self._loadPage("xhtmlTest")
shrinko = self.driver.find_element(by=By.ID, value="linkId")
size = shrinko.size
self.assertTrue(size["width"] > 0, "Width expected to be greater than 0")
self.assertTrue(size["height"] > 0, "Height expected to be greater than 0")
def testShouldBeAbleToDetermineTheRectOfAnElement(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not support rect command")
self._loadPage("xhtmlTest")
element = self.driver.find_element(By.ID, "username")
rect = element.rect
self.assertTrue(rect["x"] > 0, "Element should not be in the top left")
self.assertTrue(rect["y"] > 0, "Element should not be in the top left")
self.assertTrue(rect["width"] > 0, "Width expected to be greater than 0")
self.assertTrue(rect["height"] > 0, "Height expected to be greater than 0")
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
|
apache-2.0
|
slohse/ansible
|
lib/ansible/modules/system/alternatives.py
|
29
|
5286
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Gabe Mulley <[email protected]>
# (c) 2015, David Wittman <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: alternatives
short_description: Manages alternative programs for common commands
description:
- Manages symbolic links using the 'update-alternatives' tool
- Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
version_added: "1.6"
author:
- "David Wittman (@DavidWittman)"
- "Gabe Mulley (@mulby)"
options:
name:
description:
- The generic name of the link.
required: true
path:
description:
- The path to the real executable that the link should point to.
required: true
link:
description:
- The path to the symbolic link that should point to the real executable.
- This option is always required on RHEL-based distributions. On Debian-based distributions this option is
required when the alternative I(name) is unknown to the system.
required: false
priority:
description:
- The priority of the alternative
required: false
default: 50
version_added: "2.2"
requirements: [ update-alternatives ]
'''
EXAMPLES = '''
- name: correct java version selected
alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- name: alternatives link created
alternatives:
name: hadoop-conf
link: /etc/hadoop/conf
path: /etc/hadoop/conf.ansible
- name: make java 32 bit an alternative with low priority
alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
priority: -10
'''
import os
import re
import subprocess
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
path=dict(required=True, type='path'),
link=dict(required=False, type='path'),
priority=dict(required=False, type='int',
default=50),
),
supports_check_mode=True,
)
params = module.params
name = params['name']
path = params['path']
link = params['link']
priority = params['priority']
UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True)
current_path = None
all_alternatives = []
# Run `update-alternatives --display <name>` to find existing alternatives
(rc, display_output, _) = module.run_command(
['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
)
if rc == 0:
# Alternatives already exist for this link group
# Parse the output to determine the current path of the symlink and
# available alternatives
current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
re.MULTILINE)
alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
match = current_path_regex.search(display_output)
if match:
current_path = match.group(1)
all_alternatives = alternative_regex.findall(display_output)
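        # Illustrative `update-alternatives --display java` output that the
        # two regexes above are written against (wording varies by distro):
        #
        #   java - auto mode
        #   link currently points to /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
        #   /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - priority 1071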
if not link:
# Read the current symlink target from `update-alternatives --query`
# in case we need to install the new alternative before setting it.
#
        # This only works on Debian-based systems, as other
        # implementations of update-alternatives do not provide --query.
rc, query_output, _ = module.run_command(
['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
)
if rc == 0:
for line in query_output.splitlines():
if line.startswith('Link:'):
link = line.split()[1]
break
if current_path != path:
if module.check_mode:
module.exit_json(changed=True, current_path=current_path)
try:
# install the requested path if necessary
if path not in all_alternatives:
if not os.path.exists(path):
module.fail_json(msg="Specified path %s does not exist" % path)
if not link:
module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
module.run_command(
[UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
check_rc=True
)
# select the requested path
module.run_command(
[UPDATE_ALTERNATIVES, '--set', name, path],
check_rc=True
)
module.exit_json(changed=True)
        except subprocess.CalledProcessError as cpe:
            module.fail_json(msg=str(cpe))
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
gpl-3.0
|
mapr/hue
|
desktop/core/ext-py/Django-1.6.10/tests/model_fields/test_imagefield.py
|
54
|
16032
|
from __future__ import absolute_import
import os
import shutil
from django.core.exceptions import ImproperlyConfigured
from django.core.files import File
from django.core.files.images import ImageFile
from django.test import TestCase
from django.utils._os import upath
from django.utils.unittest import skipIf
try:
from .models import Image
except ImproperlyConfigured:
Image = None
if Image:
from .models import (Person, PersonWithHeight, PersonWithHeightAndWidth,
PersonDimensionsFirst, PersonTwoImages, TestImageFieldFile)
from .models import temp_storage_dir
else:
# Pillow not available, create dummy classes (tests will be skipped anyway)
class Person():
pass
PersonWithHeight = PersonWithHeightAndWidth = PersonDimensionsFirst = Person
PersonTwoImages = Person
class ImageFieldTestMixin(object):
"""
Mixin class to provide common functionality to ImageField test classes.
"""
# Person model to use for tests.
PersonModel = PersonWithHeightAndWidth
# File class to use for file instances.
File = ImageFile
def setUp(self):
"""
Creates a pristine temp directory (or deletes and recreates if it
already exists) that the model uses as its storage directory.
Sets up two ImageFile instances for use in tests.
"""
if os.path.exists(temp_storage_dir):
shutil.rmtree(temp_storage_dir)
os.mkdir(temp_storage_dir)
file_path1 = os.path.join(os.path.dirname(upath(__file__)), "4x8.png")
self.file1 = self.File(open(file_path1, 'rb'))
file_path2 = os.path.join(os.path.dirname(upath(__file__)), "8x4.png")
self.file2 = self.File(open(file_path2, 'rb'))
def tearDown(self):
"""
Removes temp directory and all its contents.
"""
shutil.rmtree(temp_storage_dir)
def check_dimensions(self, instance, width, height,
field_name='mugshot'):
"""
Asserts that the given width and height values match both the
field's height and width attributes and the height and width fields
(if defined) the image field is caching to.
Note, this method will check for dimension fields named by adding
"_width" or "_height" to the name of the ImageField. So, the
models used in these tests must have their fields named
accordingly.
By default, we check the field named "mugshot", but this can be
specified by passing the field_name parameter.
"""
field = getattr(instance, field_name)
# Check height/width attributes of field.
if width is None and height is None:
self.assertRaises(ValueError, getattr, field, 'width')
self.assertRaises(ValueError, getattr, field, 'height')
else:
self.assertEqual(field.width, width)
self.assertEqual(field.height, height)
# Check height/width fields of model, if defined.
width_field_name = field_name + '_width'
if hasattr(instance, width_field_name):
self.assertEqual(getattr(instance, width_field_name), width)
height_field_name = field_name + '_height'
if hasattr(instance, height_field_name):
self.assertEqual(getattr(instance, height_field_name), height)
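# For reference, a hypothetical model following the naming convention that
# check_dimensions() relies on (the real test models live in .models):
#
#   class PersonWithHeightAndWidth(models.Model):
#       name = models.CharField(max_length=50)
#       mugshot = models.ImageField(upload_to='tests',
#                                   height_field='mugshot_height',
#                                   width_field='mugshot_width')
#       mugshot_height = models.PositiveSmallIntegerField()
#       mugshot_width = models.PositiveSmallIntegerField()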
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldTests(ImageFieldTestMixin, TestCase):
"""
Tests for ImageField that don't need to be run with each of the
different test model classes.
"""
def test_equal_notequal_hash(self):
"""
Bug #9786: Ensure '==' and '!=' work correctly.
Bug #9508: make sure hash() works as expected (equal items must
hash to the same value).
"""
# Create two Persons with different mugshots.
p1 = self.PersonModel(name="Joe")
p1.mugshot.save("mug", self.file1)
p2 = self.PersonModel(name="Bob")
p2.mugshot.save("mug", self.file2)
self.assertEqual(p1.mugshot == p2.mugshot, False)
self.assertEqual(p1.mugshot != p2.mugshot, True)
# Test again with an instance fetched from the db.
p1_db = self.PersonModel.objects.get(name="Joe")
self.assertEqual(p1_db.mugshot == p2.mugshot, False)
self.assertEqual(p1_db.mugshot != p2.mugshot, True)
# Instance from db should match the local instance.
self.assertEqual(p1_db.mugshot == p1.mugshot, True)
self.assertEqual(hash(p1_db.mugshot), hash(p1.mugshot))
self.assertEqual(p1_db.mugshot != p1.mugshot, False)
def test_instantiate_missing(self):
"""
        If the underlying file is unavailable, the object should still
        instantiate without error.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
p = self.PersonModel.objects.get(name="Joan")
path = p.mugshot.path
shutil.move(path, path + '.moved')
p2 = self.PersonModel.objects.get(name="Joan")
def test_delete_when_missing(self):
"""
Bug #8175: correctly delete an object where the file no longer
exists on the file system.
"""
p = self.PersonModel(name="Fred")
p.mugshot.save("shot", self.file1)
os.remove(p.mugshot.path)
p.delete()
def test_size_method(self):
"""
Bug #8534: FileField.size should not leave the file open.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
# Get a "clean" model instance
p = self.PersonModel.objects.get(name="Joan")
# It won't have an opened file.
self.assertEqual(p.mugshot.closed, True)
# After asking for the size, the file should still be closed.
_ = p.mugshot.size
self.assertEqual(p.mugshot.closed, True)
def test_pickle(self):
"""
Tests that ImageField can be pickled, unpickled, and that the
image of the unpickled version is the same as the original.
"""
import pickle
p = Person(name="Joe")
p.mugshot.save("mug", self.file1)
dump = pickle.dumps(p)
p2 = Person(name="Bob")
p2.mugshot = self.file1
loaded_p = pickle.loads(dump)
self.assertEqual(p.mugshot, loaded_p.mugshot)
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldTwoDimensionsTests(ImageFieldTestMixin, TestCase):
"""
Tests behavior of an ImageField and its dimensions fields.
"""
def test_constructor(self):
"""
Tests assigning an image field through the model's constructor.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
p.save()
self.check_dimensions(p, 4, 8)
def test_image_after_constructor(self):
"""
Tests behavior when image is not passed in constructor.
"""
p = self.PersonModel(name='Joe')
# TestImageField value will default to being an instance of its
# attr_class, a TestImageFieldFile, with name == None, which will
# cause it to evaluate as False.
self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
self.assertEqual(bool(p.mugshot), False)
# Test setting a fresh created model instance.
p = self.PersonModel(name='Joe')
p.mugshot = self.file1
self.check_dimensions(p, 4, 8)
def test_create(self):
"""
Tests assigning an image in Manager.create().
"""
p = self.PersonModel.objects.create(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
def test_default_value(self):
"""
Tests that the default value for an ImageField is an instance of
the field's attr_class (TestImageFieldFile in this case) with no
name (name set to None).
"""
p = self.PersonModel()
self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
self.assertEqual(bool(p.mugshot), False)
def test_assignment_to_None(self):
"""
Tests that assigning ImageField to None clears dimensions.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
self.check_dimensions(p, 4, 8)
# If image assigned to None, dimension fields should be cleared.
p.mugshot = None
self.check_dimensions(p, None, None)
p.mugshot = self.file2
self.check_dimensions(p, 8, 4)
def test_field_save_and_delete_methods(self):
"""
Tests assignment using the field's save method and deletion using
the field's delete method.
"""
p = self.PersonModel(name='Joe')
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8)
# A new file should update dimensions.
p.mugshot.save("mug", self.file2)
self.check_dimensions(p, 8, 4)
# Field and dimensions should be cleared after a delete.
p.mugshot.delete(save=False)
self.assertEqual(p.mugshot, None)
self.check_dimensions(p, None, None)
def test_dimensions(self):
"""
Checks that dimensions are updated correctly in various situations.
"""
p = self.PersonModel(name='Joe')
# Dimensions should get set if file is saved.
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8)
# Test dimensions after fetching from database.
p = self.PersonModel.objects.get(name='Joe')
# Bug 11084: Dimensions should not get recalculated if file is
# coming from the database. We test this by checking if the file
# was opened.
self.assertEqual(p.mugshot.was_opened, False)
self.check_dimensions(p, 4, 8)
# After checking dimensions on the image field, the file will have
# opened.
self.assertEqual(p.mugshot.was_opened, True)
# Dimensions should now be cached, and if we reset was_opened and
# check dimensions again, the file should not have opened.
p.mugshot.was_opened = False
self.check_dimensions(p, 4, 8)
self.assertEqual(p.mugshot.was_opened, False)
# If we assign a new image to the instance, the dimensions should
# update.
p.mugshot = self.file2
self.check_dimensions(p, 8, 4)
# Dimensions were recalculated, and hence file should have opened.
self.assertEqual(p.mugshot.was_opened, True)
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldNoDimensionsTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with no dimension fields.
"""
PersonModel = Person
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldOneDimensionTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with one dimensions field.
"""
PersonModel = PersonWithHeight
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldDimensionsFirstTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField where the dimensions fields are
defined before the ImageField.
"""
PersonModel = PersonDimensionsFirst
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldUsingFileTests(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField when assigning it a File instance
rather than an ImageFile instance.
"""
PersonModel = PersonDimensionsFirst
File = File
@skipIf(Image is None, "PIL is required to test ImageField")
class TwoImageFieldTests(ImageFieldTestMixin, TestCase):
"""
Tests a model with two ImageFields.
"""
PersonModel = PersonTwoImages
def test_constructor(self):
p = self.PersonModel(mugshot=self.file1, headshot=self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
p.save()
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
def test_create(self):
p = self.PersonModel.objects.create(mugshot=self.file1,
headshot=self.file2)
self.check_dimensions(p, 4, 8)
self.check_dimensions(p, 8, 4, 'headshot')
def test_assignment(self):
p = self.PersonModel()
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.mugshot = self.file1
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.headshot = self.file2
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# Clear the ImageFields one at a time.
p.mugshot = None
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
p.headshot = None
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
def test_field_save_and_delete_methods(self):
p = self.PersonModel(name='Joe')
p.mugshot.save("mug", self.file1)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.headshot.save("head", self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# We can use save=True when deleting the image field with null=True
# dimension fields and the other field has an image.
p.headshot.delete(save=True)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
p.mugshot.delete(save=False)
self.check_dimensions(p, None, None, 'mugshot')
self.check_dimensions(p, None, None, 'headshot')
def test_dimensions(self):
"""
Checks that dimensions are updated correctly in various situations.
"""
p = self.PersonModel(name='Joe')
# Dimensions should get set for the saved file.
p.mugshot.save("mug", self.file1)
p.headshot.save("head", self.file2)
self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# Test dimensions after fetching from database.
p = self.PersonModel.objects.get(name='Joe')
# Bug 11084: Dimensions should not get recalculated if file is
# coming from the database. We test this by checking if the file
# was opened.
self.assertEqual(p.mugshot.was_opened, False)
self.assertEqual(p.headshot.was_opened, False)
        self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
# After checking dimensions on the image fields, the files will
# have been opened.
self.assertEqual(p.mugshot.was_opened, True)
self.assertEqual(p.headshot.was_opened, True)
# Dimensions should now be cached, and if we reset was_opened and
# check dimensions again, the file should not have opened.
p.mugshot.was_opened = False
p.headshot.was_opened = False
        self.check_dimensions(p, 4, 8, 'mugshot')
self.check_dimensions(p, 8, 4, 'headshot')
self.assertEqual(p.mugshot.was_opened, False)
self.assertEqual(p.headshot.was_opened, False)
# If we assign a new image to the instance, the dimensions should
# update.
p.mugshot = self.file2
p.headshot = self.file1
self.check_dimensions(p, 8, 4, 'mugshot')
self.check_dimensions(p, 4, 8, 'headshot')
# Dimensions were recalculated, and hence file should have opened.
self.assertEqual(p.mugshot.was_opened, True)
self.assertEqual(p.headshot.was_opened, True)
|
apache-2.0
|
rfinn/LCS
|
paper1code/LCSreadmaster.v2.py
|
1
|
46244
|
#!/usr/bin/env python
import pyfits
from LCScommon import *
from pylab import *
import os
import mystuff as my
#these correspond to the areas w/more uniform coverage
MKW824um=array([220.16377,3.4883817,1.3137727,2.5,12.7456],'f')
MKW1124um=array([202.36305,11.746882,1.2454248,2.9,206.4],'f')
NGC24um=array([244.30994,34.933704,1.2865442,2.5,321.317],'f')
def drawbox(data,style):#feed in center x,y,dx,dy,rotation E of N
    #corner offsets of the unrotated box relative to its center, going
    #around CCW; the center is added back after rotation
    xl=array([-0.5*data[2],+0.5*data[2],+0.5*data[2],-0.5*data[2],-0.5*data[2]],'d')
    yl=array([-0.5*data[3],-0.5*data[3],+0.5*data[3],+0.5*data[3],-0.5*data[3] ],'d')
ang=data[4]*pi/180.*-1.#convert rotation to radians
#rotate coordinates
xp=cos(ang)*xl-sin(ang)*yl
yp=sin(ang)*xl+cos(ang)*yl
#put back on absolute scale
xp=data[0]+xp
yp=data[1]+yp
#draw rotated box
plot(xp,yp,style)
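# Example (illustrative): outline the MKW8 24um coverage region in red,
# where data = [RA, Dec, width, height, rotation E of N in degrees]:
#   drawbox(MKW824um, 'r-')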
def calcC90(x,y,yerr,fluxencl):#radius("),intensity,error,
#find average using last 3 pts, which should approximate sky
sky=mean(y[len(y)-3:len(y)])
#subtract sky from y
sy=y-sky
#multiply y by r**2 to account for increasing area
toty=y*(x**2)
#sum r=0-36arcsec to get total flux
totygood=toty[0:len(y)-3]
rgood=x[0:len(y)-3]
totflux=sum(totygood)
#start summing from 39arcsec inward, until it reaches 10% of total flux
sum10=0
tenpercent=.1*totflux
thirty=.3*totflux
for i in range(len(totygood)):
index=len(totygood)-1-i
sum10 += totygood[index]
#print sky,sum10,tenpercent,totflux
if sum10 > tenpercent:
r90=rgood[index]
break
#calculate r30
sum30=0
for i in range(len(totygood)):
sum30 += totygood[i]
if sum30 > thirty:
r30=rgood[i]
break
##calculate r90 using enclosed flux array
#find max of array
maxEncFlux=max(fluxencl)
indexMax=where((fluxencl == maxEncFlux))
#break index out of array and into a plain integer
indexMax=indexMax[0]
#use index of max of array and move inward until value is .9 max
transitionFlux=0.9*maxEncFlux
for i in range(indexMax):
index=indexMax-i
if fluxencl[index] < transitionFlux:
r90FromEncFlux=x[index]
break
#calculate r30FromEncFlux
transitionFlux=0.3*maxEncFlux
for i in range(indexMax):
index=i
if fluxencl[index] > transitionFlux:
r30FromEncFlux=x[index]
break
#return radius at C90 (the radius that encloses 90% of the light) and sky
try:
return r90,sky,r90FromEncFlux,maxEncFlux,r30,r30FromEncFlux
except UnboundLocalError:
print "Warning: Could not find R90"
try:
return 0,sky,r90FromEncFlux,maxEncFlux,r30,r30FromEncFlux
except UnboundLocalError:
print "Warning: Could not find R30 From Enc Flux"
return 0,sky,r90FromEncFlux,maxEncFlux,r30,0
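# Illustrative call (hypothetical arrays: radius in arcsec, intensity,
# intensity error, and the enclosed-flux profile):
#   r90, sky, r90enc, maxenc, r30, r30enc = calcC90(r, sb, sberr, encflux)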
class cluster:
def __init__(self,clustername):
#Get current path so program can tell if this is being run on Becky or Rose's computer
self.prefix=clustername
self.cra=clusterRA[self.prefix]
self.cdec=clusterDec[self.prefix]
self.cz=clusterz[self.prefix]
self.biweightvel=clustercbi[self.prefix]
self.biweightscale=clustersbi[self.prefix]
self.r200=2.02*(self.biweightscale)/1000./sqrt(OmegaL+OmegaM*(1.+self.cz)**3)*H0/70. # in Mpc
self.r200deg=self.r200*1000./my.DA(self.cz,h)/3600.
mypath=os.getcwd()
if mypath.find('Users') > -1:
print "Running on Rose's mac pro"
infile='/Users/rfinn/research/LocalClusters/MasterTables/'+clustername+'mastertable.fits'
homedir='/Users/rfinn/'
elif mypath.find('home') > -1:
print "Running on coma"
infile='/home/rfinn/research/LocalClusters/MasterTables/'+clustername+'mastertable.fits'
homedir='/home/rfinn/'
#infile='/home/rfinn/LocalClusters/MasterTables/'+clustername+'mastertable.fits'
tb=pyfits.open(infile)
tbdata=tb[1].data
tb.close()
self.agcflag=tbdata.field('AGCflag')
self.HIflag=tbdata.field('HIFLAG')
self.sdssflag=tbdata.field('SDSSflag')
self.sdssphotflag=tbdata.field('SDSSphotflag')
self.mpaflag=tbdata.field('MPAFLAG')
self.apexflag=tbdata.field('APEXFLAG')
self.sexsdssflag=tbdata.field('SEXSDSSflag')
self.sex24flag=tbdata.field('SEX24FLAG')
self.agcvoptflag=tbdata.field('AGCVOPTFLAG')
self.agcnumber=tbdata.field('AGCNUMBER')
self.raagc=tbdata.field('AGCRA')
self.decagc=tbdata.field('AGCDEC')
self.a100=tbdata.field('A100')
self.b100=tbdata.field('B100')
self.mag10=tbdata.field('MAG10')
self.posang=tbdata.field('POSANG')
self.bsteintype=tbdata.field('BSTEINTYPE')
self.vopt=tbdata.field('VOPT')
self.verr=tbdata.field('VERR')
self.vsource=tbdata.field('VSOURCE')
self.flux100=tbdata.field('FLUX100')
self.rms100=tbdata.field('RMS100')
self.v21=tbdata.field('V21')
self.width=tbdata.field('WIDTH')
self.widtherr=tbdata.field('WIDTHERR')
#sdss info
self.sdssra=tbdata.field('SDSSRA')
self.sdssdec=tbdata.field('SDSSDEC')
self.sdssphotra=tbdata.field('SDSSphotRA')
self.sdssphotdec=tbdata.field('SDSSphotDEC')
self.sdssu=tbdata.field('SDSSU')
self.sdssg=tbdata.field('SDSSG')
self.sdssr=tbdata.field('SDSSR')
self.sdssi=tbdata.field('SDSSI')
self.sdssz=tbdata.field('SDSSZ')
self.sdssspecz=tbdata.field('SDSSSPECZ')
self.sdssvopt=tbdata.field('SDSSVOPT')
self.sdsshaew=tbdata.field('SDSSHAEW')
self.sdsshaewerr=tbdata.field('SDSSHAEWERR')
self.sdssplate=tbdata.field('SDSSPLATE')
self.sdssfiberid=tbdata.field('SDSSFIBERID')
self.sdsstile=tbdata.field('SDSSTILE')
self.mpahalpha=tbdata.field('MPAHALPHA')
self.mpahbeta=tbdata.field('MPAHBETA')
self.mpao3=tbdata.field('MPAOIII')
self.mpan2=tbdata.field('MPANII')
#sextractor info
self.numberser=tbdata.field('NUMBERSER')
self.ximageser=tbdata.field('XIMAGESER')
self.yimageser=tbdata.field('YIMAGESER')
self.xminimageser=tbdata.field('XMINIMAGESER')
self.xmaximageser=tbdata.field('XMAXIMAGESER')
self.yminimageser=tbdata.field('YMINIMAGESER')
self.raser=tbdata.field('RASER')
self.decser=tbdata.field('DECSER')
self.fluxisoser=tbdata.field('FLUXISOSER')
self.fluxerrisoser=tbdata.field('FLUXERRISOSER')
self.magisoser=tbdata.field('MAGISOSER')
self.magerrisoser=tbdata.field('MAGERRISOSER')
self.fluxautoser=tbdata.field('FLUXAUTOSER')
self.fluxerrautoser=tbdata.field('FLUXERRAUTOSER')
self.magautoser=tbdata.field('MAGAUTOSER')
self.magerrautoser=tbdata.field('MAGERRAUTOSER')
self.fluxpetroser=tbdata.field('FLUXPETROSER')
self.fluxerrpetroser=tbdata.field('FLUXERRPETROSER')
self.magpetroser=tbdata.field('MAGPETROSER')
self.magerrpetroser=tbdata.field('MAGERRPETROSER')
self.kronradser=tbdata.field('KRONRADSER')#kron radius
self.petroradser=tbdata.field('PETRORADSER')#petrosian radius
self.fluxradser=tbdata.field('FLUXRADSER')#1/2 light radius
self.isoareaser=tbdata.field('ISOAREASER')
self.aworldser=tbdata.field('AWORLDSER')
self.bworldser=tbdata.field('BWORLDSER')
self.thetaser=tbdata.field('THETASER')
self.errthetaser=tbdata.field('ERRTHETASER')
self.thetaj2000ser=tbdata.field('THETAJ2000SER')
self.errthetaj2000ser=tbdata.field('ERRTHETAJ2000SER')
self.elongser=tbdata.field('ELONGATIONSER')
self.elliptser=tbdata.field('ELLIPTICITYSER')
self.fwhmser=tbdata.field('FWHMSER')
self.flagsser=tbdata.field('FLAGSSER')
self.classstarser=tbdata.field('CLASSSTARSER')
#SEXTRACTOR output 24 micron data
self.numberse24=tbdata.field('NUMBERSE24')
self.ximagese24=tbdata.field('XIMAGESE24')
self.yimagese24=tbdata.field('YIMAGESE24')
self.xminimagese24=tbdata.field('XMINIMAGESE24')
self.xmaximagese24=tbdata.field('XMAXIMAGESE24')
self.yminimagese24=tbdata.field('YMINIMAGESE24')
self.rase24=tbdata.field('RASE24')
self.decse24=tbdata.field('DECSE24')
self.fluxisose24=tbdata.field('FLUXISOSE24')
self.fluxerrisose24=tbdata.field('FLUXERRISOSE24')
self.magisose24=tbdata.field('MAGISOSE24')
self.magerrisose24=tbdata.field('MAGERRISOSE24')
self.fluxautose24=tbdata.field('FLUXAUTOSE24')
self.fluxerrautose24=tbdata.field('FLUXERRAUTOSE24')
self.magautose24=tbdata.field('MAGAUTOSE24')
self.magerrautose24=tbdata.field('MAGERRAUTOSE24')
self.fluxpetrose24=tbdata.field('FLUXPETROSE24')
self.fluxerrpetrose24=tbdata.field('FLUXERRPETROSE24')
self.magpetrose24=tbdata.field('MAGPETROSE24')
self.magerrpetrose24=tbdata.field('MAGERRPETROSE24')
self.kronradse24=tbdata.field('KRONRADSE24')
self.petroradse24=tbdata.field('PETRORADSE24')
self.fluxradse24=tbdata.field('FLUXRADSE24')
self.isoarease24=tbdata.field('ISOAREASE24')
self.aworldse24=tbdata.field('AWORLDSE24')
self.bworldse24=tbdata.field('BWORLDSE24')
self.thetase24=tbdata.field('THETASE24')
self.errthetase24=tbdata.field('ERRTHETASE24')
self.thetaj2000se24=tbdata.field('THETAJ2000SE24')
self.errthetaj2000se24=tbdata.field('ERRTHETAJ2000SE24')
self.elongse24=tbdata.field('ELONGATIONSE24')
self.elliptse24=tbdata.field('ELLIPTICITYSE24')
self.fwhmse24=tbdata.field('FWHMSE24')
self.flagsse24=tbdata.field('FLAGSSE24')
self.classstarse24=tbdata.field('CLASSSTARSE24')
self.f24dist=self.fluxautose24[self.sex24flag]
#apex output
self.mipsra=tbdata.field('MIPSRA')
self.mipsdec=tbdata.field('MIPSDEC')
self.mipsflux=tbdata.field('MIPSFLUX')
self.mipsfluxerr=tbdata.field('MIPSFLUXERR')
self.mipssnr=tbdata.field('MIPSSNR')
self.mipsdeblend=tbdata.field('MIPSDEBLEND')
self.mipsfluxap1=tbdata.field('MIPSFLUXAP1')
self.mipsfluxap1err=tbdata.field('MIPSFLUXAP1ERR')
self.mipsfluxap2=tbdata.field('MIPSFLUXAP2')
self.mipsfluxap2err=tbdata.field('MIPSFLUXAP2ERR')
self.mipsfluxap3=tbdata.field('MIPSFLUXAP3')
self.mipsfluxap3err=tbdata.field('MIPSFLUXAP3ERR')
self.On24ImageFlag=tbdata.field('On24ImageFlag')
self.supervopt=tbdata.field('SUPERVOPT')
self.ra=tbdata.field('SUPERRA')
self.dec=tbdata.field('SUPERDEC')
self.stellarmass=tbdata.field('STELLARMASS')
self.sdssMu=tbdata.field('SDSSMU')
self.sdssLu=tbdata.field('SDSSLU')
self.sdssMg=tbdata.field('SDSSMG')
self.sdssLg=tbdata.field('SDSSLG')
self.sdssMr=tbdata.field('SDSSMR')
self.sdssLr=tbdata.field('SDSSLR')
self.sdssMi=tbdata.field('SDSSMI')
self.sdssLi=tbdata.field('SDSSLI')
self.sdssMz=tbdata.field('SDSSMZ')
self.sdssLz=tbdata.field('SDSSLZ')
self.membflag =tbdata.field('MEMBFLAG')
self.morphflag =tbdata.field('MORPHFLAG')
self.morph =tbdata.field('MORPH')
self.disturb =tbdata.field('DISTURB')
self.localdens =tbdata.field('LOCALDENS')
self.agn1 =tbdata.field('AGNKAUFF')
self.agn2 =tbdata.field('AGNKEWLEY')
self.agn3 =tbdata.field('AGNSTASIN')
self.logn2halpha=log10(self.mpan2/self.mpahalpha)
self.logo3hbeta=log10(self.mpao3/self.mpahbeta)
self.ellipseflag24 =tbdata.field('ELLIPSEFLAG24')
self.ellipseflagsdss =tbdata.field('ELLIPSEFLAGSDSS')
self.ellipseflag =tbdata.field('ELLIPSEFLAG')
# galaxy zoo fields
self.galzooflag =tbdata.field('GALZOOFLAG')
self.galzoonvote =tbdata.field('GALZOONVOTE')
self.galzoopel =tbdata.field('GALZOOPEL')
self.galzoopcw =tbdata.field('GALZOOPCW')
self.galzoopacw =tbdata.field('GALZOOPACW')
self.galzoopedge =tbdata.field('GALZOOPEDGE')
self.galzoopdk =tbdata.field('GALZOOPDK')
self.galzoopmg =tbdata.field('GALZOOPMG')
self.galzoopcs =tbdata.field('GALZOOPCS')
self.galzoopeldebiased =tbdata.field('GALZOOPELDEBIASED')
self.galzoopcsdebiased =tbdata.field('GALZOOPCSDEBIASED')
self.galzoospiral =tbdata.field('GALZOOSPIRAL')
self.galzooelliptical =tbdata.field('GALZOOELLIPTICAL')
self.galzoouncertain =tbdata.field('GALZOOUNCERTAIN')
#new SDSS fields that quantify radial extent of galaxy
self.sdssIsoAr =tbdata.field('SDSSISOAR')
self.sdssIsoBr =tbdata.field('SDSSISOBR')
self.sdssIsoPhir =tbdata.field('SDSSISOPHIR')
self.sdssIsoPhirErr =tbdata.field('SDSSISOPHIERRR')
self.sdssExpRadr =tbdata.field('SDSSEXPRADR')
self.sdssExpABr =tbdata.field('SDSSEXPABR')
self.sdssExpABrErr =tbdata.field('SDSSEXPABRERR')
self.sdssExpPhir =tbdata.field('SDSSEXPPHIR')
self.sdssExpPhirErr =tbdata.field('SDSSEXPPHIERRR')
self.sdssumag=tbdata.field('SDSSDEREDU')#de-reddened magnitudes
self.sdssgmag=tbdata.field('SDSSDEREDG')
self.sdssrmag=tbdata.field('SDSSDEREDR')
self.sdssimag=tbdata.field('SDSSDEREDI')
self.sdsszmag=tbdata.field('SDSSDEREDZ')
#end of master table!
#self.spiralFlag=self.On24ImageFlag & self.galzooflag & self.ellipseflag & (self.galzoopcsdebiased > 0.6)
self.spiralFlag=self.On24ImageFlag & self.galzooflag & self.ellipseflag & self.galzoospiral
self.clustername=clustername
self.clusterra=clusterRA[clustername]
self.clusterdec=clusterDec[clustername]
self.dr=sqrt((self.ra-self.clusterra)**2+(self.dec-self.clusterdec)**2)
self.drR200=self.dr/self.r200deg
self.clustervel=clustervel[clustername]
self.clustersigma=clustersigma[clustername]
self.clustervmin=self.clustervel-3.*self.clustersigma
self.clustervmax=self.clustervel+3.*self.clustersigma
self.dist=sqrt((self.clusterra-self.ra)**2 + (self.clusterdec-self.dec)**2)
self.flagHI = (self.flux100 > 0.)
self.flagmemb = ((self.vopt > self.clustervmin) & (self.vopt < self.clustervmax)) | ((self.v21 > self.clustervmin) & (self.v21 < self.clustervmax))
self.allvelocity=3.e5*self.sdssspecz
for i in range(len(self.allvelocity)):
if self.sdssflag[i] < 1:
if self.v21[i] > 0:
self.allvelocity[i]=self.v21[i]
else:
self.allvelocity[i]=self.vopt[i]
self.nmemb=len(self.dist[self.membflag & self.On24ImageFlag])
self.nfield=len(self.dist[self.On24ImageFlag])-self.nmemb
print self.clustername,": ","N members = ",self.nmemb," N field = ",self.nfield
print ' N spirals = ',sum(self.spiralFlag),' Nspiral members = ',sum(self.spiralFlag&self.membflag)
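# A minimal, standalone sketch of the r200 scaling used in __init__ above
# (a Carlberg et al. 1997 style relation). The helper name and its default
# cosmology values are illustrative assumptions, not part of this pipeline.
def r200_mpc(sigma_kms, z, OmegaL=0.7, OmegaM=0.3, H0=70.):
    from math import sqrt
    return 2.02*sigma_kms/1000./sqrt(OmegaL+OmegaM*(1.+z)**3)*H0/70.
# e.g. r200_mpc(700., 0.023) -> ~1.4 Mpc for a sigma=700 km/s cluster at z=0.023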
def plotagn(self):
figure()
clf()
plot(self.logn2halpha,self.logo3hbeta,'k.')
plot(self.logn2halpha[self.agn2],self.logo3hbeta[self.agn2],'co',markersize=12)
plot(self.logn2halpha[self.agn1],self.logo3hbeta[self.agn1],'go',markersize=8)
plot(self.logn2halpha[self.agn3],self.logo3hbeta[self.agn3],'ro',markersize=4)
#draw AGN diagnostic lines
x=arange(-3,1,.01)
y=(.61/(x-.47)+1.19)#Kewley 2001
plot(x,y,'c')
y =(.61/(x-.05)+1.3)#Kauffmann 2003
plot(x,y,'g')
y = ((-30.787+(1.1358*x)+((.27297)*(x)**2))*tanh(5.7409*x))-31.093 #Stasinska 2006
plot(x,y,'r')
axis([-3,1.,-2,2])
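# Hedged sketch of how the three demarcation curves drawn above are typically
# used to classify a single galaxy on the BPT diagram. classify_bpt is an
# illustrative helper, not defined or used elsewhere in this script.
def classify_bpt(logn2ha, logo3hb):
    """'AGN' above the Kewley (2001) curve, 'SF' below the Kauffmann (2003)
    curve, 'composite' in between; each curve diverges at its x asymptote."""
    above_kewley = logn2ha >= .47 or logo3hb > .61/(logn2ha-.47)+1.19
    above_kauffmann = logn2ha >= .05 or logo3hb > .61/(logn2ha-.05)+1.3
    if above_kewley:
        return 'AGN'
    if above_kauffmann:
        return 'composite'
    return 'SF'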
def getFilesForProfileFitting(self):
outfile1=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.Images.24.dat'
outfile2=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.Images.sdss.dat'
outfile3=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.24.dat'
outfile4=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.sdss.dat'
agcSpiral=self.agcnumber[self.spiralFlag]
out1=open(outfile1,'w')
out2=open(outfile2,'w')
out3=open(outfile3,'w')
out4=open(outfile4,'w')
cutoutpath=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'
for i in range(len(agcSpiral)):
outim=cutoutpath+self.prefix+'-'+str(agcSpiral[i])+'-cutout-sdss.fits \n'
outim24=cutoutpath+self.prefix+'-'+str(agcSpiral[i])+'-cutout-24-rot.fits \n'
out1.write(outim24)
out2.write(outim)
outfile=homedir+'research/LocalClusters/EllipseTables/'+self.prefix+'/'+self.prefix+'-'+str(agcSpiral[i])+'-cutout-sdss.dat \n'
outfile24=homedir+'research/LocalClusters/EllipseTables/'+self.prefix+'/'+self.prefix+'-'+str(agcSpiral[i])+'-cutout-24-rot.dat \n'
out3.write(outfile24)
out4.write(outfile)
out1.close()
out2.close()
out3.close()
out4.close()
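# The four list files written above (image lists and ellipse-table lists, for
# SDSS and 24um) are exactly the inputs that fitprofiles() below reads back in.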
def plotpositions(self):
#figure()
#clf()
#draw footprint of mips data, if applicable
#if self.clustername.find('MKW8') > -1:
# drawbox(MKW824um,'r-')
#if self.clustername.find('MKW11') > -1:
# drawbox(MKW1124um,'r-')
#if self.clustername.find('NGC6107') > -1:
# drawbox(NGC24um,'r-')
#scatter(ra[flag],dec[flag],s=(20-agcmag10[flag]/10)*20+20,color='.8')
#plot(ra[flag],dec[flag],'k.')
plot(self.ra[self.sdssflag],self.dec[self.sdssflag],'k.', alpha=0.5,markersize=4,label='SDSS')
plot(self.ra[self.HIflag],self.dec[self.HIflag],'bo', markerfacecolor='None',markeredgecolor='b',markersize=6,label='HI')
plot(self.ra[self.apexflag],self.dec[self.apexflag],'ro', markerfacecolor='r',markeredgecolor='b',markersize=4,label='24um')
#plot(ra[flag],dec[flag],'k.')
plot(array([self.clusterra]),array([self.clusterdec]),'kx',markersize=15,lw=8)#mark cluster center with a black x
#legend(loc='upper right',numpoints=1)
title(self.clustername,fontsize=12)
#axis([groupra[i]+dr,groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
axis('equal')
drawbox(cluster24Box[self.clustername],'g-')
xmin,xmax=xlim()
xticks(arange(round(xmin),xmax+1,2,'i'),fontsize=10)
ymin,ymax=ylim()
yticks(arange(round(ymin),ymax+1,2,'i'),fontsize=10)
#axis(groupra[i]+dr,[groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
#s=self.clustername+'.eps'
#savefig(s)
def plotpositionson24(self):
plot(self.ra[self.sdssflag & self.On24ImageFlag],self.dec[self.sdssflag& self.On24ImageFlag],'k.', alpha=0.5,markersize=4,label='SDSS')
plot(self.ra[self.HIflag & self.On24ImageFlag],self.dec[self.HIflag & self.On24ImageFlag],'bo', markerfacecolor='None',markeredgecolor='b',markersize=6,label='HI')
plot(self.ra[self.apexflag],self.dec[self.apexflag],'ro', markerfacecolor='r',markeredgecolor='b',markersize=4,label='24um')
#plot(ra[flag],dec[flag],'k.')
plot(array([self.clusterra]),array([self.clusterdec]),'kx',markersize=15,lw=8)#mark cluster center with a black x
#legend(loc='upper right',numpoints=1)
title(self.clustername,fontsize=12)
#axis([groupra[i]+dr,groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
#axis('equal')
drawbox(cluster24Box[self.clustername],'g-')
xmin,xmax=xlim()
xticks(arange(round(xmin),xmax,1,'i'),fontsize=10)
ymin,ymax=ylim()
yticks(arange(round(ymin),ymax,1,'i'),fontsize=10)
#axis(groupra[i]+dr,[groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
#s=self.clustername+'.eps'
#savefig(s)
def plotrelativepositionson24(self):
plot(self.ra[self.sdssflag & self.On24ImageFlag]-self.clusterra,self.dec[self.sdssflag& self.On24ImageFlag]-self.clusterdec,'k.', alpha=0.5,markersize=4,label='SDSS')
plot(self.ra[self.HIflag & self.On24ImageFlag]-self.clusterra,self.dec[self.HIflag & self.On24ImageFlag]-self.clusterdec,'bo', markerfacecolor='None',markeredgecolor='b',markersize=6,label='HI')
plot(self.ra[self.apexflag]-self.clusterra,self.dec[self.apexflag]-self.clusterdec,'ro', markerfacecolor='r',markeredgecolor='b',markersize=4,label='24um')
#plot(ra[flag],dec[flag],'k.')
plot(array([0]),array([0]),'kx',markersize=15,lw=8,label='_nolegend_')#mark cluster center with a black x
#legend(loc='upper right',numpoints=1)
title(self.clustername,fontsize=12)
#axis([groupra[i]+dr,groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
#axis('equal')
drawbox(cluster24Box[self.clustername]-array([self.clusterra,self.clusterdec,0,0,0]),'g-')
axis([-1.5,1.5,-2.,2.])
xmin,xmax=xlim()
xticks(arange(-1,2,1,'i'),fontsize=10)
ymin,ymax=ylim()
yticks(arange(-2,3,1,'i'),fontsize=10)
#axis(groupra[i]+dr,[groupra[i]-dr,groupdec[i]-dr,groupdec[i]+dr])
#s=self.clustername+'.eps'
#savefig(s)
def plotveldron24(self):
dr=sqrt((self.ra-self.clusterra)**2+(self.dec-self.clusterdec)**2)
dv=self.supervopt-self.biweightvel
membflag=(dr/self.r200deg < 1) & (abs(dv) < 3.*self.biweightscale)
plot(dr[self.On24ImageFlag],self.supervopt[self.On24ImageFlag],'k.',markersize=3)
plot(dr[membflag & self.On24ImageFlag],self.supervopt[membflag & self.On24ImageFlag],'bo',markersize=4)
ymin=3500
ymax=14000
axis([0,2,ymin,ymax])
xmin,xmax=xlim()
xticks(arange(round(xmin),xmax+1,1,'i'),fontsize=10)
yticks(arange(4000,ymax,4000,'i'),fontsize=10)
title(self.clustername,fontsize=12)
axhline(self.biweightvel,ls='-',color='r')
axhline(self.biweightvel+3*self.biweightscale,ls='--',color='r')
axhline(self.biweightvel-3*self.biweightscale,ls='--',color='r')
def plotveldr(self):
dr=sqrt((self.ra-self.clusterra)**2+(self.dec-self.clusterdec)**2)
dv=self.supervopt-self.biweightvel
membflag=(dr/self.r200deg < 1) & (abs(dv) < 3.*self.biweightscale)
plot(dr,self.supervopt,'k.',markersize=3)
plot(dr[membflag],self.supervopt[membflag],'bo',markersize=6)
ymin=3500
ymax=14000
axis([0,3,ymin,ymax])
xmin,xmax=xlim()
xticks(arange(round(xmin),xmax+1,1,'i'),fontsize=10)
yticks(arange(4000,ymax,4000,'i'),fontsize=10)
title(self.clustername,fontsize=12)
axhline(self.biweightvel,ls='-',color='r')
axhline(self.biweightvel+3*self.biweightscale,ls='--',color='r')
axhline(self.biweightvel-3*self.biweightscale,ls='--',color='r')
def plotvelhist(self):
figure()
bins=30
x1=self.allvelocity
(yhist,xhist,patches)=hist(x1,bins)
xhist=xhist[0:len(xhist)-1]+0.5*(xhist[1]-xhist[0])
mymean= average(x1)
mystd=std(x1)
print mystd
norm=max(yhist)
xmin=3000
xmax=15000
xplot=arange(xmin,xmax,50)
y1=norm*exp(-((xplot -self.clustervel)**2)/(2*self.clustersigma**2))
plot(xplot,y1,'r-')
xlabel('Recession Velocity ')
xlim(xmin,xmax)
axvline(self.clustervel,ymin=0,ymax=1,color='r')#ymin/ymax are axes fractions
def plotlf(self):
figure(1)
y=hist(self.f24dist,histtype='step')
ngal=y[0]
x=y[1]
xbin=zeros(len(ngal))
for i in range(len(xbin)):
xbin[i]=0.5*(x[i]+x[i+1])
#clf()
self.xbin=xbin
self.ngal=ngal
figure(2)
plot(xbin,ngal,'ro')
errorbar(xbin,ngal,sqrt(ngal))
ax=gca()
ax.set_yscale('log')
ax.set_xscale('log')
xlabel('24um Flux')
def checkmorph(self):#write out galaxies where our morphology class and the Burstein type disagree, or where no Burstein type exists
flag=(self.morph==3)
self.summerSpirals=self.agcnumber[flag]
bflag=((self.bsteintype>=120)&(self.bsteintype<183))|((self.bsteintype>=300)&(self.bsteintype<400))
self.bflag=bflag
funnyflag= ((~(flag) & bflag) | (flag & ~bflag)) & self.morphflag & (self.bsteintype > 0)#XOR: spiral in one classification but not the other
self.funnySpirals=self.agcnumber[funnyflag]
self.ourtype=self.morph[funnyflag]
self.theirtype=self.bsteintype[funnyflag]
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.funnySpirals.dat'
outfile=open(s,'w')
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.funnySpirals'
outfile2=open(s,'w')
for i in range(len(self.funnySpirals)):
if self.prefix in ('Coma','Hercules','A1367'):#tuple membership, not a fragile substring test
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss.fits \n'
else:
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss-g.fits \n'
outfile.write(name)
name=str(self.funnySpirals[i])+' '+str(self.ourtype[i])+' '+str(self.theirtype[i])+' \n'
outfile2.write(name)
outfile.close()
outfile2.close()
nobflag= ((self.bsteintype == 0) & (self.On24ImageFlag))
self.funnySpirals=self.agcnumber[nobflag]
self.ourtype=self.morph[nobflag]
self.theirtype=self.bsteintype[nobflag]
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.noBsteinSpirals.dat'
outfile=open(s,'w')
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.noBsteinSpirals'
outfile2=open(s,'w')
for i in range(len(self.funnySpirals)):
if self.prefix in ('Coma','Hercules','A1367'):
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss.fits \n'
else:
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss-g.fits \n'
outfile.write(name)
name=str(self.funnySpirals[i])+' '+str(self.ourtype[i])+' '+str(self.theirtype[i])+' \n'
outfile2.write(name)
outfile.close()
outfile2.close()
missmorphflag= ((self.morphflag == 0) & (self.On24ImageFlag))
self.funnySpirals=self.agcnumber[missmorphflag]
self.ourtype=self.morph[missmorphflag]
self.theirtype=self.bsteintype[missmorphflag]
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.noMorph.dat'
outfile=open(s,'w')
s=homedir+'research/LocalClusters/MorphologyF2011/'+self.prefix+'.noMorph'
outfile2=open(s,'w')
for i in range(len(self.funnySpirals)):
if self.prefix in ('Coma','Hercules','A1367'):
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss.fits \n'
else:
name=homedir+'research/LocalClusters/cutouts/'+self.prefix+'/'+self.prefix+'-'+str(self.funnySpirals[i])+'-cutout-sdss-g.fits \n'
outfile.write(name)
name=str(self.funnySpirals[i])+' '+str(self.ourtype[i])+' '+str(self.theirtype[i])+' \n'
outfile2.write(name)
outfile.close()
outfile2.close()
def fitprofiles(self):
#get list from LCSreadmaster.py
inf1=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.sdss.dat'
infile1=open(inf1,'r')
sfiles=[]
for line in infile1:
t=line.rstrip()
sfiles.append(t)
#print t
infile1.close()
inf1=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.24.dat'
infile1=open(inf1,'r')
s24files=[]
for line in infile1:
t=line.rstrip()
s24files.append(t)
#print t
infile1.close()
inf1=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.Images.sdss.dat'
infile1=open(inf1,'r')
simages=[]
for line in infile1:
t=line.rstrip()
simages.append(t)
#print t
infile1.close()
inf1=homedir+'research/LocalClusters/ProfileFitting/'+self.prefix+'Spirals.Images.24.dat'
infile1=open(inf1,'r')
s24images=[]
for line in infile1:
t=line.rstrip()
s24images.append(t)
#print t
infile1.close()
pscale24=2.45#arcsec per pixel
pscalesdss=1.#arcsec per pixel
nrow=2
ncol=4
xticksize=10
yticksize=10
#for i in range(0,len(sfiles),nrow):
ngal=0
ngaltot=1.*len(sfiles)
ratio=ngaltot/((nrow*ncol)/8.)
npage=round(ratio)
if ratio > npage:
npage += 1
npage=ngaltot#overrides the paging calculation above: force one figure per galaxy
#print "Ngal = ",ngaltot
#print "Npage = ",npage
redshift=(self.supervopt[self.spiralFlag]-self.clustervel)/self.clustersigma
member=self.flagmemb[self.spiralFlag]
dr=self.drR200[self.spiralFlag]
index=arange(len(self.spiralFlag))
spiralIndex=index[self.spiralFlag]
vminsdss=-400
vmaxsdss=100
vmin24=-2.1
vmax24=.5
self.r0SDSS=zeros(len(spiralIndex),'f')#scale length from exponential fit
self.r30SDSS=zeros(len(spiralIndex),'f')
self.r90SDSS=zeros(len(spiralIndex),'f')
self.skySDSS=zeros(len(spiralIndex),'f')
self.r30EncFluxSDSS=zeros(len(spiralIndex),'f')
self.r90EncFluxSDSS=zeros(len(spiralIndex),'f')
self.MaxEncFluxSDSS=zeros(len(spiralIndex),'f')
#same array for 24um
self.r0F24=zeros(len(spiralIndex),'f')#scale length from exponential fit
self.r30F24=zeros(len(spiralIndex),'f')
self.r90F24=zeros(len(spiralIndex),'f')
self.skyF24=zeros(len(spiralIndex),'f')
self.r30EncFluxF24=zeros(len(spiralIndex),'f')
self.r90EncFluxF24=zeros(len(spiralIndex),'f')
self.MaxEncFluxF24=zeros(len(spiralIndex),'f')
for i in range(int(npage)):
figure(figsize=(15,5))
subplots_adjust(left=0.05, right=.95,bottom=.1,top=0.9,wspace=0.4,hspace=0.6)
clf()
print 'ngal = ',ngal
for j in range(0,ncol*nrow,8):
t=sfiles[ngal]
t1=t.split('/')
t2=t1[len(t1)-1].split('-')
if len(t2) > 4:
galname='-'+t2[2]
else:
galname=t2[1]
t=s24files[ngal]
t1=t.split('/')
t2=t1[len(t1)-1].split('-')
if len(t2) > 5:
galname24='-'+t2[2]
else:
galname24=t2[1]
subplot(nrow,ncol,j+1)#sdss image
fits=pyfits.open(simages[ngal])
im=fits[0].data.copy()
fits.close()
axis('equal')
imshow(-1.*(im),interpolation='nearest',origin='upper',vmin=vminsdss,vmax=vmaxsdss,cmap=cm.Greys)
ax=gca()
ax.set_yticklabels(([]))
ax.set_xticklabels(([]))
size='100\"'
#text(.92, .5, size, horizontalalignment='center', verticalalignment='center',rotation=270, transform=ax.transAxes,fontsize=10)
#text(.1, .5, s, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
#text(.1, .5, s, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
s='$'+self.prefix+': \ '+galname+'$'
title(s,fontsize=12)
s='$ r-band$'
ylabel(s,fontsize=14)
xlabel(r'$100 \times 100 \ arcsec^2$',fontsize=10)
#ylabel('$100 \ arcsec$')
## subplot(nrow,ncol,j+2)#sdss masked image
## fits=pyfits.open(simages[ngal])
## im=fits[0].data.copy()
## fits.close()
## axis('equal')
## imshow(-1.*(im),interpolation='nearest',origin='upper')#,cmap='binary')#,vmin=myvmin,vmax=myvmax,cmap=cm.Greys)
## ax=gca()
## ax.set_yticklabels(([]))
## ax.set_xticklabels(([]))
## text(.9, .5, galname, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
subplot(nrow,ncol,j+3)#sdss profile
edat=load(sfiles[ngal],usecols=[1,2,3,21,40])
x=edat[:,0]
y=edat[:,1]
yerr=edat[:,2]
tflux=edat[:,3]
sarea=edat[:,4]
plot(x,y,'b.')
errorbar(x,y,yerr,fmt=None)
r90,sky,r90EncFlux,MaxEncFlux,r30,r30EncFlux=calcC90(x,y,yerr,tflux)
self.r30SDSS[ngal]=r30
self.r90SDSS[ngal]=r90
self.skySDSS[ngal]=sky
self.r30EncFluxSDSS[ngal]=r30EncFlux
self.r90EncFluxSDSS[ngal]=r90EncFlux
self.MaxEncFluxSDSS[ngal]=MaxEncFlux
axhline(sky,color='k',ls=':')
axvline(r90,color='k',ls='--')
axvline(r30,color='c',ls='--')
xlabel('$r \ (arcsec)$')
ylabel('$I(r)$')
s='$sky = %5.2f, \ r30 = %5.1f, \ r90 = %5.1f$'%(sky,r30,r90)
title(s,fontsize=10)
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
#text(.1, .5, s, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
subplot(nrow,ncol,j+2)
plot(x,tflux,'b.')
#plot(x,y*sarea,'r.')
axhline(MaxEncFlux,color='k',ls=':')
axvline(r90EncFlux,color='k',ls='--')
axvline(r30EncFlux,color='c',ls='--')
s='$max(F_{enc}) = %5.2e, \ r30 = %5.1f, \ r90 = %5.1f$'%(MaxEncFlux,r30EncFlux,r90EncFlux)
title(s,fontsize=10)
xlabel('$r \ (arcsec)$')
ylabel('$\Sigma Flux(<r)$')
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
xlim(0,50)
subplot(nrow,ncol,j+4)#sdss ln profile with fit
xfit=(x[y>5])
yfit=log(y[y>5])
m,b=polyfit(xfit,yfit,1)
plot(xfit,yfit-b,'r^')
xlabel('$r (arcsec)$')
ylabel(r'$ln(I(r))-ln(I_0)$')
#gradient, intercept, r_value, p_value, std_err = stats.linregress(xfit,yfit)
plot(xfit,m*xfit,'g')
axvline(-1./m,color='k',ls='--')
axhline(-1,color='k',ls=':')
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
s='$R_0 = %5.1f$'%(-1./m)
title(s, fontsize=10)
self.r0SDSS[ngal]=-1./m
subplot(nrow,ncol,j+5)#24um image
fits=pyfits.open(s24images[ngal])
im=fits[0].data.copy()
fits.close()
axis('equal')
imshow(-1.*(im),interpolation='nearest',origin='upper',vmin=vmin24,vmax=vmax24,cmap=cm.Greys)
ax=gca()
ax.set_yticklabels(([]))
ax.set_xticklabels(([]))
#text(.9, .5, galname, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
ylabel('$24 \ \mu m$',fontsize=14)
s1='$\Delta v/\sigma = %5.2f, \ \Delta r/R_{200} = %5.2f$'%(redshift[ngal],dr[ngal])
title(s1,fontsize=10)
xlabel(r'$100 \times 100 \ arcsec^2$',fontsize=10)
## subplot(nrow,ncol,j+6)#sdss masked image
## fits=pyfits.open(s24images[ngal])
## im=fits[0].data.copy()
## fits.close()
## axis('equal')
## imshow(-1.*(im),interpolation='nearest',origin='upper')#,cmap='binary')#,vmin=myvmin,vmax=myvmax,cmap=cm.Greys)
## ax=gca()
## ax.set_yticklabels(([]))
## ax.set_xticklabels(([]))
## text(.9, .5, galname, horizontalalignment='center', verticalalignment='center',rotation=90, transform=ax.transAxes)
subplot(nrow,ncol,j+7)#24um profile
edat=load(s24files[ngal],usecols=[1,2,3,21])
x=edat[:,0]
y=edat[:,1]
yerr=edat[:,2]
tflux=edat[:,3]
plot(x*pscale24,y,'b.')
errorbar(x*pscale24,y,yerr,fmt=None)
r90,sky,r90EncFlux,MaxEncFlux,r30,r30EncFlux=calcC90(x*pscale24,y,yerr,tflux)
self.r30F24[ngal]=r30
self.r90F24[ngal]=r90
self.skyF24[ngal]=sky
self.r30EncFluxF24[ngal]=r30EncFlux
self.r90EncFluxF24[ngal]=r90EncFlux
self.MaxEncFluxF24[ngal]=MaxEncFlux
xlabel('$r \ (arcsec)$')
ylabel('$I(r)$')
s='$sky = %5.2f, \ r30 = %5.1f, \ r90 = %5.1f$'%(sky,r30,r90)
title(s,fontsize=10)
axhline(sky,color='k',ls=':')
axvline(r90,color='k',ls='--')
axvline(r30,color='c',ls='--')
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
xlim(0,50)
subplot(nrow,ncol,j+6)
plot(x*pscale24,tflux,'b.')
axhline(MaxEncFlux,color='k',ls=':')
axvline(r90EncFlux,color='k',ls='--')
axvline(r30EncFlux,color='c',ls='--')
xlabel('$r \ (arcsec)$')
ylabel('$\Sigma Flux(<r)$')
s='$max(F_{enc}) = %5.2e, \ r30 = %5.1f, \ r90 = %5.1f$'%(MaxEncFlux,r30EncFlux,r90EncFlux)
title(s,fontsize=10)
xlim(0,50)
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
subplot(nrow,ncol,j+8)#24um ln profile with fit
xfit=(x[y>.05])
yfit=log(y[y>.05])
if len(xfit) > 1:
m,b=polyfit(xfit*pscale24,yfit,1)
plot(xfit*pscale24,m*xfit*pscale24,'g')
plot(xfit*pscale24,yfit-b,'r^')
axvline(-1./m,color='k',ls='--')
s='$R_0 = %5.1f$'%(-1./m)
self.r0F24[ngal]=-1./m#scale length from exponential fit
else:
plot(xfit*pscale24,yfit,'r^')#too few points to fit; don't reuse the stale m from the SDSS panel, leave r0F24 at zero
s='$R_0$ fit skipped'
xlabel('$r (arcsec)$')
ylabel(r'$ln(I(r))-ln(I_0)$')
#gradient, intercept, r_value, p_value, std_err = stats.linregress(xfit,yfit)
axhline(-1,color='k',ls=':')
xticks(fontsize=xticksize)
yticks(fontsize=yticksize)
title(s, fontsize=10)
ngal += 1
if ngal >= ngaltot:
figname=self.prefix+'Profiles'+str(galname)+'.png'
savefig(figname)
break
figname=self.prefix+'Profiles'+str(galname)+'.png'
savefig(figname)
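# calcC90 is defined elsewhere in this script; the sketch below is only an
# assumption about its behavior, reconstructed from the call sites above:
# calcC90(radius, intensity, intensity_err, enclosed_flux) ->
#     (r90, sky, r90EncFlux, MaxEncFlux, r30, r30EncFlux)
# One plausible reading: sky from the outer profile, and r30/r90 as the radii
# enclosing 30%/90% of the maximum enclosed flux.
def calc_c90_sketch(r, I, tflux):
    import numpy as np
    sky = np.median(I[-5:])                  # assume: sky ~ outermost isophotes
    maxenc = tflux.max()
    r30 = r[np.argmax(tflux >= .3*maxenc)]   # first radius enclosing 30% of flux
    r90 = r[np.argmax(tflux >= .9*maxenc)]   # first radius enclosing 90% of flux
    return r90, sky, maxenc, r30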
mypath=os.getcwd()
if mypath.find('Users') > -1:
print "Running on Rose's mac pro"
homedir='/Users/rfinn/'
elif mypath.find('home') > -1:
print "Running on coma"
homedir='/home/rfinn/'
mkw11=cluster('MKW11')
mkw8=cluster('MKW8')
awm4=cluster('AWM4')
a2052=cluster('A2052')
a2063=cluster('A2063')
ngc=cluster('NGC6107')
coma=cluster('Coma')
herc=cluster('Hercules')
a1367=cluster('A1367')
def plotpositionsall():
figure()
clf()
subplots_adjust(wspace=.25,hspace=.35)
for i in range(1,10):
if i == 1:
cl=mkw11
if i == 2:
cl=mkw8
if i == 3:
cl=awm4
if i == 4:
cl = ngc
if i == 5:
cl = a2052
if i == 6:
cl = a2063
if i == 7:
cl = coma
if i == 8:
cl = herc
if i == 9:
cl = a1367
subplot(3,3,i)
cl.plotpositions()
ax=gca()
text(-.75,-.35,'RA (deg)',fontsize=18,horizontalalignment='center',transform=ax.transAxes)
subplot(3,3,4)
text(-2.8,1.9,'Dec (deg)',fontsize=18,verticalalignment='center',rotation=90,transform=ax.transAxes)
savefig(homedir+'research/LocalClusters/SamplePlots/PlotPositionsAll.eps')
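# The nine-way if-chain above is repeated in every driver function below; a
# sketch of a more idiomatic alternative, assuming the nine cluster objects
# defined above (apply_to_all is illustrative and not used in this script):
clusterlist = [mkw11, mkw8, awm4, ngc, a2052, a2063, coma, herc, a1367]
def apply_to_all(methodname, use_subplot=True):
    for i, cl in enumerate(clusterlist):
        if use_subplot:
            subplot(3, 3, i+1)
        getattr(cl, methodname)()   # e.g. apply_to_all('plotpositions')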
def plotpositionson24all():
figure()
clf()
subplots_adjust(wspace=.25,hspace=.35)
for i in range(1,10):
if i == 1:
cl=mkw11
if i == 2:
cl=mkw8
if i == 3:
cl=awm4
if i == 4:
cl = ngc
if i == 5:
cl = a2052
if i == 6:
cl = a2063
if i == 7:
cl = coma
if i == 8:
cl = herc
if i == 9:
cl = a1367
subplot(3,3,i)
cl.plotpositionson24()
ax=gca()
text(-.75,-.35,'RA (deg)',fontsize=18,horizontalalignment='center',transform=ax.transAxes)
subplot(3,3,4)
text(-2.8,1.9,'Dec (deg)',fontsize=18,verticalalignment='center',rotation=90,transform=ax.transAxes)
savefig(homedir+'research/LocalClusters/SamplePlots/PlotPositionsOn24All.eps')
def plotrelativepositionson24all():
figure(figsize=[9,9])
clf()
subplots_adjust(wspace=.25,hspace=.35)
for i in range(1,10):
if i == 1:
cl=mkw11
if i == 2:
cl=mkw8
if i == 3:
cl=awm4
if i == 4:
cl = ngc
if i == 5:
cl = a2052
if i == 6:
cl = a2063
if i == 7:
cl = coma
if i == 8:
cl = herc
if i == 9:
cl = a1367
subplot(3,3,i)
cl.plotrelativepositionson24()
leg=legend(numpoints=1)#,fontsize=12)
for t in leg.get_texts():
t.set_fontsize('small')
ax=gca()
text(-.75,-.35,'$\Delta$RA (deg)',fontsize=18,horizontalalignment='center',transform=ax.transAxes)
subplot(3,3,4)
text(-2.8,1.9,'$\Delta$Dec (deg)',fontsize=18,verticalalignment='center',rotation=90,transform=ax.transAxes)
savefig(homedir+'research/LocalClusters/SamplePlots/PlotRelativePositionsOn24All.eps')
def plotveldrall():
figure(figsize=[9,6])
clf()
subplots_adjust(wspace=.35,hspace=.35)
for i in range(1,10):
if i == 1:
cl=mkw11
if i == 2:
cl=mkw8
if i == 3:
cl=awm4
if i == 4:
cl = ngc
if i == 5:
cl = a2052
if i == 6:
cl = a2063
if i == 7:
cl = coma
if i == 8:
cl = herc
if i == 9:
cl = a1367
subplot(3,3,i)
cl.plotveldr()
ax=gca()
text(-.75,-.35,'dr (deg)',fontsize=18,horizontalalignment='center',transform=ax.transAxes)
subplot(3,3,4)
text(-3.1,1.9,'$V_r$ (km/s)',fontsize=18,verticalalignment='center',rotation=90,transform=ax.transAxes)
savefig(homedir+'research/LocalClusters/SamplePlots/PlotVeldrAll.eps')
def plotveldron24all():
figure(figsize=[9,9])
clf()
subplots_adjust(wspace=.35,hspace=.35)
for i in range(1,10):
if i == 1:
cl=mkw11
if i == 2:
cl=mkw8
if i == 3:
cl=awm4
if i == 4:
cl = ngc
if i == 5:
cl = a2052
if i == 6:
cl = a2063
if i == 7:
cl = coma
if i == 8:
cl = herc
if i == 9:
cl = a1367
subplot(3,3,i)
cl.plotveldron24()
ax=gca()
text(-.75,-.35,'$\Delta$r (deg)',fontsize=18,horizontalalignment='center',transform=ax.transAxes)
subplot(3,3,4)
text(-3.1,1.9,'$V_r$ (km/s)',fontsize=18,verticalalignment='center',rotation=90,transform=ax.transAxes)
savefig(homedir+'research/LocalClusters/SamplePlots/PlotVeldrAllOn24.eps')
def checkmorphall():
for i in range(1,10):
if i == 1:
cl=mkw11
if i == 2:
cl=mkw8
if i == 3:
cl=awm4
if i == 4:
cl = ngc
if i == 5:
cl = a2052
if i == 6:
cl = a2063
if i == 7:
cl = coma
if i == 8:
cl = herc
if i == 9:
cl = a1367
cl.checkmorph()
def getSpirals():
for i in range(1,10):
if i == 1:
cl=mkw11
if i == 2:
cl=mkw8
if i == 3:
cl=awm4
if i == 4:
cl = ngc
if i == 5:
cl = a2052
if i == 6:
cl = a2063
if i == 7:
cl = coma
if i == 8:
cl = herc
if i == 9:
cl = a1367
cl.getFilesForProfileFitting()
#plotpositionsall()
#plotpositionson24all()
#plotveldrall()
#getSpirals()
#mkw11.fitprofiles()
|
gpl-3.0
|
Endika/edx-platform
|
common/lib/xmodule/xmodule/modulestore/tests/test_mixed_modulestore.py
|
7
|
136521
|
"""
Unit tests for the Mixed Modulestore, with DDT for the various stores (Split, Draft, XML)
"""
from collections import namedtuple
import datetime
import logging
import ddt
import itertools
import mimetypes
from uuid import uuid4
from contextlib import contextmanager
from mock import patch, Mock, call
# Mixed modulestore depends on django, so we'll manually configure some django settings
# before importing the module
# TODO remove this import and the configuration -- xmodule should not depend on django!
from django.conf import settings
# This import breaks this test file when run separately. Needs to be fixed! (PLAT-449)
from nose.plugins.attrib import attr
import pymongo
from pytz import UTC
from shutil import rmtree
from tempfile import mkdtemp
from xmodule.x_module import XModuleMixin
from xmodule.modulestore.edit_info import EditInfoMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.tests.utils import MongoContentstoreBuilder
from xmodule.contentstore.content import StaticContent
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore.xml_exporter import export_course_to_xml
if not settings.configured:
settings.configure()
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator, LibraryLocator
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.draft_and_published import UnsupportedRevisionError, DIRECT_ONLY_CATEGORIES
from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError, ReferentialIntegrityError, NoPathToItem
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.modulestore.search import path_to_location, navigation_index
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
from xmodule.modulestore.tests.factories import check_mongo_calls, check_exact_number_of_calls, \
mongo_uses_error_check
from xmodule.modulestore.tests.utils import create_modulestore_instance, LocationMixin, mock_tab_from_json
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.tests import DATA_DIR, CourseComparisonTest
log = logging.getLogger(__name__)
class CommonMixedModuleStoreSetup(CourseComparisonTest):
"""
Quasi-superclass which tests Location based apps against both split and mongo dbs (Locator and
Location-based dbs)
"""
HOST = MONGO_HOST
PORT = MONGO_PORT_NUM
DB = 'test_mongo_%s' % uuid4().hex[:5]
COLLECTION = 'modulestore'
ASSET_COLLECTION = 'assetstore'
FS_ROOT = DATA_DIR
DEFAULT_CLASS = 'xmodule.raw_module.RawDescriptor'
RENDER_TEMPLATE = lambda t_n, d, ctx=None, nsp='main': ''
MONGO_COURSEID = 'MITx/999/2013_Spring'
XML_COURSEID1 = 'edX/toy/2012_Fall'
XML_COURSEID2 = 'edX/simple/2012_Fall'
BAD_COURSE_ID = 'edX/simple'
modulestore_options = {
'default_class': DEFAULT_CLASS,
'fs_root': DATA_DIR,
'render_template': RENDER_TEMPLATE,
'xblock_mixins': (EditInfoMixin, InheritanceMixin, LocationMixin, XModuleMixin),
}
DOC_STORE_CONFIG = {
'host': HOST,
'port': PORT,
'db': DB,
'collection': COLLECTION,
'asset_collection': ASSET_COLLECTION,
}
MAPPINGS = {
XML_COURSEID1: 'xml',
XML_COURSEID2: 'xml',
BAD_COURSE_ID: 'xml',
}
OPTIONS = {
'stores': [
{
'NAME': ModuleStoreEnum.Type.mongo,
'ENGINE': 'xmodule.modulestore.mongo.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
{
'NAME': ModuleStoreEnum.Type.split,
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
{
'NAME': ModuleStoreEnum.Type.xml,
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': DATA_DIR,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'xblock_mixins': modulestore_options['xblock_mixins'],
}
},
],
'xblock_mixins': modulestore_options['xblock_mixins'],
}
def _compare_ignore_version(self, loc1, loc2, msg=None):
"""
AssertEqual replacement for CourseLocator
"""
if loc1.for_branch(None) != loc2.for_branch(None):
self.fail(self._formatMessage(msg, u"{} != {}".format(unicode(loc1), unicode(loc2))))
def setUp(self):
"""
Set up the database for testing
"""
super(CommonMixedModuleStoreSetup, self).setUp()
self.exclude_field(None, 'wiki_slug')
self.exclude_field(None, 'xml_attributes')
self.exclude_field(None, 'parent')
self.ignore_asset_key('_id')
self.ignore_asset_key('uploadDate')
self.ignore_asset_key('content_son')
self.ignore_asset_key('thumbnail_location')
self.options = getattr(self, 'options', self.OPTIONS)
self.connection = pymongo.MongoClient(
host=self.HOST,
port=self.PORT,
tz_aware=True,
)
self.connection.drop_database(self.DB)
self.addCleanup(self.connection.drop_database, self.DB)
self.addCleanup(self.connection.close)
self.addTypeEqualityFunc(BlockUsageLocator, '_compare_ignore_version')
self.addTypeEqualityFunc(CourseLocator, '_compare_ignore_version')
# define attrs which get set in initdb to quell pylint
self.writable_chapter_location = self.store = self.fake_location = self.xml_chapter_location = None
self.course_locations = {}
self.user_id = ModuleStoreEnum.UserID.test
# pylint: disable=invalid-name
def _create_course(self, course_key):
"""
Create a course w/ one item in the persistence store using the given course & item location.
"""
# create course
with self.store.bulk_operations(course_key):
self.course = self.store.create_course(course_key.org, course_key.course, course_key.run, self.user_id)
if isinstance(self.course.id, CourseLocator):
self.course_locations[self.MONGO_COURSEID] = self.course.location
else:
self.assertEqual(self.course.id, course_key)
# create chapter
chapter = self.store.create_child(self.user_id, self.course.location, 'chapter', block_id='Overview')
self.writable_chapter_location = chapter.location
def _create_block_hierarchy(self):
"""
Creates a hierarchy of blocks for testing
Each block's (version_agnostic) location is assigned as a field of the class and can be easily accessed
"""
BlockInfo = namedtuple('BlockInfo', 'field_name, category, display_name, sub_tree')
trees = [
BlockInfo(
'chapter_x', 'chapter', 'Chapter_x', [
BlockInfo(
'sequential_x1', 'sequential', 'Sequential_x1', [
BlockInfo(
'vertical_x1a', 'vertical', 'Vertical_x1a', [
BlockInfo('problem_x1a_1', 'problem', 'Problem_x1a_1', []),
BlockInfo('problem_x1a_2', 'problem', 'Problem_x1a_2', []),
BlockInfo('problem_x1a_3', 'problem', 'Problem_x1a_3', []),
BlockInfo('html_x1a_1', 'html', 'HTML_x1a_1', []),
]
),
BlockInfo(
'vertical_x1b', 'vertical', 'Vertical_x1b', []
)
]
),
BlockInfo(
'sequential_x2', 'sequential', 'Sequential_x2', []
)
]
),
BlockInfo(
'chapter_y', 'chapter', 'Chapter_y', [
BlockInfo(
'sequential_y1', 'sequential', 'Sequential_y1', [
BlockInfo(
'vertical_y1a', 'vertical', 'Vertical_y1a', [
BlockInfo('problem_y1a_1', 'problem', 'Problem_y1a_1', []),
BlockInfo('problem_y1a_2', 'problem', 'Problem_y1a_2', []),
BlockInfo('problem_y1a_3', 'problem', 'Problem_y1a_3', []),
]
)
]
)
]
)
]
def create_sub_tree(parent, block_info):
"""
recursive function that creates the given block and its descendants
"""
block = self.store.create_child(
self.user_id, parent.location,
block_info.category, block_id=block_info.display_name,
fields={'display_name': block_info.display_name},
)
for tree in block_info.sub_tree:
create_sub_tree(block, tree)
setattr(self, block_info.field_name, block.location)
with self.store.bulk_operations(self.course.id):
for tree in trees:
create_sub_tree(self.course, tree)
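# After _create_block_hierarchy() runs, each created block's location is
# available as an attribute named by its BlockInfo.field_name, e.g.
# self.problem_x1a_1 or self.vertical_y1a, as the tests below rely on.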
def _course_key_from_string(self, string):
"""
Get the course key for the given course string
"""
return self.course_locations[string].course_key
def _has_changes(self, location):
"""
Helper function that loads the item before calling has_changes
"""
return self.store.has_changes(self.store.get_item(location))
# pylint: disable=dangerous-default-value
def _initialize_mixed(self, mappings=MAPPINGS, contentstore=None):
"""
initializes the mixed modulestore.
"""
self.store = MixedModuleStore(
contentstore, create_modulestore_instance=create_modulestore_instance,
mappings=mappings,
**self.options
)
self.addCleanup(self.store.close_all_connections)
def initdb(self, default):
"""
Initialize the database and create one test course in it
"""
# set the default modulestore
store_configs = self.options['stores']
for index in range(len(store_configs)):
if store_configs[index]['NAME'] == default:
if index > 0:
store_configs[index], store_configs[0] = store_configs[0], store_configs[index]
break
self._initialize_mixed()
# convert to CourseKeys
self.course_locations = {
course_id: CourseLocator.from_string(course_id)
for course_id in [self.MONGO_COURSEID, self.XML_COURSEID1, self.XML_COURSEID2]
}
# and then to the root UsageKey
self.course_locations = {
course_id: course_key.make_usage_key('course', course_key.run)
for course_id, course_key in self.course_locations.iteritems()
}
mongo_course_key = self.course_locations[self.MONGO_COURSEID].course_key
self.fake_location = self.store.make_course_key(mongo_course_key.org, mongo_course_key.course, mongo_course_key.run).make_usage_key('vertical', 'fake')
self.xml_chapter_location = self.course_locations[self.XML_COURSEID1].replace(
category='chapter', name='Overview'
)
self._create_course(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertEqual(default, self.store.get_modulestore_type(self.course.id))
@ddt.ddt
@attr('mongo')
class TestMixedModuleStore(CommonMixedModuleStoreSetup):
"""
Tests of the MixedModulestore interface methods.
"""
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_modulestore_type(self, default_ms):
"""
Make sure we get back the store type we expect for given mappings
"""
self.initdb(default_ms)
self.assertEqual(self.store.get_modulestore_type(
self._course_key_from_string(self.XML_COURSEID1)), ModuleStoreEnum.Type.xml
)
self.assertEqual(self.store.get_modulestore_type(
self._course_key_from_string(self.XML_COURSEID2)), ModuleStoreEnum.Type.xml
)
self.assertEqual(self.store.get_modulestore_type(
self._course_key_from_string(self.MONGO_COURSEID)), default_ms
)
# try an unknown mapping, it should be the 'default' store
self.assertEqual(self.store.get_modulestore_type(
SlashSeparatedCourseKey('foo', 'bar', '2012_Fall')), default_ms
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_modulestore_cache(self, default_ms):
"""
Make sure we cache discovered course mappings
"""
self.initdb(default_ms)
# unset mappings
self.store.mappings = {}
course_key = self.course_locations[self.MONGO_COURSEID].course_key
with check_exact_number_of_calls(self.store.default_modulestore, 'has_course', 1):
self.assertEqual(self.store.default_modulestore, self.store._get_modulestore_for_courselike(course_key)) # pylint: disable=protected-access
self.assertIn(course_key, self.store.mappings)
self.assertEqual(self.store.default_modulestore, self.store._get_modulestore_for_courselike(course_key)) # pylint: disable=protected-access
@ddt.data(*itertools.product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(True, False)
))
@ddt.unpack
def test_duplicate_course_error(self, default_ms, reset_mixed_mappings):
"""
Make sure that creating a course that already exists raises DuplicateCourseError
"""
self._initialize_mixed(mappings={})
with self.store.default_store(default_ms):
self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
if reset_mixed_mappings:
self.store.mappings = {}
with self.assertRaises(DuplicateCourseError):
self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
# Draft:
# problem: One lookup to locate an item that exists
# fake: one w/ wildcard version
# split has one lookup for the course and then one for the course items
@ddt.data((ModuleStoreEnum.Type.mongo, [1, 1], 0), (ModuleStoreEnum.Type.split, [2, 2], 0))
@ddt.unpack
def test_has_item(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
self.assertTrue(self.store.has_item(self.course_locations[self.XML_COURSEID1]))
with check_mongo_calls(max_find.pop(0), max_send):
self.assertTrue(self.store.has_item(self.problem_x1a_1))
# try negative cases
self.assertFalse(self.store.has_item(
self.course_locations[self.XML_COURSEID1].replace(name='not_findable', category='problem')
))
with check_mongo_calls(max_find.pop(0), max_send):
self.assertFalse(self.store.has_item(self.fake_location))
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.has_item(self.fake_location, revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# draft queries:
# problem: find draft item, find all items pertinent to inheritance computation, find parent
# non-existent problem: find draft, find published
# split:
# problem: active_versions, structure
# non-existent problem: ditto
@ddt.data((ModuleStoreEnum.Type.mongo, [3, 2], 0), (ModuleStoreEnum.Type.split, [2, 2], 0))
@ddt.unpack
def test_get_item(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
self.assertIsNotNone(self.store.get_item(self.course_locations[self.XML_COURSEID1]))
with check_mongo_calls(max_find.pop(0), max_send):
self.assertIsNotNone(self.store.get_item(self.problem_x1a_1))
# try negative cases
with self.assertRaises(ItemNotFoundError):
self.store.get_item(
self.course_locations[self.XML_COURSEID1].replace(name='not_findable', category='problem')
)
with check_mongo_calls(max_find.pop(0), max_send):
with self.assertRaises(ItemNotFoundError):
self.store.get_item(self.fake_location)
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.get_item(self.fake_location, revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Draft:
# wildcard query, 6! load pertinent items for inheritance calls, load parents, course root fetch (why)
# Split:
# active_versions (with regex), structure, and spurious active_versions refetch
@ddt.data((ModuleStoreEnum.Type.mongo, 14, 0), (ModuleStoreEnum.Type.split, 3, 0))
@ddt.unpack
def test_get_items(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
course_locn = self.course_locations[self.XML_COURSEID1]
# NOTE: use get_course if you just want the course. get_items is expensive
modules = self.store.get_items(course_locn.course_key, qualifiers={'category': 'course'})
self.assertEqual(len(modules), 1)
self.assertEqual(modules[0].location, course_locn)
course_locn = self.course_locations[self.MONGO_COURSEID]
with check_mongo_calls(max_find, max_send):
modules = self.store.get_items(course_locn.course_key, qualifiers={'category': 'problem'})
self.assertEqual(len(modules), 6)
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.get_items(
self.course_locations[self.MONGO_COURSEID].course_key,
revision=ModuleStoreEnum.RevisionOption.draft_preferred
)
@ddt.data((ModuleStoreEnum.Type.split, 2, False), (ModuleStoreEnum.Type.mongo, 3, True))
@ddt.unpack
def test_get_items_include_orphans(self, default_ms, expected_items_in_tree, orphan_in_items):
"""
Test that the `include_orphans` option restricts `get_items` to items that are present in the course tree.
It tests that orphans are not fetched when calling `get_items` with `include_orphans=False`.
Params:
expected_items_in_tree:
Number of items returned when `get_items` is called with `include_orphans=False`.
In split, orphan items are excluded.
In mongo, orphan items are still returned because `include_orphans` has no effect on the mongo
modulestore, which returns the same number of items as a call without the `include_orphans` kwarg.
orphan_in_items:
Whether an orphan is expected in the results when `get_items` is called with `include_orphans=False`.
False for the split modulestore, because split excludes orphans when `include_orphans=False`.
True for the mongo modulestore, because `include_orphans` has no effect on mongo.
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
course_key = test_course.id
items = self.store.get_items(course_key)
# Check items found are either course or about type
self.assertTrue(set(['course', 'about']).issubset(set([item.location.block_type for item in items])))
# Assert that about is a detached category found in get_items
self.assertIn(
[item.location.block_type for item in items if item.location.block_type == 'about'][0],
DETACHED_XBLOCK_TYPES
)
self.assertEqual(len(items), 2)
# Check that orphans are not found
orphans = self.store.get_orphans(course_key)
self.assertEqual(len(orphans), 0)
# Add an orphan to test course
orphan = course_key.make_usage_key('chapter', 'OrphanChapter')
self.store.create_item(self.user_id, orphan.course_key, orphan.block_type, block_id=orphan.block_id)
# Check that now an orphan is found
orphans = self.store.get_orphans(course_key)
self.assertIn(orphan, orphans)
self.assertEqual(len(orphans), 1)
# Check that `get_items` now retrieves the extra item added above, which is an orphan.
items = self.store.get_items(course_key)
self.assertIn(orphan, [item.location for item in items])
self.assertEqual(len(items), 3)
# Check whether `get_items` with `include_orphans=False` retrieves an orphan block (it should not in split).
items_in_tree = self.store.get_items(course_key, include_orphans=False)
# Check that course and about blocks are found in get_items
self.assertTrue(set(['course', 'about']).issubset(set([item.location.block_type for item in items_in_tree])))
# Check orphan is found or not - this is based on mongo/split modulestore. It should be found in mongo.
self.assertEqual(orphan in [item.location for item in items_in_tree], orphan_in_items)
self.assertEqual(len(items_in_tree), expected_items_in_tree)
# draft: get draft, get ancestors up to course (2-6), compute inheritance
# sends: update problem and then each ancestor up to course (edit info)
# split: active_versions, definitions (calculator field), structures
# 2 sends to update index & structure (note, it would also be definition if a content field changed)
@ddt.data((ModuleStoreEnum.Type.mongo, 7, 5), (ModuleStoreEnum.Type.split, 3, 2))
@ddt.unpack
def test_update_item(self, default_ms, max_find, max_send):
"""
Update should fail for r/o dbs and succeed for r/w ones
"""
self.initdb(default_ms)
self._create_block_hierarchy()
course = self.store.get_course(self.course_locations[self.XML_COURSEID1].course_key)
# if the following assertion fails, the default changed, making this test a noop -- change it
self.assertFalse(course.show_calculator, "Default changed making test meaningless")
course.show_calculator = True
with self.assertRaises(NotImplementedError): # ensure it doesn't allow writing
self.store.update_item(course, self.user_id)
# now do it for a r/w db
problem = self.store.get_item(self.problem_x1a_1)
# if the following assertion fails, the default changed, making this test a noop -- change it
self.assertNotEqual(problem.max_attempts, 2, "Default changed making test meaningless")
problem.max_attempts = 2
with check_mongo_calls(max_find, max_send):
problem = self.store.update_item(problem, self.user_id)
self.assertEqual(problem.max_attempts, 2, "Update didn't persist")
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_direct_only(self, default_ms):
"""
Tests that has_changes() returns false when a new xblock in a direct only category is checked
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create dummy direct only xblocks
chapter = self.store.create_item(
self.user_id,
test_course.id,
'chapter',
block_id='vertical_container'
)
# Check that neither xblock has changes
self.assertFalse(self.store.has_changes(test_course))
self.assertFalse(self.store.has_changes(chapter))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes(self, default_ms):
"""
Tests that has_changes() only returns true when changes are present
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
xblock = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
self.assertTrue(self.store.has_changes(xblock))
# Publish and verify that there are no unpublished changes
newXBlock = self.store.publish(xblock.location, self.user_id)
self.assertFalse(self.store.has_changes(newXBlock))
# Change the component, then check that there now are changes
component = self.store.get_item(xblock.location)
component.display_name = 'Changed Display Name'
component = self.store.update_item(component, self.user_id)
self.assertTrue(self.store.has_changes(component))
# Publish and verify again
component = self.store.publish(component.location, self.user_id)
self.assertFalse(self.store.has_changes(component))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_unit_stuck_in_draft_mode(self, default_ms):
"""
After revert_to_published() the has_changes() should return false if draft has no changes
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
xblock = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
self.assertTrue(self.store.has_changes(xblock))
# Publish and verify that there are no unpublished changes
component = self.store.publish(xblock.location, self.user_id)
self.assertFalse(self.store.has_changes(component))
self.store.revert_to_published(component.location, self.user_id)
component = self.store.get_item(component.location)
self.assertFalse(self.store.has_changes(component))
# Publish and verify again
component = self.store.publish(component.location, self.user_id)
self.assertFalse(self.store.has_changes(component))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_unit_stuck_in_published_mode(self, default_ms):
"""
After revert_to_published() the has_changes() should return true if draft has changes
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
xblock = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
self.assertTrue(self.store.has_changes(xblock))
# Publish and verify that there are no unpublished changes
component = self.store.publish(xblock.location, self.user_id)
self.assertFalse(self.store.has_changes(component))
# Discard changes and verify that there are no changes
self.store.revert_to_published(component.location, self.user_id)
component = self.store.get_item(component.location)
self.assertFalse(self.store.has_changes(component))
# Change the component, then check that there now are changes
component = self.store.get_item(component.location)
component.display_name = 'Changed Display Name'
self.store.update_item(component, self.user_id)
# Verify that changes are present
self.assertTrue(self.store.has_changes(component))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_unit_stuck_in_published_mode_after_delete(self, default_ms):
"""
Test that a unit does not get stuck in published mode
after discarding a component's changes and then deleting the component
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy vertical & html component to test against
vertical = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
component = self.store.create_child(
self.user_id,
vertical.location,
'html',
block_id='html_component'
)
# publish vertical changes
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
# Change a component, then check that there now are changes
component = self.store.get_item(component.location)
component.display_name = 'Changed Display Name'
self.store.update_item(component, self.user_id)
self.assertTrue(self._has_changes(vertical.location))
# Discard changes and verify that there are no changes
self.store.revert_to_published(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
# Delete the component and verify that the unit has changes
self.store.delete_item(component.location, self.user_id)
vertical = self.store.get_item(vertical.location)
self.assertTrue(self._has_changes(vertical.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_publish_automatically_after_delete_unit(self, default_ms):
"""
Check that sequential publishes automatically after deleting a unit
"""
self.initdb(default_ms)
test_course = self.store.create_course('test_org', 'test_course', 'test_run', self.user_id)
# create sequential and vertical to test against
sequential = self.store.create_child(self.user_id, test_course.location, 'sequential', 'test_sequential')
vertical = self.store.create_child(self.user_id, sequential.location, 'vertical', 'test_vertical')
# publish sequential changes
self.store.publish(sequential.location, self.user_id)
self.assertFalse(self._has_changes(sequential.location))
# delete vertical and check sequential has no changes
self.store.delete_item(vertical.location, self.user_id)
self.assertFalse(self._has_changes(sequential.location))
def setup_has_changes(self, default_ms):
"""
Common set up for has_changes tests below.
Returns a dictionary of useful location maps for testing.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
locations = {
'grandparent': self.chapter_x,
'parent_sibling': self.sequential_x2,
'parent': self.sequential_x1,
'child_sibling': self.vertical_x1b,
'child': self.vertical_x1a,
}
# Publish the vertical units
self.store.publish(locations['parent_sibling'], self.user_id)
self.store.publish(locations['parent'], self.user_id)
return locations
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_ancestors(self, default_ms):
"""
Tests that has_changes() returns true on ancestors when a child is changed
"""
locations = self.setup_has_changes(default_ms)
# Verify that there are no unpublished changes
for key in locations:
self.assertFalse(self._has_changes(locations[key]))
# Change the child
child = self.store.get_item(locations['child'])
child.display_name = 'Changed Display Name'
self.store.update_item(child, self.user_id)
# All ancestors should have changes, but not siblings
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
self.assertTrue(self._has_changes(locations['child']))
self.assertFalse(self._has_changes(locations['parent_sibling']))
self.assertFalse(self._has_changes(locations['child_sibling']))
# Publish the unit with changes
self.store.publish(locations['parent'], self.user_id)
# Verify that there are no unpublished changes
for key in locations:
self.assertFalse(self._has_changes(locations[key]))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_publish_ancestors(self, default_ms):
"""
        Tests that has_changes() returns false on ancestors only after all changed children have been published
"""
locations = self.setup_has_changes(default_ms)
# Verify that there are no unpublished changes
for key in locations:
self.assertFalse(self._has_changes(locations[key]))
# Change both children
child = self.store.get_item(locations['child'])
child_sibling = self.store.get_item(locations['child_sibling'])
child.display_name = 'Changed Display Name'
child_sibling.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=self.user_id)
self.store.update_item(child_sibling, user_id=self.user_id)
# Verify that ancestors have changes
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
# Publish one child
self.store.publish(locations['child_sibling'], self.user_id)
# Verify that ancestors still have changes
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
# Publish the other child
self.store.publish(locations['child'], self.user_id)
# Verify that ancestors now have no changes
self.assertFalse(self._has_changes(locations['grandparent']))
self.assertFalse(self._has_changes(locations['parent']))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_add_remove_child(self, default_ms):
"""
Tests that has_changes() returns true for the parent when a child with changes is added
and false when that child is removed.
"""
locations = self.setup_has_changes(default_ms)
# Test that the ancestors don't have changes
self.assertFalse(self._has_changes(locations['grandparent']))
self.assertFalse(self._has_changes(locations['parent']))
# Create a new child and attach it to parent
self.store.create_child(
self.user_id,
locations['parent'],
'vertical',
block_id='new_child',
)
# Verify that the ancestors now have changes
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
# Remove the child from the parent
parent = self.store.get_item(locations['parent'])
parent.children = [locations['child'], locations['child_sibling']]
self.store.update_item(parent, user_id=self.user_id)
# Verify that ancestors now have no changes
self.assertFalse(self._has_changes(locations['grandparent']))
self.assertFalse(self._has_changes(locations['parent']))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_has_changes_non_direct_only_children(self, default_ms):
"""
        Tests that has_changes() returns true after editing the child of a vertical (where neither is a direct-only category).
"""
self.initdb(default_ms)
parent = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='parent',
)
child = self.store.create_child(
self.user_id,
parent.location,
'html',
block_id='child',
)
self.store.publish(parent.location, self.user_id)
# Verify that there are no changes
self.assertFalse(self._has_changes(parent.location))
self.assertFalse(self._has_changes(child.location))
# Change the child
child.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=self.user_id)
# Verify that both parent and child have changes
self.assertTrue(self._has_changes(parent.location))
self.assertTrue(self._has_changes(child.location))
@ddt.data(*itertools.product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(ModuleStoreEnum.Branch.draft_preferred, ModuleStoreEnum.Branch.published_only)
))
@ddt.unpack
def test_has_changes_missing_child(self, default_ms, default_branch):
"""
Tests that has_changes() does not throw an exception when a child doesn't exist.
"""
self.initdb(default_ms)
with self.store.branch_setting(default_branch, self.course.id):
# Create the parent and point it to a fake child
parent = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='parent',
)
parent.children += [self.course.id.make_usage_key('vertical', 'does_not_exist')]
parent = self.store.update_item(parent, self.user_id)
            # Checking the parent for changes should return True, not throw an exception
self.assertTrue(self.store.has_changes(parent))
# Draft
# Find: find parents (definition.children query), get parent, get course (fill in run?),
# find parents of the parent (course), get inheritance items,
# get item (to delete subtree), get inheritance again.
# Sends: delete item, update parent
# Split
# Find: active_versions, 2 structures (published & draft), definition (unnecessary)
# Sends: updated draft and published structures and active_versions
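    # check_mongo_calls(max_find, max_send) below appears to act as a context
    # manager asserting upper bounds on the number of Mongo find and send
    # (write) operations issued inside the block; the counts in the ddt data
    # are those bounds per store type.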
@ddt.data((ModuleStoreEnum.Type.mongo, 7, 2), (ModuleStoreEnum.Type.split, 3, 3))
@ddt.unpack
def test_delete_item(self, default_ms, max_find, max_send):
"""
        Delete should be rejected on the read-only store and succeed on the read/write one
"""
self.initdb(default_ms)
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
        # try deleting the read-only (XML) chapter, to ensure it can't be deleted
with self.assertRaises(NotImplementedError):
self.store.delete_item(self.xml_chapter_location, self.user_id)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, self.writable_chapter_location.course_key):
with check_mongo_calls(max_find, max_send):
self.store.delete_item(self.writable_chapter_location, self.user_id)
# verify it's gone
with self.assertRaises(ItemNotFoundError):
self.store.get_item(self.writable_chapter_location)
# verify it's gone from published too
with self.assertRaises(ItemNotFoundError):
self.store.get_item(self.writable_chapter_location, revision=ModuleStoreEnum.RevisionOption.published_only)
# Draft:
# queries: find parent (definition.children), count versions of item, get parent, count grandparents,
# inheritance items, draft item, draft child, inheritance
# sends: delete draft vertical and update parent
# Split:
# queries: active_versions, draft and published structures, definition (unnecessary)
# sends: update published (why?), draft, and active_versions
@ddt.data((ModuleStoreEnum.Type.mongo, 9, 2), (ModuleStoreEnum.Type.split, 4, 3))
@ddt.unpack
def test_delete_private_vertical(self, default_ms, max_find, max_send):
"""
        Because old Mongo treated verticals as the first layer that could be draft, deletion has some
        interesting behavioral properties, which this test gets at.
"""
self.initdb(default_ms)
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
# create and delete a private vertical with private children
private_vert = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, self.course_locations[self.MONGO_COURSEID],
'vertical', block_id='private'
)
private_leaf = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, private_vert.location, 'html', block_id='private_leaf'
)
# verify pre delete state (just to verify that the test is valid)
if hasattr(private_vert.location, 'version_guid'):
# change to the HEAD version
vert_loc = private_vert.location.for_version(private_leaf.location.version_guid)
else:
vert_loc = private_vert.location
self.assertTrue(self.store.has_item(vert_loc))
self.assertTrue(self.store.has_item(private_leaf.location))
course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key, 0)
self.assertIn(vert_loc, course.children)
# delete the vertical and ensure the course no longer points to it
with check_mongo_calls(max_find, max_send):
self.store.delete_item(vert_loc, self.user_id)
course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key, 0)
if hasattr(private_vert.location, 'version_guid'):
# change to the HEAD version
vert_loc = private_vert.location.for_version(course.location.version_guid)
leaf_loc = private_leaf.location.for_version(course.location.version_guid)
else:
vert_loc = private_vert.location
leaf_loc = private_leaf.location
self.assertFalse(self.store.has_item(vert_loc))
self.assertFalse(self.store.has_item(leaf_loc))
self.assertNotIn(vert_loc, course.children)
# Draft:
# find: find parent (definition.children) 2x, find draft item, get inheritance items
# send: one delete query for specific item
# Split:
# find: active_version & structure (cached)
# send: update structure and active_versions
@ddt.data((ModuleStoreEnum.Type.mongo, 4, 1), (ModuleStoreEnum.Type.split, 2, 2))
@ddt.unpack
def test_delete_draft_vertical(self, default_ms, max_find, max_send):
"""
Test deleting a draft vertical which has a published version.
"""
self.initdb(default_ms)
# reproduce bug STUD-1965
# create and delete a private vertical with private children
private_vert = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, self.course_locations[self.MONGO_COURSEID], 'vertical', block_id='publish'
)
private_leaf = self.store.create_child(
self.user_id, private_vert.location, 'html', block_id='bug_leaf'
)
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.delete_item(
private_leaf.location,
self.user_id,
revision=ModuleStoreEnum.RevisionOption.draft_preferred
)
self.store.publish(private_vert.location, self.user_id)
private_leaf.display_name = 'change me'
private_leaf = self.store.update_item(private_leaf, self.user_id)
# test succeeds if delete succeeds w/o error
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
with check_mongo_calls(max_find, max_send):
self.store.delete_item(private_leaf.location, self.user_id)
# Draft:
# 1) find all courses (wildcard),
# 2) get each course 1 at a time (1 course),
# 3) wildcard split if it has any (1) but it doesn't
# Split:
# 1) wildcard split search,
# 2-4) active_versions, structure, definition (s/b lazy; so, unnecessary)
# 5) wildcard draft mongo which has none
@ddt.data((ModuleStoreEnum.Type.mongo, 3, 0), (ModuleStoreEnum.Type.split, 5, 0))
@ddt.unpack
def test_get_courses(self, default_ms, max_find, max_send):
self.initdb(default_ms)
# we should have 3 total courses across all stores
with check_mongo_calls(max_find, max_send):
courses = self.store.get_courses()
course_ids = [course.location for course in courses]
self.assertEqual(len(courses), 3, "Not 3 courses: {}".format(course_ids))
self.assertIn(self.course_locations[self.MONGO_COURSEID], course_ids)
self.assertIn(self.course_locations[self.XML_COURSEID1], course_ids)
self.assertIn(self.course_locations[self.XML_COURSEID2], course_ids)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
draft_courses = self.store.get_courses(remove_branch=True)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
published_courses = self.store.get_courses(remove_branch=True)
self.assertEquals([c.id for c in draft_courses], [c.id for c in published_courses])
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_create_child_detached_tabs(self, default_ms):
"""
        Test the 'create_child' method with a detached category ('static_tab')
        to check that the new static tab is not a direct child of the course
"""
self.initdb(default_ms)
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertEqual(len(mongo_course.children), 1)
# create a static tab of the course
self.store.create_child(
self.user_id,
self.course.location,
'static_tab'
)
        # now check that the course has the same number of children
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertEqual(len(mongo_course.children), 1)
def test_xml_get_courses(self):
"""
Test that the xml modulestore only loaded the courses from the maps.
"""
self.initdb(ModuleStoreEnum.Type.mongo)
xml_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.xml) # pylint: disable=protected-access
courses = xml_store.get_courses()
self.assertEqual(len(courses), 2)
course_ids = [course.id for course in courses]
self.assertIn(self.course_locations[self.XML_COURSEID1].course_key, course_ids)
self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, course_ids)
# this course is in the directory from which we loaded courses but not in the map
self.assertNotIn("edX/toy/TT_2012_Fall", course_ids)
def test_xml_no_write(self):
"""
Test that the xml modulestore doesn't allow write ops.
"""
self.initdb(ModuleStoreEnum.Type.mongo)
xml_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.xml) # pylint: disable=protected-access
# the important thing is not which exception it raises but that it raises an exception
with self.assertRaises(AttributeError):
xml_store.create_course("org", "course", "run", self.user_id)
# draft is 2: find out which ms owns course, get item
# split: active_versions, structure, definition (to load course wiki string)
@ddt.data((ModuleStoreEnum.Type.mongo, 2, 0), (ModuleStoreEnum.Type.split, 3, 0))
@ddt.unpack
def test_get_course(self, default_ms, max_find, max_send):
"""
        This test is here for performance comparison, not functionality. It tests the performance
        of getting an item whose scope.content fields are looked at.
"""
self.initdb(default_ms)
with check_mongo_calls(max_find, max_send):
course = self.store.get_item(self.course_locations[self.MONGO_COURSEID])
self.assertEqual(course.id, self.course_locations[self.MONGO_COURSEID].course_key)
course = self.store.get_item(self.course_locations[self.XML_COURSEID1])
self.assertEqual(course.id, self.course_locations[self.XML_COURSEID1].course_key)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_library(self, default_ms):
"""
Test that create_library and get_library work regardless of the default modulestore.
Other tests of MixedModulestore support are in test_libraries.py but this one must
be done here so we can test the configuration where Draft/old is the first modulestore.
"""
self.initdb(default_ms)
with self.store.default_store(ModuleStoreEnum.Type.split): # The CMS also wraps create_library like this
library = self.store.create_library("org", "lib", self.user_id, {"display_name": "Test Library"})
library_key = library.location.library_key
self.assertIsInstance(library_key, LibraryLocator)
# Now load with get_library and make sure it works:
library = self.store.get_library(library_key)
self.assertEqual(library.location.library_key, library_key)
# Clear the mappings so we can test get_library code path without mapping set:
self.store.mappings.clear()
library = self.store.get_library(library_key)
self.assertEqual(library.location.library_key, library_key)
    # note this doesn't test getting a public item via draft_preferred, for which Draft would take 2 hits (Split
    # still only 2)
# Draft: get_parent
# Split: active_versions, structure
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_get_parent_locations(self, default_ms, max_find, max_send):
"""
        Test a simple get_parent for a direct-only category (i.e., always published)
"""
self.initdb(default_ms)
self._create_block_hierarchy()
with check_mongo_calls(max_find, max_send):
parent = self.store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
parent = self.store.get_parent_location(self.xml_chapter_location)
self.assertEqual(parent, self.course_locations[self.XML_COURSEID1])
def verify_get_parent_locations_results(self, expected_results):
"""
Verifies the results of calling get_parent_locations matches expected_results.
"""
for child_location, parent_location, revision in expected_results:
self.assertEqual(
parent_location,
self.store.get_parent_location(child_location, revision=revision)
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_parent_locations_moved_child(self, default_ms):
self.initdb(default_ms)
self._create_block_hierarchy()
# publish the course
self.course = self.store.publish(self.course.location, self.user_id)
with self.store.bulk_operations(self.course.id):
# make drafts of verticals
self.store.convert_to_draft(self.vertical_x1a, self.user_id)
self.store.convert_to_draft(self.vertical_y1a, self.user_id)
# move child problem_x1a_1 to vertical_y1a
child_to_move_location = self.problem_x1a_1
new_parent_location = self.vertical_y1a
old_parent_location = self.vertical_x1a
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
old_parent = self.store.get_item(child_to_move_location).get_parent()
self.assertEqual(old_parent_location, old_parent.location)
child_to_move_contextualized = child_to_move_location.map_into_course(old_parent.location.course_key)
old_parent.children.remove(child_to_move_contextualized)
self.store.update_item(old_parent, self.user_id)
new_parent = self.store.get_item(new_parent_location)
new_parent.children.append(child_to_move_location)
self.store.update_item(new_parent, self.user_id)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
self.assertEqual(new_parent_location, self.store.get_item(child_to_move_location).get_parent().location)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertEqual(old_parent_location, self.store.get_item(child_to_move_location).get_parent().location)
old_parent_published_location = old_parent_location.for_branch(ModuleStoreEnum.BranchName.published)
self.verify_get_parent_locations_results([
(child_to_move_location, new_parent_location, None),
(child_to_move_location, new_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_move_location, old_parent_published_location, ModuleStoreEnum.RevisionOption.published_only),
])
# publish the course again
self.store.publish(self.course.location, self.user_id)
new_parent_published_location = new_parent_location.for_branch(ModuleStoreEnum.BranchName.published)
self.verify_get_parent_locations_results([
(child_to_move_location, new_parent_location, None),
(child_to_move_location, new_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_move_location, new_parent_published_location, ModuleStoreEnum.RevisionOption.published_only),
])
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_get_parent_locations_deleted_child(self, default_ms):
self.initdb(default_ms)
self._create_block_hierarchy()
# publish the course
self.store.publish(self.course.location, self.user_id)
# make draft of vertical
self.store.convert_to_draft(self.vertical_y1a, self.user_id)
# delete child problem_y1a_1
child_to_delete_location = self.problem_y1a_1
old_parent_location = self.vertical_y1a
self.store.delete_item(child_to_delete_location, self.user_id)
self.verify_get_parent_locations_results([
(child_to_delete_location, old_parent_location, None),
# Note: The following could be an unexpected result, but we want to avoid an extra database call
(child_to_delete_location, old_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_delete_location, old_parent_location, ModuleStoreEnum.RevisionOption.published_only),
])
# publish the course again
self.store.publish(self.course.location, self.user_id)
self.verify_get_parent_locations_results([
(child_to_delete_location, None, None),
(child_to_delete_location, None, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_delete_location, None, ModuleStoreEnum.RevisionOption.published_only),
])
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_get_parent_location_draft(self, default_ms):
"""
Test that "get_parent_location" method returns first published parent
for a draft component, if it has many possible parents (including
draft parents).
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
mongo_store = self.store._get_modulestore_for_courselike(course_id) # pylint: disable=protected-access
# add another parent (unit) "vertical_x1b" for problem "problem_x1a_1"
mongo_store.collection.update(
self.vertical_x1b.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
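        # (The raw collection.update() above deliberately bypasses the
        # modulestore API to push a second parent pointer into old Mongo's
        # definition.children list; this is test scaffolding, not a supported
        # operation.)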
# convert first parent (unit) "vertical_x1a" of problem "problem_x1a_1" to draft
self.store.convert_to_draft(self.vertical_x1a, self.user_id)
item = self.store.get_item(self.vertical_x1a)
self.assertTrue(self.store.has_published_version(item))
# now problem "problem_x1a_1" has 3 parents [vertical_x1a (draft),
# vertical_x1a (published), vertical_x1b (published)]
# check that "get_parent_location" method of draft branch returns first
# published parent "vertical_x1a" without raising "AssertionError" for
# problem location revision
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
# Draft:
# Problem path:
# 1. Get problem
# 2-6. get parent and rest of ancestors up to course
    # 7-8. get sequential, compute inheritance
    # 9-10. get vertical, compute inheritance
    # 11-12. get other vertical_x1b (why?) and compute inheritance
# Split: active_versions & structure
@ddt.data((ModuleStoreEnum.Type.mongo, [12, 3], 0), (ModuleStoreEnum.Type.split, [2, 2], 0))
@ddt.unpack
def test_path_to_location(self, default_ms, num_finds, num_sends):
"""
Make sure that path_to_location works
"""
self.initdb(default_ms)
course_key = self.course_locations[self.MONGO_COURSEID].course_key
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
self._create_block_hierarchy()
should_work = (
(self.problem_x1a_2,
(course_key, u"Chapter_x", u"Sequential_x1", u'Vertical_x1a', '1', self.problem_x1a_2)),
(self.chapter_x,
(course_key, "Chapter_x", None, None, None, self.chapter_x)),
)
for location, expected in should_work:
            # each iteration has a different find count; pop this iteration's find count
with check_mongo_calls(num_finds.pop(0), num_sends):
path = path_to_location(self.store, location)
self.assertEqual(path, expected)
not_found = (
course_key.make_usage_key('video', 'WelcomeX'),
course_key.make_usage_key('course', 'NotHome'),
)
for location in not_found:
with self.assertRaises(ItemNotFoundError):
path_to_location(self.store, location)
# Orphaned items should not be found.
orphan = course_key.make_usage_key('chapter', 'OrphanChapter')
self.store.create_item(
self.user_id,
orphan.course_key,
orphan.block_type,
block_id=orphan.block_id
)
with self.assertRaises(NoPathToItem):
path_to_location(self.store, orphan)
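    # Reading the expected tuples above, path_to_location appears to return a
    # 6-tuple of (course_key, chapter, section, vertical, position, final
    # usage key), with None for levels below the located block; this is an
    # inference from these fixtures, not a documented contract.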
def test_xml_path_to_location(self):
"""
Make sure that path_to_location works: should be passed a modulestore
with the toy and simple courses loaded.
"""
# only needs course_locations set
self.initdb(ModuleStoreEnum.Type.mongo)
course_key = self.course_locations[self.XML_COURSEID1].course_key
video_key = course_key.make_usage_key('video', 'Welcome')
chapter_key = course_key.make_usage_key('chapter', 'Overview')
should_work = (
(video_key,
(course_key, "Overview", "Welcome", None, None, video_key)),
(chapter_key,
(course_key, "Overview", None, None, None, chapter_key)),
)
for location, expected in should_work:
self.assertEqual(path_to_location(self.store, location), expected)
not_found = (
course_key.make_usage_key('video', 'WelcomeX'),
course_key.make_usage_key('course', 'NotHome'),
)
for location in not_found:
with self.assertRaises(ItemNotFoundError):
path_to_location(self.store, location)
def test_navigation_index(self):
"""
Make sure that navigation_index correctly parses the various position values that we might get from calls to
path_to_location
"""
self.assertEqual(1, navigation_index("1"))
self.assertEqual(10, navigation_index("10"))
self.assertEqual(None, navigation_index(None))
self.assertEqual(1, navigation_index("1_2"))
self.assertEqual(5, navigation_index("5_2"))
self.assertEqual(7, navigation_index("7_3_5_6_"))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_root_draft(self, default_ms):
"""
        Test calling revert_to_published on a draft vertical.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
vertical = self.store.get_item(self.vertical_x1a)
vertical_children_num = len(vertical.children)
self.store.publish(self.course.location, self.user_id)
self.assertFalse(self._has_changes(self.vertical_x1a))
# delete leaf problem (will make parent vertical a draft)
self.store.delete_item(self.problem_x1a_1, self.user_id)
self.assertTrue(self._has_changes(self.vertical_x1a))
draft_parent = self.store.get_item(self.vertical_x1a)
self.assertEqual(vertical_children_num - 1, len(draft_parent.children))
published_parent = self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.assertEqual(vertical_children_num, len(published_parent.children))
self.store.revert_to_published(self.vertical_x1a, self.user_id)
reverted_parent = self.store.get_item(self.vertical_x1a)
        self.assertEqual(vertical_children_num, len(reverted_parent.children))
self.assertBlocksEqualByFields(reverted_parent, published_parent)
self.assertFalse(self._has_changes(self.vertical_x1a))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_root_published(self, default_ms):
"""
Test calling revert_to_published on a published vertical with a draft child.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
problem = self.store.get_item(self.problem_x1a_1)
orig_display_name = problem.display_name
# Change display name of problem and update just it (so parent remains published)
problem.display_name = "updated before calling revert"
self.store.update_item(problem, self.user_id)
self.store.revert_to_published(self.vertical_x1a, self.user_id)
reverted_problem = self.store.get_item(self.problem_x1a_1)
self.assertEqual(orig_display_name, reverted_problem.display_name)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_no_draft(self, default_ms):
"""
        Test that calling revert_to_published on a vertical with no draft content does nothing.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
orig_vertical = self.store.get_item(self.vertical_x1a)
self.store.revert_to_published(self.vertical_x1a, self.user_id)
reverted_vertical = self.store.get_item(self.vertical_x1a)
self.assertBlocksEqualByFields(orig_vertical, reverted_vertical)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_no_published(self, default_ms):
"""
        Test that calling revert_to_published on a vertical with no published version raises an error.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
with self.assertRaises(InvalidVersionError):
self.store.revert_to_published(self.vertical_x1a, self.user_id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_revert_to_published_direct_only(self, default_ms):
"""
Test calling revert_to_published on a direct-only item is a no-op.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
num_children = len(self.store.get_item(self.sequential_x1).children)
self.store.revert_to_published(self.sequential_x1, self.user_id)
reverted_parent = self.store.get_item(self.sequential_x1)
# It does not discard the child vertical, even though that child is a draft (with no published version)
self.assertEqual(num_children, len(reverted_parent.children))
# Draft: get all items which can be or should have parents
# Split: active_versions, structure
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_get_orphans(self, default_ms, max_find, max_send):
"""
Test finding orphans.
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
# orphans
orphan_locations = [
course_id.make_usage_key('chapter', 'OrphanChapter'),
course_id.make_usage_key('vertical', 'OrphanVertical'),
course_id.make_usage_key('problem', 'OrphanProblem'),
course_id.make_usage_key('html', 'OrphanHTML'),
]
# detached items (not considered as orphans)
detached_locations = [
course_id.make_usage_key('static_tab', 'StaticTab'),
course_id.make_usage_key('course_info', 'updates'),
]
for location in orphan_locations + detached_locations:
self.store.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id
)
with check_mongo_calls(max_find, max_send):
found_orphans = self.store.get_orphans(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertItemsEqual(found_orphans, orphan_locations)
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_get_non_orphan_parents(self, default_ms):
"""
        Test finding non-orphan parents among many possible parents.
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
# test that problem "problem_x1a_1" has only one published parent
mongo_store = self.store._get_modulestore_for_courselike(course_id) # pylint: disable=protected-access
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
# add some published orphans
orphan_sequential = course_id.make_usage_key('sequential', 'OrphanSequential')
orphan_vertical = course_id.make_usage_key('vertical', 'OrphanVertical')
orphan_locations = [orphan_sequential, orphan_vertical]
for location in orphan_locations:
self.store.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id
)
self.store.publish(location, self.user_id)
found_orphans = mongo_store.get_orphans(course_id)
self.assertEqual(set(found_orphans), set(orphan_locations))
self.assertEqual(len(set(found_orphans)), 2)
        # add the orphan vertical and sequential as additional parents of problem "problem_x1a_1"
mongo_store.collection.update(
orphan_sequential.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
mongo_store.collection.update(
orphan_vertical.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
# test that "get_parent_location" method of published branch still returns the correct non-orphan parent for
# problem "problem_x1a_1" since the two other parents are orphans
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
# now add valid published vertical as another parent of problem
mongo_store.collection.update(
self.sequential_x1.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
# now check that "get_parent_location" method of published branch raises "ReferentialIntegrityError" for
# problem "problem_x1a_1" since it has now 2 valid published parents
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
self.assertTrue(self.store.has_item(self.problem_x1a_1))
with self.assertRaises(ReferentialIntegrityError):
self.store.get_parent_location(self.problem_x1a_1)
@ddt.data(ModuleStoreEnum.Type.mongo)
def test_create_item_from_parent_location(self, default_ms):
"""
Test a code path missed by the above: passing an old-style location as parent but no
new location for the child
"""
self.initdb(default_ms)
self.store.create_child(
self.user_id,
self.course_locations[self.MONGO_COURSEID],
'problem',
block_id='orphan'
)
orphans = self.store.get_orphans(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertEqual(len(orphans), 0, "unexpected orphans: {}".format(orphans))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_create_item_populates_edited_info(self, default_ms):
self.initdb(default_ms)
block = self.store.create_item(
self.user_id,
self.course.location.course_key,
'problem'
)
self.assertEqual(self.user_id, block.edited_by)
self.assertGreater(datetime.datetime.now(UTC), block.edited_on)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_create_item_populates_subtree_edited_info(self, default_ms):
self.initdb(default_ms)
block = self.store.create_item(
self.user_id,
self.course.location.course_key,
'problem'
)
self.assertEqual(self.user_id, block.subtree_edited_by)
self.assertGreater(datetime.datetime.now(UTC), block.subtree_edited_on)
# Draft: wildcard search of draft and split
# Split: wildcard search of draft and split
@ddt.data((ModuleStoreEnum.Type.mongo, 2, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_get_courses_for_wiki(self, default_ms, max_find, max_send):
"""
Test the get_courses_for_wiki method
"""
self.initdb(default_ms)
# Test XML wikis
wiki_courses = self.store.get_courses_for_wiki('toy')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(self.course_locations[self.XML_COURSEID1].course_key, wiki_courses)
wiki_courses = self.store.get_courses_for_wiki('simple')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, wiki_courses)
# Test Mongo wiki
with check_mongo_calls(max_find, max_send):
wiki_courses = self.store.get_courses_for_wiki('999')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None), # Branch agnostic
wiki_courses
)
self.assertEqual(len(self.store.get_courses_for_wiki('edX.simple.2012_Fall')), 0)
self.assertEqual(len(self.store.get_courses_for_wiki('no_such_wiki')), 0)
# Draft:
# Find: find vertical, find children
# Sends:
# 1. delete all of the published nodes in subtree
# 2. insert vertical as published (deleted in step 1) w/ the deleted problems as children
# 3-6. insert the 3 problems and 1 html as published
# Split: active_versions, 2 structures (pre & post published?)
# Sends:
# - insert structure
# - write index entry
@ddt.data((ModuleStoreEnum.Type.mongo, 2, 6), (ModuleStoreEnum.Type.split, 3, 2))
@ddt.unpack
def test_unpublish(self, default_ms, max_find, max_send):
"""
Test calling unpublish
"""
self.initdb(default_ms)
if default_ms == ModuleStoreEnum.Type.mongo and mongo_uses_error_check(self.store):
max_find += 1
self._create_block_hierarchy()
# publish
self.store.publish(self.course.location, self.user_id)
published_xblock = self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.assertIsNotNone(published_xblock)
# unpublish
with check_mongo_calls(max_find, max_send):
self.store.unpublish(self.vertical_x1a, self.user_id)
with self.assertRaises(ItemNotFoundError):
self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.published_only
)
# make sure draft version still exists
draft_xblock = self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.draft_only
)
self.assertIsNotNone(draft_xblock)
# Draft: specific query for revision None
# Split: active_versions, structure
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 0), (ModuleStoreEnum.Type.split, 2, 0))
@ddt.unpack
def test_has_published_version(self, default_ms, max_find, max_send):
"""
Test the has_published_version method
"""
self.initdb(default_ms)
self._create_block_hierarchy()
# start off as Private
item = self.store.create_child(self.user_id, self.writable_chapter_location, 'problem', 'test_compute_publish_state')
item_location = item.location
with check_mongo_calls(max_find, max_send):
self.assertFalse(self.store.has_published_version(item))
# Private -> Public
self.store.publish(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertTrue(self.store.has_published_version(item))
# Public -> Private
self.store.unpublish(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertFalse(self.store.has_published_version(item))
# Private -> Public
self.store.publish(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertTrue(self.store.has_published_version(item))
# Public -> Draft with NO changes
self.store.convert_to_draft(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertTrue(self.store.has_published_version(item))
# Draft WITH changes
item.display_name = 'new name'
item = self.store.update_item(item, self.user_id)
self.assertTrue(self.store.has_changes(item))
self.assertTrue(self.store.has_published_version(item))
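    # Lifecycle exercised above: publish() creates a published version,
    # unpublish() removes it, convert_to_draft() keeps the published version
    # while adding an editable draft, and update_item() on that draft sets
    # has_changes without touching the published version.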
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_edit_info_ancestors(self, default_ms):
"""
Tests that edited_on, edited_by, subtree_edited_on, and subtree_edited_by are set correctly during update
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
def check_node(location_key, after, before, edited_by, subtree_after, subtree_before, subtree_by):
"""
Checks that the node given by location_key matches the given edit_info constraints.
"""
node = self.store.get_item(location_key)
if after:
self.assertLess(after, node.edited_on)
self.assertLess(node.edited_on, before)
self.assertEqual(node.edited_by, edited_by)
if subtree_after:
self.assertLess(subtree_after, node.subtree_edited_on)
self.assertLess(node.subtree_edited_on, subtree_before)
self.assertEqual(node.subtree_edited_by, subtree_by)
with self.store.bulk_operations(test_course.id):
# Create a dummy vertical & html to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
block_id='test_vertical'
)
child = self.store.create_child(
self.user_id,
component.location,
'html',
block_id='test_html'
)
sibling = self.store.create_child(
self.user_id,
component.location,
'html',
block_id='test_html_no_change'
)
after_create = datetime.datetime.now(UTC)
        # Verify that all nodes were last edited in the past by the creating user
for block in [component, child, sibling]:
check_node(block.location, None, after_create, self.user_id, None, after_create, self.user_id)
        # Change the component, then check that there are now changes
component.display_name = 'Changed Display Name'
editing_user = self.user_id - 2
with self.store.bulk_operations(test_course.id): # TNL-764 bulk ops disabled ancestor updates
component = self.store.update_item(component, editing_user)
after_edit = datetime.datetime.now(UTC)
check_node(component.location, after_create, after_edit, editing_user, after_create, after_edit, editing_user)
# but child didn't change
check_node(child.location, None, after_create, self.user_id, None, after_create, self.user_id)
# Change the child
child = self.store.get_item(child.location)
child.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=editing_user)
after_edit = datetime.datetime.now(UTC)
        # Verify that the child was last edited between after_create and after_edit by editing_user
check_node(child.location, after_create, after_edit, editing_user, after_create, after_edit, editing_user)
        # Verify that the ancestors' edit info is unchanged, but their subtree edit info matches the child
check_node(test_course.location, None, after_create, self.user_id, after_create, after_edit, editing_user)
# Verify that others have unchanged edit info
check_node(sibling.location, None, after_create, self.user_id, None, after_create, self.user_id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_edit_info(self, default_ms):
"""
Tests that edited_on and edited_by are set correctly during an update
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
)
# Store the current edit time and verify that user created the component
self.assertEqual(component.edited_by, self.user_id)
old_edited_on = component.edited_on
edit_user = self.user_id - 2
# Change the component
component.display_name = 'Changed'
self.store.update_item(component, edit_user)
updated_component = self.store.get_item(component.location)
        # Verify the ordering of edit times and that edit_user made the edit
self.assertLess(old_edited_on, updated_component.edited_on)
self.assertEqual(updated_component.edited_by, edit_user)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_published_info(self, default_ms):
"""
Tests that published_on and published_by are set correctly
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
publish_user = 456
# Create a dummy component to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
)
# Store the current time, then publish
old_time = datetime.datetime.now(UTC)
self.store.publish(component.location, publish_user)
updated_component = self.store.get_item(component.location)
# Verify the time order and that publish_user caused publication
self.assertLessEqual(old_time, updated_component.published_on)
self.assertEqual(updated_component.published_by, publish_user)
# Verify that changing the item doesn't unset the published info
updated_component.display_name = 'changed'
self.store.update_item(updated_component, self.user_id)
updated_component = self.store.get_item(updated_component.location)
self.assertLessEqual(old_time, updated_component.published_on)
self.assertEqual(updated_component.published_by, publish_user)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_auto_publish(self, default_ms):
"""
Test that the correct things have been published automatically
Assumptions:
* we auto-publish courses, chapters, sequentials
* we don't auto-publish problems
"""
self.initdb(default_ms)
# test create_course to make sure we are autopublishing
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
self.assertTrue(self.store.has_published_version(test_course))
test_course_key = test_course.id
# test create_item of direct-only category to make sure we are autopublishing
chapter = self.store.create_child(self.user_id, test_course.location, 'chapter', 'Overview')
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertIn(
chapter.location,
self.store.get_item(test_course.location).children,
)
self.assertTrue(self.store.has_published_version(chapter))
chapter_location = chapter.location
# test create_child of direct-only category to make sure we are autopublishing
sequential = self.store.create_child(self.user_id, chapter_location, 'sequential', 'Sequence')
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertIn(
sequential.location,
self.store.get_item(chapter_location).children,
)
self.assertTrue(self.store.has_published_version(sequential))
# test update_item of direct-only category to make sure we are autopublishing
sequential.display_name = 'sequential1'
sequential = self.store.update_item(sequential, self.user_id)
self.assertTrue(self.store.has_published_version(sequential))
# test delete_item of direct-only category to make sure we are autopublishing
self.store.delete_item(sequential.location, self.user_id, revision=ModuleStoreEnum.RevisionOption.all)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertNotIn(
sequential.location,
self.store.get_item(chapter_location).children,
)
chapter = self.store.get_item(chapter.location.for_branch(None))
self.assertTrue(self.store.has_published_version(chapter))
# test create_child of NOT direct-only category to make sure we aren't autopublishing
problem_child = self.store.create_child(self.user_id, chapter_location, 'problem', 'Problem_Child')
self.assertFalse(self.store.has_published_version(problem_child))
# test create_item of NOT direct-only category to make sure we aren't autopublishing
problem_item = self.store.create_item(self.user_id, test_course_key, 'problem', 'Problem_Item')
self.assertFalse(self.store.has_published_version(problem_item))
# test update_item of NOT direct-only category to make sure we aren't autopublishing
problem_item.display_name = 'Problem_Item1'
problem_item = self.store.update_item(problem_item, self.user_id)
self.assertFalse(self.store.has_published_version(problem_item))
# test delete_item of NOT direct-only category to make sure we aren't autopublishing
self.store.delete_item(problem_child.location, self.user_id)
chapter = self.store.get_item(chapter.location.for_branch(None))
self.assertTrue(self.store.has_published_version(chapter))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_get_courses_for_wiki_shared(self, default_ms):
"""
Test two courses sharing the same wiki
"""
self.initdb(default_ms)
            # verify initial state: we should have a wiki for the Mongo course
wiki_courses = self.store.get_courses_for_wiki('999')
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None), # Branch agnostic
wiki_courses
)
# set Mongo course to share the wiki with simple course
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
mongo_course.wiki_slug = 'simple'
self.store.update_item(mongo_course, self.user_id)
# now mongo_course should not be retrievable with old wiki_slug
wiki_courses = self.store.get_courses_for_wiki('999')
self.assertEqual(len(wiki_courses), 0)
# but there should be two courses with wiki_slug 'simple'
wiki_courses = self.store.get_courses_for_wiki('simple')
self.assertEqual(len(wiki_courses), 2)
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None),
wiki_courses
)
self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, wiki_courses)
# configure mongo course to use unique wiki_slug.
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
mongo_course.wiki_slug = 'MITx.999.2013_Spring'
self.store.update_item(mongo_course, self.user_id)
# it should be retrievable with its new wiki_slug
wiki_courses = self.store.get_courses_for_wiki('MITx.999.2013_Spring')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None),
wiki_courses
)
            # and NOT retrievable with its old wiki_slug
wiki_courses = self.store.get_courses_for_wiki('simple')
self.assertEqual(len(wiki_courses), 1)
self.assertNotIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None),
wiki_courses
)
self.assertIn(
self.course_locations[self.XML_COURSEID2].course_key,
wiki_courses
)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_branch_setting(self, default_ms):
"""
Test the branch_setting context manager
"""
self.initdb(default_ms)
self._create_block_hierarchy()
problem_location = self.problem_x1a_1.for_branch(None)
problem_original_name = 'Problem_x1a_1'
course_key = problem_location.course_key
problem_new_name = 'New Problem Name'
def assertNumProblems(display_name, expected_number):
"""
Asserts the number of problems with the given display name is the given expected number.
"""
self.assertEquals(
len(self.store.get_items(course_key.for_branch(None), settings={'display_name': display_name})),
expected_number
)
def assertProblemNameEquals(expected_display_name):
"""
Asserts the display_name of the xblock at problem_location matches the given expected value.
"""
# check the display_name of the problem
problem = self.store.get_item(problem_location)
self.assertEquals(problem.display_name, expected_display_name)
# there should be only 1 problem with the expected_display_name
assertNumProblems(expected_display_name, 1)
# verify Draft problem
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
self.assertTrue(self.store.has_item(problem_location))
assertProblemNameEquals(problem_original_name)
# verify Published problem doesn't exist
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
self.assertFalse(self.store.has_item(problem_location))
with self.assertRaises(ItemNotFoundError):
self.store.get_item(problem_location)
# PUBLISH the problem
self.store.publish(self.vertical_x1a, self.user_id)
self.store.publish(problem_location, self.user_id)
# verify Published problem
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
self.assertTrue(self.store.has_item(problem_location))
assertProblemNameEquals(problem_original_name)
# verify Draft-preferred
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
assertProblemNameEquals(problem_original_name)
# EDIT name
problem = self.store.get_item(problem_location)
problem.display_name = problem_new_name
self.store.update_item(problem, self.user_id)
# verify Draft problem has new name
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
assertProblemNameEquals(problem_new_name)
# verify Published problem still has old name
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
assertProblemNameEquals(problem_original_name)
# there should be no published problems with the new name
assertNumProblems(problem_new_name, 0)
# PUBLISH the problem
self.store.publish(problem_location, self.user_id)
# verify Published problem has new name
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
assertProblemNameEquals(problem_new_name)
# there should be no published problems with the old name
assertNumProblems(problem_original_name, 0)
def verify_default_store(self, store_type):
"""
Verifies the default_store property
"""
self.assertEquals(self.store.default_modulestore.get_modulestore_type(), store_type)
# verify internal helper method
store = self.store._get_modulestore_for_courselike() # pylint: disable=protected-access
self.assertEquals(store.get_modulestore_type(), store_type)
# verify store used for creating a course
try:
course = self.store.create_course("org", "course{}".format(uuid4().hex[:5]), "run", self.user_id)
self.assertEquals(course.system.modulestore.get_modulestore_type(), store_type)
except NotImplementedError:
self.assertEquals(store_type, ModuleStoreEnum.Type.xml)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.xml)
def test_default_store(self, default_ms):
"""
Test the default store context manager
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
with self.store.default_store(default_ms):
self.verify_default_store(default_ms)
def test_default_store_nested(self):
"""
Test the default store context manager, nested within one another
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
with self.store.default_store(ModuleStoreEnum.Type.mongo):
self.verify_default_store(ModuleStoreEnum.Type.mongo)
with self.store.default_store(ModuleStoreEnum.Type.split):
self.verify_default_store(ModuleStoreEnum.Type.split)
with self.store.default_store(ModuleStoreEnum.Type.xml):
self.verify_default_store(ModuleStoreEnum.Type.xml)
self.verify_default_store(ModuleStoreEnum.Type.split)
self.verify_default_store(ModuleStoreEnum.Type.mongo)
def test_default_store_fake(self):
"""
Test the default store context manager, asking for a fake store
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
fake_store = "fake"
with self.assertRaisesRegexp(Exception, "Cannot find store of type {}".format(fake_store)):
with self.store.default_store(fake_store):
pass # pragma: no cover
def save_asset(self, asset_key):
"""
Load and save the given file. (taken from test_contentstore)
"""
with open("{}/static/{}".format(DATA_DIR, asset_key.block_id), "rb") as f:
content = StaticContent(
asset_key, "Funky Pix", mimetypes.guess_type(asset_key.block_id)[0], f.read(),
)
self.store.contentstore.save(content)
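    # Example use of save_asset (a sketch; the named file must exist under
    # DATA_DIR/static, as 'picture1.jpg' does for the clone test below):
    #
    #     self.save_asset(course_key.make_asset_key('asset', 'picture1.jpg'))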
@ddt.data(
[ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.mongo],
[ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split],
[ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.split]
)
@ddt.unpack
def test_clone_course(self, source_modulestore, destination_modulestore):
"""
Test clone course
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={})
with self.store.default_store(source_modulestore):
source_course_key = self.store.make_course_key("org.source", "course.source", "run.source")
self._create_course(source_course_key)
self.save_asset(source_course_key.make_asset_key('asset', 'picture1.jpg'))
with self.store.default_store(destination_modulestore):
dest_course_id = self.store.make_course_key("org.other", "course.other", "run.other")
self.store.clone_course(source_course_key, dest_course_id, self.user_id)
# pylint: disable=protected-access
source_store = self.store._get_modulestore_by_type(source_modulestore)
dest_store = self.store._get_modulestore_by_type(destination_modulestore)
self.assertCoursesEqual(source_store, source_course_key, dest_store, dest_course_id)
def test_clone_xml_split(self):
"""
        XML courses can be cloned to Split, so test it.
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={self.XML_COURSEID2: 'xml', })
source_course_key = CourseKey.from_string(self.XML_COURSEID2)
with self.store.default_store(ModuleStoreEnum.Type.split):
dest_course_id = CourseLocator("org.other", "course.other", "run.other")
self.store.clone_course(
source_course_key, dest_course_id, ModuleStoreEnum.UserID.test
)
# pylint: disable=protected-access
source_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.xml)
dest_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.split)
self.assertCoursesEqual(source_store, source_course_key, dest_store, dest_course_id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_operations_signal_firing(self, default):
""" Signals should be fired right before bulk_operations() exits. """
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
course_key = course.id
def _clear_bulk_ops_record(course_key): # pylint: disable=unused-argument
"""
Check if the signal has been fired.
The course_published signal fires before the _clear_bulk_ops_record.
"""
signal_handler.send.assert_called_with('course_published', course_key=course.id)
with patch.object(
self.store.thread_cache.default_store, '_clear_bulk_ops_record', wraps=_clear_bulk_ops_record
) as mock_clear_bulk_ops_record:
with self.store.bulk_operations(course_key):
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
self.store.create_item(self.user_id, course_key, block_type)
signal_handler.send.assert_not_called()
self.assertEqual(mock_clear_bulk_ops_record.call_count, 1)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_direct_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
# Test non-draftable block types. The block should be published with every change.
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
log.debug('Testing with block type %s', block_type)
signal_handler.reset_mock()
block = self.store.create_item(self.user_id, course_key, block_type)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
block.display_name = block_type
self.store.update_item(block, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
self.store.publish(block.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_rerun_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
# Test course re-runs
signal_handler.reset_mock()
dest_course_id = self.store.make_course_key("org.other", "course.other", "run.other")
self.store.clone_course(course_key, dest_course_id, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=dest_course_id)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_import_firing(self, default, _from_json):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Test course imports
# Note: The signal is fired once when the course is created and
# a second time after the actual data import.
import_course_from_xml(
self.store, self.user_id, DATA_DIR, ['toy'], load_error_modules=False,
static_content_store=contentstore,
create_if_not_present=True,
)
signal_handler.send.assert_has_calls([
call('pre_publish', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
call('course_published', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
call('pre_publish', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
call('course_published', course_key=self.store.make_course_key('edX', 'toy', '2012_Fall')),
])
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_publish_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
# Test a draftable block type, which needs to be explicitly published, and nest it within the
# normal structure - this is important because some implementors change the parent when adding a
# non-published child; if parent is in DIRECT_ONLY_CATEGORIES then this should not fire the event
signal_handler.reset_mock()
section = self.store.create_item(self.user_id, course.id, 'chapter')
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
subsection = self.store.create_child(self.user_id, section.location, 'sequential')
signal_handler.send.assert_called_with('course_published', course_key=course.id)
# 'units' and 'blocks' are draftable types
signal_handler.reset_mock()
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
signal_handler.send.assert_not_called()
block = self.store.create_child(self.user_id, unit.location, 'problem')
signal_handler.send.assert_not_called()
self.store.update_item(block, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.reset_mock()
self.store.publish(unit.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
self.store.unpublish(unit.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
self.store.delete_item(unit.location, self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_course_publish_signal_direct_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
                # Test non-draftable block types. No signals should be received until the bulk operation completes.
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
log.debug('Testing with block type %s', block_type)
block = self.store.create_item(self.user_id, course_key, block_type)
signal_handler.send.assert_not_called()
block.display_name = block_type
self.store.update_item(block, self.user_id)
signal_handler.send.assert_not_called()
self.store.publish(block.location, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_called_with('course_published', course_key=course.id)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_course_publish_signal_publish_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
signal_handler.send.assert_called_with('course_published', course_key=course.id)
course_key = course.id
# Test a draftable block type, which needs to be explicitly published, and nest it within the
# normal structure - this is important because some implementors change the parent when adding a
# non-published child; if parent is in DIRECT_ONLY_CATEGORIES then this should not fire the event
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
section = self.store.create_item(self.user_id, course_key, 'chapter')
signal_handler.send.assert_not_called()
subsection = self.store.create_child(self.user_id, section.location, 'sequential')
signal_handler.send.assert_not_called()
# 'units' and 'blocks' are draftable types
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
signal_handler.send.assert_not_called()
block = self.store.create_child(self.user_id, unit.location, 'problem')
signal_handler.send.assert_not_called()
self.store.update_item(block, self.user_id)
signal_handler.send.assert_not_called()
self.store.publish(unit.location, self.user_id)
signal_handler.send.assert_not_called()
self.store.unpublish(unit.location, self.user_id)
signal_handler.send.assert_not_called()
self.store.delete_item(unit.location, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_called_with('course_published', course_key=course.id)
# Test editing draftable block type without publish
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
signal_handler.send.assert_not_called()
block = self.store.create_child(self.user_id, unit.location, 'problem')
signal_handler.send.assert_not_called()
self.store.publish(unit.location, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_called_with('course_published', course_key=course.id)
signal_handler.reset_mock()
with self.store.bulk_operations(course_key):
signal_handler.send.assert_not_called()
unit.display_name = "Change this unit"
self.store.update_item(unit, self.user_id)
signal_handler.send.assert_not_called()
signal_handler.send.assert_not_called()
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_deleted_signal(self, default):
with MongoContentstoreBuilder().build() as contentstore:
signal_handler = Mock(name='signal_handler')
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=signal_handler,
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
signal_handler.send.assert_not_called()
# Create a course
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
course_key = course.id
# Delete the course
course = self.store.delete_course(course_key, self.user_id)
# Verify that the signal was emitted
signal_handler.send.assert_called_with('course_deleted', course_key=course_key)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_delete_published_item_orphans(self, default_store):
"""
        Tests that deleting a published item doesn't create any orphans in the course.
"""
self.initdb(default_store)
course_locator = self.course.id
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
problem = self.store.create_child(
self.user_id, vertical.location, 'problem', block_id='problem'
)
self.store.publish(chapter.location, self.user_id)
# Verify that there are no changes
self.assertFalse(self._has_changes(chapter.location))
self.assertFalse(self._has_changes(sequential.location))
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(problem.location))
# No orphans in course
course_orphans = self.store.get_orphans(course_locator)
self.assertEqual(len(course_orphans), 0)
self.store.delete_item(vertical.location, self.user_id)
# No orphans in course after delete, except
# in old mongo, which still creates orphans
course_orphans = self.store.get_orphans(course_locator)
if default_store == ModuleStoreEnum.Type.mongo:
self.assertEqual(len(course_orphans), 1)
else:
self.assertEqual(len(course_orphans), 0)
course_locator_publish = course_locator.for_branch(ModuleStoreEnum.BranchName.published)
        # No published orphans after delete, except
# in old mongo, which still creates orphans
course_publish_orphans = self.store.get_orphans(course_locator_publish)
if default_store == ModuleStoreEnum.Type.mongo:
self.assertEqual(len(course_publish_orphans), 1)
else:
self.assertEqual(len(course_publish_orphans), 0)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_delete_draft_item_orphans(self, default_store):
"""
        Tests that deleting a draft item creates no orphans in the course.
"""
self.initdb(default_store)
course_locator = self.course.id
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
problem = self.store.create_child(
self.user_id, vertical.location, 'problem', block_id='problem'
)
self.store.publish(chapter.location, self.user_id)
# Verify that there are no changes
self.assertFalse(self._has_changes(chapter.location))
self.assertFalse(self._has_changes(sequential.location))
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(problem.location))
# No orphans in course
course_orphans = self.store.get_orphans(course_locator)
self.assertEqual(len(course_orphans), 0)
problem.display_name = 'changed'
problem = self.store.update_item(problem, self.user_id)
self.assertTrue(self._has_changes(vertical.location))
self.assertTrue(self._has_changes(problem.location))
self.store.delete_item(vertical.location, self.user_id)
# No orphans in course after delete, except
# in old mongo, which still creates them
course_orphans = self.store.get_orphans(course_locator)
if default_store == ModuleStoreEnum.Type.mongo:
self.assertEqual(len(course_orphans), 1)
else:
self.assertEqual(len(course_orphans), 0)
course_locator_publish = course_locator.for_branch(ModuleStoreEnum.BranchName.published)
# No published orphans after delete, except
# in old mongo, which still creates them
course_publish_orphans = self.store.get_orphans(course_locator_publish)
if default_store == ModuleStoreEnum.Type.mongo:
self.assertEqual(len(course_publish_orphans), 1)
else:
self.assertEqual(len(course_publish_orphans), 0)
@ddt.ddt
@attr('mongo')
class TestPublishOverExportImport(CommonMixedModuleStoreSetup):
"""
Tests which publish (or don't publish) items - and then export/import the course,
checking the state of the imported items.
"""
def setUp(self):
"""
Set up the database for testing
"""
super(TestPublishOverExportImport, self).setUp()
self.user_id = ModuleStoreEnum.UserID.test
self.export_dir = mkdtemp()
self.addCleanup(rmtree, self.export_dir, ignore_errors=True)
def _export_import_course_round_trip(self, modulestore, contentstore, source_course_key, export_dir):
"""
Export the course from a modulestore and then re-import the course.
"""
top_level_export_dir = 'exported_source_course'
export_course_to_xml(
modulestore,
contentstore,
source_course_key,
export_dir,
top_level_export_dir,
)
import_course_from_xml(
modulestore,
'test_user',
export_dir,
source_dirs=[top_level_export_dir],
static_content_store=contentstore,
target_id=source_course_key,
create_if_not_present=True,
raise_on_failure=True,
)
@contextmanager
def _build_store(self, default_ms):
"""
Perform the modulestore-building and course creation steps for a mixed modulestore test.
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={})
with self.store.default_store(default_ms):
source_course_key = self.store.make_course_key("org.source", "course.source", "run.source")
self._create_course(source_course_key)
yield contentstore, source_course_key
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_draft_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that an unpublished unit keeps its unpublished changes (remains a draft) across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and don't publish it.
draft_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
self.assertTrue(self._has_changes(draft_xblock.location))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Verify that the imported block still is a draft, i.e. has changes.
self.assertTrue(self._has_changes(draft_xblock.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_published_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a published unit remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and publish it.
published_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
self.store.publish(published_xblock.location, self.user_id)
# Retrieve the published block and make sure it's published.
self.assertFalse(self._has_changes(published_xblock.location))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that it still is published, i.e. has no changes.
self.assertFalse(self._has_changes(published_xblock.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_changed_published_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a published unit with an unpublished draft remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and publish it.
published_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
self.store.publish(published_xblock.location, self.user_id)
# Retrieve the published block and make sure it's published.
self.assertFalse(self._has_changes(published_xblock.location))
updated_display_name = 'Changed Display Name'
component = self.store.get_item(published_xblock.location)
component.display_name = updated_display_name
component = self.store.update_item(component, self.user_id)
self.assertTrue(self.store.has_changes(component))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
self.assertTrue(self._has_changes(published_xblock.location))
# Verify that the changes in the draft vertical still exist.
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, source_course_key):
component = self.store.get_item(published_xblock.location)
self.assertEqual(component.display_name, updated_display_name)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_seq_with_unpublished_vertical_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a sequential containing an unpublished vertical still has changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical - don't publish it!
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Retrieve the published block and make sure it's published.
            # Chapter is published - but the changes in the vertical below mean it "has_changes".
            self.assertTrue(self._has_changes(chapter.location))
            # Sequential is published - but the changes in the vertical below mean it "has_changes".
self.assertTrue(self._has_changes(sequential.location))
# Vertical is unpublished - so it "has_changes".
self.assertTrue(self._has_changes(vertical.location))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
self.assertTrue(self._has_changes(chapter.location))
self.assertTrue(self._has_changes(sequential.location))
self.assertTrue(self._has_changes(vertical.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_vertical_with_draft_and_published_unit_has_changes_before_export_and_after_import(self, default_ms):
"""
        Tests that a vertical whose child unit has unpublished draft changes keeps those changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Vertical has changes until it is actually published.
self.assertTrue(self._has_changes(vertical.location))
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
# create unit
unit = self.store.create_child(
self.user_id, vertical.location, 'html', block_id='html_unit'
)
# Vertical has a new child -and- unit is unpublished. So both have changes.
self.assertTrue(self._has_changes(vertical.location))
self.assertTrue(self._has_changes(unit.location))
# Publishing the vertical also publishes its unit child.
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Publishing the unit separately has no effect on whether it has changes - it's already published.
self.store.publish(unit.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Retrieve the published block and make sure it's published.
self.store.publish(chapter.location, self.user_id)
self.assertFalse(self._has_changes(chapter.location))
self.assertFalse(self._has_changes(sequential.location))
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Now make changes to the unit - but don't publish them.
component = self.store.get_item(unit.location)
updated_display_name = 'Changed Display Name'
component.display_name = updated_display_name
component = self.store.update_item(component, self.user_id)
self.assertTrue(self._has_changes(component.location))
# Export the course - then import the course export.
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
self.assertTrue(self._has_changes(chapter.location))
self.assertTrue(self._has_changes(sequential.location))
self.assertTrue(self._has_changes(vertical.location))
self.assertTrue(self._has_changes(unit.location))
# Verify that the changes in the draft unit still exist.
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, source_course_key):
component = self.store.get_item(unit.location)
self.assertEqual(component.display_name, updated_display_name)
# Verify that the draft changes don't exist in the published unit - it still uses the default name.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, source_course_key):
component = self.store.get_item(unit.location)
self.assertEqual(component.display_name, 'Text')
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_vertical_with_published_unit_remains_published_before_export_and_after_import(self, default_ms):
"""
        Tests that a published unit remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Vertical has changes until it is actually published.
self.assertTrue(self._has_changes(vertical.location))
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
# create unit
unit = self.store.create_child(
self.user_id, vertical.location, 'html', block_id='html_unit'
)
# Now make changes to the unit.
updated_display_name = 'Changed Display Name'
unit.display_name = updated_display_name
unit = self.store.update_item(unit, self.user_id)
self.assertTrue(self._has_changes(unit.location))
# Publishing the vertical also publishes its unit child.
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Export the course - then import the course export.
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
            # Get the published xblocks from the imported course.
            # Verify that they have no draft changes, i.e. everything remains published.
self.assertFalse(self._has_changes(chapter.location))
self.assertFalse(self._has_changes(sequential.location))
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Verify that the published changes exist in the published unit.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, source_course_key):
component = self.store.get_item(unit.location)
self.assertEqual(component.display_name, updated_display_name)
|
agpl-3.0
|
openshift/openshift-tools
|
scripts/monitoring/cron-send-os-skydns-checks.py
|
12
|
6971
|
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Send Openshift Master SkyDNS metric checks to Zagg
Openshift uses SkyDNS to locate services inside of the cluster.
Openshift implements SkyDNS a bit differently. Normally SkyDNS uses etcd as a backend
to store the DNS data. Openshift instead uses a special SkyDNS provider to map
Openshift services to IPs. More info can be found in the source code here:
https://github.com/openshift/origin/blob/master/pkg/dns/serviceresolver.go
In short, each Openshift service has a name, a namespace and an IP. The custom provider
can return many different variations of these for services, endpoints, and ports. The
variation used within this script is of the form:
<name>.<namespace>.svc.cluster.local
This can be tested manually with dig from the command line in the form:
$ dig @<nameserver> <name>.<namespace>.svc.cluster.local A
In this script, I am assuming that each Openshift service has one and only one IP.
This *could* change, and we would then need to examine each of the IPs returned from
Openshift and SkyDNS.
'''
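# Illustrative sketch (not part of the check itself): resolving a service
# name against SkyDNS with dnspython, the same library this script uses.
# The nameserver IP and service name below are hypothetical examples.
#
#   from dns import resolver
#   res = resolver.Resolver(configure=False)
#   res.nameservers.append('172.30.0.1')
#   answer = res.query('docker-registry.default.svc.cluster.local', 'A')
#   print answer.rrset[0].address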
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name
# Accepting general Exceptions
#pylint: disable=broad-except
# Bot doesn't support openshift_tools libs
#pylint: disable=import-error
import argparse
from dns import resolver
from dns import exception as dns_exception
from openshift_tools.web.openshift_rest_api import OpenshiftRestApi
from openshift_tools.monitoring.metric_sender import MetricSender
import socket
import sys
class OpenshiftSkyDNSZaggClient(object):
""" Checks for the Openshift Master SkyDNS """
def __init__(self):
self.args = None
self.metric_sender = None
self.ora = OpenshiftRestApi()
self.dns_host = ''
self.dns_port = 53
self.openshift_services = []
def run(self):
""" Main function to run the check """
self.parse_args()
self.metric_sender = MetricSender(verbose=self.args.verbose, debug=self.args.debug)
self.get_openshift_services()
dns_host = [i for i in self.openshift_services if i['name'] == 'kubernetes' and i['namespace'] == 'default']
if len(dns_host) == 1:
self.dns_host = dns_host[0]['ip']
else:
print "\nUnable to find SKY DNS server."
print "Please run \"oc get services -n default\" to locate kubernetes service"
sys.exit(1)
if self.check_dns_port_alive():
self.do_dns_check()
self.metric_sender.send_metrics()
def parse_args(self):
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='Network metric sender')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
self.args = parser.parse_args()
def check_dns_port_alive(self):
""" Verify that the DNS port (TCP 53) is alive """
print "\nPerforming Openshift DNS port check..."
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((self.dns_host, self.dns_port))
s.close()
print "\nOpenshift SkyDNS host: %s, port: %s is OPEN" % (self.dns_host, self.dns_port)
print "================================================\n"
self.metric_sender.add_metric({'openshift.master.skydns.port.open' : 1})
return True
except socket.error, e:
print "\nOpenshift SkyDNS host: %s, port: %s is CLOSED" % (self.dns_host, self.dns_port)
print "Python Error: %s" % e
print "================================================\n"
self.metric_sender.add_metric({'openshift.master.skydns.port.open' : 0})
return False
def get_openshift_services(self):
""" Get a list of Openshift services that can be used to test against SkyDNS """
print "\nQuerying for Openshift services in the 'default' namespace...\n"
response = self.ora.get('/api/v1/namespaces/default/services')
for i in response['items']:
service = {}
service['name'] = i['metadata']['name']
service['namespace'] = i['metadata']['namespace']
service['ip'] = i['spec']['clusterIP']
self.openshift_services.append(service)
if self.args.verbose:
print "\nOpenshift Services found:\n"
print "{0:35} {1:25} {2:20}".format("Name", "Namespace", "IP")
for i in self.openshift_services:
print "{0:35} {1:25} {2:20}".format(i['name'], i['namespace'], i['ip'])
print "================================================\n"
def do_dns_check(self):
""" perform DNS checks against SkyDNS service """
print "\nPerforming DNS queries against SkyDNS...\n"
dns_resolver = resolver.Resolver(configure=False)
dns_resolver.nameservers.append(self.dns_host)
# Set dns_check to 1 (good) by default
dns_check = 1
for service in self.openshift_services:
name_to_resolve = service['name'] + '.' + service['namespace'] + '.svc.cluster.local'
try:
dns_answer = dns_resolver.query(name_to_resolve, 'A')
except dns_exception.DNSException as e:
print "Failed DNS lookup of %s. Error: %s" % (name_to_resolve, e)
print "\nTroubleshoot command: dig @%s %s A\n" % (self.dns_host, name_to_resolve)
dns_check = 0
break
if self.args.verbose:
print "\nQueryring for A record of %s on server %s" %(name_to_resolve, self.dns_host)
print "DNS Answer: %s" % dns_answer.rrset[0].address
print "Openshift Answer: %s" % service['ip']
if dns_answer.rrset[0].address != service['ip']:
dns_check = 0
print "================================================\n"
self.metric_sender.add_metric({'openshift.master.skydns.query' : dns_check})
if __name__ == '__main__':
OMSZC = OpenshiftSkyDNSZaggClient()
OMSZC.run()
|
apache-2.0
|
CVL-GitHub/karaage
|
karaage/legacy/institutes/south_migrations/0002_move_instutute_models_to_institutes_app.py
|
3
|
8280
|
# -*- coding: utf-8 -*-
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
depends_on = (
("people", "0012_move_instutute_models_to_institutes_app"),
)
def forwards(self, orm):
# moved logic to karaage.people.migrations.0012_move_instutute_models_to_institutes_app
pass
def backwards(self, orm):
# moved logic to karaage.people.migrations.0012_move_instutute_models_to_institutes_app
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'institutes.institute': {
'Meta': {'ordering': "['name']", 'object_name': 'Institute', 'db_table': "'institute'"},
'delegates': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'delegate'", 'to': "orm['people.Person']", 'through': "orm['institutes.InstituteDelegate']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'gid': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'saml_entityid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'institutes.institutedelegate': {
'Meta': {'object_name': 'InstituteDelegate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'people.person': {
'Meta': {'ordering': "['first_name', 'last_name']", 'object_name': 'Person', 'db_table': "'person'"},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_approver'", 'null': 'True', 'to': "orm['people.Person']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_deletor'", 'null': 'True', 'to': "orm['people.Person']"}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['institutes.Institute']"}),
'is_systemuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'login_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'saml_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'supervisor': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['institutes']
|
gpl-3.0
|
morphis/home-assistant
|
homeassistant/components/light/yeelight.py
|
4
|
10411
|
"""
Support for Xiaomi Yeelight Wifi color bulb.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.yeelight/
"""
import logging
import colorsys
import voluptuous as vol
from homeassistant.util.color import (
color_temperature_mired_to_kelvin as mired_to_kelvin,
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_to_rgb)
from homeassistant.const import CONF_DEVICES, CONF_NAME
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_RGB_COLOR, ATTR_TRANSITION, ATTR_COLOR_TEMP,
ATTR_FLASH, FLASH_SHORT, FLASH_LONG,
SUPPORT_BRIGHTNESS, SUPPORT_RGB_COLOR, SUPPORT_TRANSITION,
SUPPORT_COLOR_TEMP, SUPPORT_FLASH,
Light, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['yeelight==0.2.2']
_LOGGER = logging.getLogger(__name__)
CONF_TRANSITION = "transition"
DEFAULT_TRANSITION = 350
CONF_SAVE_ON_CHANGE = "save_on_change"
CONF_MODE_MUSIC = "use_music_mode"
DOMAIN = 'yeelight'
DEVICE_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TRANSITION, default=DEFAULT_TRANSITION): cv.positive_int,
vol.Optional(CONF_MODE_MUSIC, default=False): cv.boolean,
vol.Optional(CONF_SAVE_ON_CHANGE, default=True): cv.boolean,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}, })
SUPPORT_YEELIGHT_RGB = (SUPPORT_RGB_COLOR |
SUPPORT_COLOR_TEMP)
SUPPORT_YEELIGHT = (SUPPORT_BRIGHTNESS |
SUPPORT_TRANSITION |
SUPPORT_FLASH)
def _cmd(func):
"""A wrapper to catch exceptions from the bulb."""
def _wrap(self, *args, **kwargs):
import yeelight
try:
_LOGGER.debug("Calling %s with %s %s", func, args, kwargs)
return func(self, *args, **kwargs)
except yeelight.BulbException as ex:
_LOGGER.error("Error when calling %s: %s", func, ex)
return _wrap
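# The YeelightLight command methods below (set_brightness, set_rgb, etc.)
# are wrapped with @_cmd so that a transient yeelight.BulbException is
# logged instead of propagating out of the entity.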
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Yeelight bulbs."""
lights = []
if discovery_info is not None:
_LOGGER.debug("Adding autodetected %s", discovery_info['hostname'])
# not using hostname, as it seems to vary.
name = "yeelight_%s_%s" % (discovery_info["device_type"],
discovery_info["properties"]["mac"])
device = {'name': name, 'ipaddr': discovery_info['host']}
lights.append(YeelightLight(device, DEVICE_SCHEMA({})))
else:
for ipaddr, device_config in config[CONF_DEVICES].items():
_LOGGER.debug("Adding configured %s", device_config[CONF_NAME])
device = {'name': device_config[CONF_NAME], 'ipaddr': ipaddr}
lights.append(YeelightLight(device, device_config))
add_devices(lights, True) # true to request an update before adding.
class YeelightLight(Light):
"""Representation of a Yeelight light."""
def __init__(self, device, config):
"""Initialize the light."""
self.config = config
self._name = device['name']
self._ipaddr = device['ipaddr']
self._supported_features = SUPPORT_YEELIGHT
self._available = False
self._bulb_device = None
self._brightness = None
self._color_temp = None
self._is_on = None
self._rgb = None
@property
def available(self) -> bool:
"""Return if bulb is available."""
return self._available
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def unique_id(self) -> str:
"""Return the ID of this light."""
return "{}.{}".format(self.__class__, self._ipaddr)
@property
def color_temp(self) -> int:
"""Return the color temperature."""
return self._color_temp
@property
def name(self) -> str:
"""Return the name of the device if any."""
return self._name
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._is_on
@property
def brightness(self) -> int:
"""Return the brightness of this light between 1..255."""
return self._brightness
def _get_rgb_from_properties(self):
rgb = self._properties.get("rgb", None)
color_mode = self._properties.get("color_mode", None)
if not rgb or not color_mode:
return rgb
color_mode = int(color_mode)
if color_mode == 2: # color temperature
return color_temperature_to_rgb(self.color_temp)
        if color_mode == 3:  # hsv
            # The bulb reports hue (0-359), sat (0-100) and brightness
            # (1-100) as strings; colorsys expects 0-1 floats and returns
            # 0-1 floats, so normalize on the way in and scale the result
            # back to the 0-255 range used by the other branches.
            hue = int(self._properties.get("hue")) / 360.0
            sat = int(self._properties.get("sat")) / 100.0
            val = int(self._properties.get("bright")) / 100.0
            return tuple(int(round(c * 255))
                         for c in colorsys.hsv_to_rgb(hue, sat, val))
rgb = int(rgb)
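        # rgb is a packed 24-bit integer, e.g. 16744448 (0xFF8000)
        # decodes to red=255, green=128, blue=0 via the shifts below.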
blue = rgb & 0xff
green = (rgb >> 8) & 0xff
red = (rgb >> 16) & 0xff
return red, green, blue
@property
def rgb_color(self) -> tuple:
"""Return the color property."""
return self._rgb
@property
def _properties(self) -> dict:
return self._bulb.last_properties
@property
def _bulb(self) -> object:
import yeelight
if self._bulb_device is None:
try:
self._bulb_device = yeelight.Bulb(self._ipaddr)
self._bulb_device.get_properties() # force init for type
btype = self._bulb_device.bulb_type
if btype == yeelight.BulbType.Color:
self._supported_features |= SUPPORT_YEELIGHT_RGB
self._available = True
except yeelight.BulbException as ex:
self._available = False
_LOGGER.error("Failed to connect to bulb %s, %s: %s",
self._ipaddr, self._name, ex)
return self._bulb_device
def set_music_mode(self, mode) -> None:
"""Set the music mode on or off."""
if mode:
self._bulb.start_music()
else:
self._bulb.stop_music()
def update(self) -> None:
"""Update properties from the bulb."""
import yeelight
try:
self._bulb.get_properties()
self._is_on = self._properties.get("power") == "on"
bright = self._properties.get("bright", None)
if bright:
self._brightness = 255 * (int(bright) / 100)
temp_in_k = self._properties.get("ct", None)
if temp_in_k:
self._color_temp = kelvin_to_mired(int(temp_in_k))
self._rgb = self._get_rgb_from_properties()
self._available = True
except yeelight.BulbException as ex:
if self._available: # just inform once
_LOGGER.error("Unable to update bulb status: %s", ex)
self._available = False
@_cmd
def set_brightness(self, brightness, duration) -> None:
"""Set bulb brightness."""
if brightness:
_LOGGER.debug("Setting brightness: %s", brightness)
self._bulb.set_brightness(brightness / 255 * 100,
duration=duration)
@_cmd
def set_rgb(self, rgb, duration) -> None:
"""Set bulb's color."""
if rgb and self.supported_features & SUPPORT_RGB_COLOR:
_LOGGER.debug("Setting RGB: %s", rgb)
self._bulb.set_rgb(rgb[0], rgb[1], rgb[2], duration=duration)
@_cmd
def set_colortemp(self, colortemp, duration) -> None:
"""Set bulb's color temperature."""
if colortemp and self.supported_features & SUPPORT_COLOR_TEMP:
temp_in_k = mired_to_kelvin(colortemp)
_LOGGER.debug("Setting color temp: %s K", temp_in_k)
self._bulb.set_color_temp(temp_in_k, duration=duration)
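            # Mireds and kelvin are reciprocals (K = 1,000,000 / mired),
            # so e.g. a colortemp of 153 mireds becomes roughly 6536 K.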
@_cmd
def set_default(self) -> None:
"""Set current options as default."""
self._bulb.set_default()
@_cmd
def set_flash(self, flash) -> None:
"""Activate flash."""
if flash:
from yeelight import RGBTransition, SleepTransition, Flow
if self._bulb.last_properties["color_mode"] != 1:
_LOGGER.error("Flash supported currently only in RGB mode.")
return
transition = int(self.config[CONF_TRANSITION])
            if flash == FLASH_LONG:
                count = 1
                duration = transition * 5
            elif flash == FLASH_SHORT:
                count = 1
                duration = transition * 2
red, green, blue = self.rgb_color
transitions = list()
transitions.append(
RGBTransition(255, 0, 0, brightness=10, duration=duration))
transitions.append(SleepTransition(
duration=transition))
transitions.append(
RGBTransition(red, green, blue, brightness=self.brightness,
duration=duration))
flow = Flow(count=count, transitions=transitions)
self._bulb.start_flow(flow)
def turn_on(self, **kwargs) -> None:
"""Turn the bulb on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
colortemp = kwargs.get(ATTR_COLOR_TEMP)
rgb = kwargs.get(ATTR_RGB_COLOR)
flash = kwargs.get(ATTR_FLASH)
duration = int(self.config[CONF_TRANSITION]) # in ms
if ATTR_TRANSITION in kwargs: # passed kwarg overrides config
duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s
self._bulb.turn_on(duration=duration)
if self.config[CONF_MODE_MUSIC] and not self._bulb.music_mode:
self.set_music_mode(self.config[CONF_MODE_MUSIC])
# values checked for none in methods
self.set_rgb(rgb, duration)
self.set_colortemp(colortemp, duration)
self.set_brightness(brightness, duration)
self.set_flash(flash)
# save the current state if we had a manual change.
if self.config[CONF_SAVE_ON_CHANGE]:
if brightness or colortemp or rgb:
self.set_default()
def turn_off(self, **kwargs) -> None:
"""Turn off."""
self._bulb.turn_off()
|
apache-2.0
|
yohanko88/gem5-DC
|
src/python/m5/params.py
|
7
|
69061
|
# Copyright (c) 2012-2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2004-2006 The Regents of The University of Michigan
# Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Nathan Binkert
# Gabe Black
# Andreas Hansson
#####################################################################
#
# Parameter description classes
#
# The _params dictionary in each class maps parameter names to either
# a Param or a VectorParam object. These objects contain the
# parameter description string, the parameter type, and the default
# value (if any). The convert() method on these objects is used to
# force whatever value is assigned to the parameter to the appropriate
# type.
#
# Note that the default values are loaded into the class's attribute
# space when the parameter dictionary is initialized (in
# MetaSimObject._new_param()); after that point they aren't used.
#
#####################################################################
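# Illustrative sketch (not part of this file): a SimObject parameter is
# typically declared as, e.g.,
#
#     count = Param.Int(5, "number of widgets")
#
# which builds a ParamDesc with ptype_str 'Int', a default of 5 and the
# description string; ParamDesc.convert() below then coerces whatever
# value is later assigned (e.g. the string '7') to the Int param type.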
import copy
import datetime
import re
import sys
import time
import math
import proxy
import ticks
from util import *
def isSimObject(*args, **kwargs):
return SimObject.isSimObject(*args, **kwargs)
def isSimObjectSequence(*args, **kwargs):
return SimObject.isSimObjectSequence(*args, **kwargs)
def isSimObjectClass(*args, **kwargs):
return SimObject.isSimObjectClass(*args, **kwargs)
allParams = {}
class MetaParamValue(type):
def __new__(mcls, name, bases, dct):
cls = super(MetaParamValue, mcls).__new__(mcls, name, bases, dct)
assert name not in allParams
allParams[name] = cls
return cls
# Dummy base class to identify types that are legitimate for SimObject
# parameters.
class ParamValue(object):
__metaclass__ = MetaParamValue
cmd_line_settable = False
# Generate the code needed as a prerequisite for declaring a C++
# object of this type. Typically generates one or more #include
# statements. Used when declaring parameters of this type.
@classmethod
def cxx_predecls(cls, code):
pass
# Generate the code needed as a prerequisite for including a
# reference to a C++ object of this type in a SWIG .i file.
# Typically generates one or more %import or %include statements.
@classmethod
def swig_predecls(cls, code):
pass
# default for printing to .ini file is regular string conversion.
# will be overridden in some cases
def ini_str(self):
return str(self)
# default for printing to .json file is regular string conversion.
# will be overridden in some cases, mostly to use native Python
# types where there are similar JSON types
def config_value(self):
return str(self)
# Prerequisites for .ini parsing with cxx_ini_parse
@classmethod
def cxx_ini_predecls(cls, code):
pass
# parse a .ini file entry for this param from string expression
# src into lvalue dest (of the param's C++ type)
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('// Unhandled param type: %s' % cls.__name__)
code('%s false;' % ret)
# allows us to blithely call unproxy() on things without checking
# if they're really proxies or not
def unproxy(self, base):
return self
# Produce a human readable version of the stored value
def pretty_print(self, value):
return str(value)
# Regular parameter description.
class ParamDesc(object):
def __init__(self, ptype_str, ptype, *args, **kwargs):
self.ptype_str = ptype_str
# remember ptype only if it is provided
if ptype != None:
self.ptype = ptype
if args:
if len(args) == 1:
self.desc = args[0]
elif len(args) == 2:
self.default = args[0]
self.desc = args[1]
else:
raise TypeError, 'too many arguments'
if kwargs.has_key('desc'):
assert(not hasattr(self, 'desc'))
self.desc = kwargs['desc']
del kwargs['desc']
if kwargs.has_key('default'):
assert(not hasattr(self, 'default'))
self.default = kwargs['default']
del kwargs['default']
if kwargs:
raise TypeError, 'extra unknown kwargs %s' % kwargs
if not hasattr(self, 'desc'):
raise TypeError, 'desc attribute missing'
def __getattr__(self, attr):
if attr == 'ptype':
ptype = SimObject.allClasses[self.ptype_str]
assert isSimObjectClass(ptype)
self.ptype = ptype
return ptype
raise AttributeError, "'%s' object has no attribute '%s'" % \
(type(self).__name__, attr)
def example_str(self):
if hasattr(self.ptype, "ex_str"):
return self.ptype.ex_str
else:
return self.ptype_str
# Is the param available to be exposed on the command line
def isCmdLineSettable(self):
if hasattr(self.ptype, "cmd_line_settable"):
return self.ptype.cmd_line_settable
else:
return False
def convert(self, value):
if isinstance(value, proxy.BaseProxy):
value.set_param_desc(self)
return value
if not hasattr(self, 'ptype') and isNullPointer(value):
# deferred evaluation of SimObject; continue to defer if
# we're just assigning a null pointer
return value
if isinstance(value, self.ptype):
return value
if isNullPointer(value) and isSimObjectClass(self.ptype):
return value
return self.ptype(value)
def pretty_print(self, value):
if isinstance(value, proxy.BaseProxy):
return str(value)
if isNullPointer(value):
return NULL
return self.ptype(value).pretty_print(value)
def cxx_predecls(self, code):
code('#include <cstddef>')
self.ptype.cxx_predecls(code)
def swig_predecls(self, code):
self.ptype.swig_predecls(code)
def cxx_decl(self, code):
code('${{self.ptype.cxx_type}} ${{self.name}};')
# Vector-valued parameter description. Just like ParamDesc, except
# that the value is a vector (list) of the specified type instead of a
# single value.
class VectorParamValue(list):
__metaclass__ = MetaParamValue
def __setattr__(self, attr, value):
raise AttributeError, \
"Not allowed to set %s on '%s'" % (attr, type(self).__name__)
def config_value(self):
return [v.config_value() for v in self]
def ini_str(self):
return ' '.join([v.ini_str() for v in self])
def getValue(self):
return [ v.getValue() for v in self ]
def unproxy(self, base):
if len(self) == 1 and isinstance(self[0], proxy.AllProxy):
return self[0].unproxy(base)
else:
return [v.unproxy(base) for v in self]
class SimObjectVector(VectorParamValue):
# support clone operation
def __call__(self, **kwargs):
return SimObjectVector([v(**kwargs) for v in self])
def clear_parent(self, old_parent):
for v in self:
v.clear_parent(old_parent)
def set_parent(self, parent, name):
if len(self) == 1:
self[0].set_parent(parent, name)
else:
width = int(math.ceil(math.log(len(self))/math.log(10)))
for i,v in enumerate(self):
v.set_parent(parent, "%s%0*d" % (name, width, i))
def has_parent(self):
return reduce(lambda x,y: x and y, [v.has_parent() for v in self])
# return 'cpu0 cpu1' etc. for print_ini()
def get_name(self):
return ' '.join([v._name for v in self])
# By iterating through the constituent members of the vector here
# we can nicely handle iterating over all a SimObject's children
# without having to provide lots of special functions on
# SimObjectVector directly.
def descendants(self):
for v in self:
for obj in v.descendants():
yield obj
def get_config_as_dict(self):
a = []
for v in self:
a.append(v.get_config_as_dict())
return a
# If we are replacing an item in the vector, make sure to set the
# parent reference of the new SimObject to be the same as the parent
# of the SimObject being replaced. Useful to have if we created
# a SimObjectVector of temporary objects that will be modified later in
# configuration scripts.
def __setitem__(self, key, value):
val = self[key]
if value.has_parent():
warn("SimObject %s already has a parent" % value.get_name() +\
" that is being overwritten by a SimObjectVector")
value.set_parent(val.get_parent(), val._name)
super(SimObjectVector, self).__setitem__(key, value)
# Enumerate the params of each member of the SimObject vector. Creates
# strings that will allow indexing into the vector by the python code and
# allow it to be specified on the command line.
def enumerateParams(self, flags_dict = {},
cmd_line_str = "",
access_str = ""):
if hasattr(self, "_paramEnumed"):
print "Cycle detected enumerating params at %s?!" % (cmd_line_str)
else:
x = 0
for vals in self:
# Each entry in the SimObjectVector should be an
# instance of a SimObject
flags_dict = vals.enumerateParams(flags_dict,
cmd_line_str + "%d." % x,
access_str + "[%d]." % x)
x = x + 1
return flags_dict
class VectorParamDesc(ParamDesc):
# Convert assigned value to appropriate type. If the RHS is not a
# list or tuple, it generates a single-element list.
def convert(self, value):
if isinstance(value, (list, tuple)):
# list: coerce each element into new list
tmp_list = [ ParamDesc.convert(self, v) for v in value ]
elif isinstance(value, str):
# If input is a csv string
tmp_list = [ ParamDesc.convert(self, v) \
for v in value.strip('[').strip(']').split(',') ]
else:
# singleton: coerce to a single-element list
tmp_list = [ ParamDesc.convert(self, value) ]
if isSimObjectSequence(tmp_list):
return SimObjectVector(tmp_list)
else:
return VectorParamValue(tmp_list)
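    # For illustration (a sketch, assuming an Int-valued VectorParamDesc):
    #
    #   desc.convert([1, 2, 3])     # list: each element coerced
    #   desc.convert("[1,2,3]")     # CSV string: brackets stripped, split on ','
    #   desc.convert(7)             # singleton: becomes a one-element vector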
# Produce a human readable example string that describes
# how to set this vector parameter in the absence of a default
# value.
def example_str(self):
s = super(VectorParamDesc, self).example_str()
help_str = "[" + s + "," + s + ", ...]"
return help_str
# Produce a human readable representation of the value of this vector param.
def pretty_print(self, value):
if isinstance(value, (list, tuple)):
tmp_list = [ ParamDesc.pretty_print(self, v) for v in value ]
elif isinstance(value, str):
tmp_list = [ ParamDesc.pretty_print(self, v) for v in value.split(',') ]
else:
tmp_list = [ ParamDesc.pretty_print(self, value) ]
return tmp_list
# This is a helper function for the new config system
def __call__(self, value):
if isinstance(value, (list, tuple)):
# list: coerce each element into new list
tmp_list = [ ParamDesc.convert(self, v) for v in value ]
elif isinstance(value, str):
# If input is a csv string
tmp_list = [ ParamDesc.convert(self, v) \
for v in value.strip('[').strip(']').split(',') ]
else:
# singleton: coerce to a single-element list
tmp_list = [ ParamDesc.convert(self, value) ]
return VectorParamValue(tmp_list)
def swig_module_name(self):
return "%s_vector" % self.ptype_str
def swig_predecls(self, code):
code('%import "${{self.swig_module_name()}}.i"')
def swig_decl(self, code):
code('%module(package="m5.internal") ${{self.swig_module_name()}}')
code('%{')
self.ptype.cxx_predecls(code)
code('%}')
code()
# Make sure the SWIGPY_SLICE_ARG is defined through this inclusion
code('%include "std_container.i"')
code()
self.ptype.swig_predecls(code)
code()
code('%include "std_vector.i"')
code()
ptype = self.ptype_str
cxx_type = self.ptype.cxx_type
code('%template(vector_$ptype) std::vector< $cxx_type >;')
def cxx_predecls(self, code):
code('#include <vector>')
self.ptype.cxx_predecls(code)
def cxx_decl(self, code):
code('std::vector< ${{self.ptype.cxx_type}} > ${{self.name}};')
class ParamFactory(object):
def __init__(self, param_desc_class, ptype_str = None):
self.param_desc_class = param_desc_class
self.ptype_str = ptype_str
def __getattr__(self, attr):
if self.ptype_str:
attr = self.ptype_str + '.' + attr
return ParamFactory(self.param_desc_class, attr)
# E.g., Param.Int(5, "number of widgets")
def __call__(self, *args, **kwargs):
ptype = None
try:
ptype = allParams[self.ptype_str]
except KeyError:
# if name isn't defined yet, assume it's a SimObject, and
# try to resolve it later
pass
return self.param_desc_class(self.ptype_str, ptype, *args, **kwargs)
Param = ParamFactory(ParamDesc)
VectorParam = ParamFactory(VectorParamDesc)
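# Illustrative factory usage (a sketch; the attribute name selects the
# param type, resolved against allParams once the types below exist):
#
#   count = Param.Int(5, "number of widgets")
#   sizes = VectorParam.MemorySize(['1kB', '2kB'], "per-bank sizes")
#   cpu   = Param.BaseCPU("CPU model")  # unknown name: deferred SimObject lookup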
#####################################################################
#
# Parameter Types
#
# Though native Python types could be used to specify parameter types
# (the 'ptype' field of the Param and VectorParam classes), it's more
# flexible to define our own set of types. This gives us more control
# over how Python expressions are converted to values (via the
# __init__() constructor) and how these values are printed out (via
# the __str__() conversion method).
#
#####################################################################
# String-valued parameter. Just mixin the ParamValue class with the
# built-in str class.
class String(ParamValue,str):
cxx_type = 'std::string'
cmd_line_settable = True
@classmethod
def cxx_predecls(self, code):
code('#include <string>')
@classmethod
def swig_predecls(cls, code):
code('%include "std_string.i"')
def __call__(self, value):
        # Note: rebinding the local name 'self' has no effect on the
        # instance; command-line handling uses the returned value.
        self = value
        return value
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s = %s;' % (dest, src))
code('%s true;' % ret)
def getValue(self):
return self
# superclass for "numeric" parameter values, to emulate math
# operations in a type-safe way. e.g., a Latency times an int returns
# a new Latency object.
class NumericParamValue(ParamValue):
def __str__(self):
return str(self.value)
def __float__(self):
return float(self.value)
def __long__(self):
return long(self.value)
def __int__(self):
return int(self.value)
# hook for bounds checking
def _check(self):
return
def __mul__(self, other):
newobj = self.__class__(self)
newobj.value *= other
newobj._check()
return newobj
__rmul__ = __mul__
def __div__(self, other):
newobj = self.__class__(self)
newobj.value /= other
newobj._check()
return newobj
def __sub__(self, other):
newobj = self.__class__(self)
newobj.value -= other
newobj._check()
return newobj
def config_value(self):
return self.value
@classmethod
def cxx_ini_predecls(cls, code):
# Assume that base/str.hh will be included anyway
# code('#include "base/str.hh"')
pass
# The default for parsing PODs from an .ini entry is to extract from an
# istringstream and let overloading choose the right type according to
# the dest type.
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s to_number(%s, %s);' % (ret, src, dest))
# Metaclass for bounds-checked integer parameters. See CheckedInt.
class CheckedIntType(MetaParamValue):
def __init__(cls, name, bases, dict):
super(CheckedIntType, cls).__init__(name, bases, dict)
# CheckedInt is an abstract base class, so we actually don't
# want to do any processing on it... the rest of this code is
# just for classes that derive from CheckedInt.
if name == 'CheckedInt':
return
if not (hasattr(cls, 'min') and hasattr(cls, 'max')):
if not (hasattr(cls, 'size') and hasattr(cls, 'unsigned')):
panic("CheckedInt subclass %s must define either\n" \
" 'min' and 'max' or 'size' and 'unsigned'\n",
name);
if cls.unsigned:
cls.min = 0
cls.max = 2 ** cls.size - 1
else:
cls.min = -(2 ** (cls.size - 1))
cls.max = (2 ** (cls.size - 1)) - 1
# Abstract superclass for bounds-checked integer parameters. This
# class is subclassed to generate parameter classes with specific
# bounds. Initialization of the min and max bounds is done in the
# metaclass CheckedIntType.__init__.
class CheckedInt(NumericParamValue):
__metaclass__ = CheckedIntType
cmd_line_settable = True
def _check(self):
if not self.min <= self.value <= self.max:
raise TypeError, 'Integer param out of bounds %d < %d < %d' % \
(self.min, self.value, self.max)
def __init__(self, value):
if isinstance(value, str):
self.value = convert.toInteger(value)
elif isinstance(value, (int, long, float, NumericParamValue)):
self.value = long(value)
else:
raise TypeError, "Can't convert object of type %s to CheckedInt" \
% type(value).__name__
self._check()
def __call__(self, value):
self.__init__(value)
return value
@classmethod
def cxx_predecls(cls, code):
# most derived types require this, so we just do it here once
code('#include "base/types.hh"')
@classmethod
def swig_predecls(cls, code):
# most derived types require this, so we just do it here once
code('%import "stdint.i"')
code('%import "base/types.hh"')
def getValue(self):
return long(self.value)
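# Bounds checking in action (a sketch): with size = 8 and unsigned = True,
# the metaclass derives min = 0 and max = 255, so:
#
#   UInt8(255)   # fine
#   UInt8(256)   # raises TypeError: Integer param out of bounds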
class Int(CheckedInt): cxx_type = 'int'; size = 32; unsigned = False
class Unsigned(CheckedInt): cxx_type = 'unsigned'; size = 32; unsigned = True
class Int8(CheckedInt): cxx_type = 'int8_t'; size = 8; unsigned = False
class UInt8(CheckedInt): cxx_type = 'uint8_t'; size = 8; unsigned = True
class Int16(CheckedInt): cxx_type = 'int16_t'; size = 16; unsigned = False
class UInt16(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
class Int32(CheckedInt): cxx_type = 'int32_t'; size = 32; unsigned = False
class UInt32(CheckedInt): cxx_type = 'uint32_t'; size = 32; unsigned = True
class Int64(CheckedInt): cxx_type = 'int64_t'; size = 64; unsigned = False
class UInt64(CheckedInt): cxx_type = 'uint64_t'; size = 64; unsigned = True
class Counter(CheckedInt): cxx_type = 'Counter'; size = 64; unsigned = True
class Tick(CheckedInt): cxx_type = 'Tick'; size = 64; unsigned = True
class TcpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
class UdpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
class Percent(CheckedInt): cxx_type = 'int'; min = 0; max = 100
class Cycles(CheckedInt):
cxx_type = 'Cycles'
size = 64
unsigned = True
def getValue(self):
from m5.internal.core import Cycles
return Cycles(self.value)
@classmethod
def cxx_ini_predecls(cls, code):
# Assume that base/str.hh will be included anyway
# code('#include "base/str.hh"')
pass
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('uint64_t _temp;')
code('bool _ret = to_number(%s, _temp);' % src)
code('if (_ret)')
code(' %s = Cycles(_temp);' % dest)
code('%s _ret;' % ret)
class Float(ParamValue, float):
cxx_type = 'double'
cmd_line_settable = True
def __init__(self, value):
if isinstance(value, (int, long, float, NumericParamValue, Float, str)):
self.value = float(value)
else:
raise TypeError, "Can't convert object of type %s to Float" \
% type(value).__name__
def __call__(self, value):
self.__init__(value)
return value
def getValue(self):
return float(self.value)
def config_value(self):
return self
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest))
class MemorySize(CheckedInt):
cxx_type = 'uint64_t'
ex_str = '512MB'
size = 64
unsigned = True
def __init__(self, value):
if isinstance(value, MemorySize):
self.value = value.value
else:
self.value = convert.toMemorySize(value)
self._check()
class MemorySize32(CheckedInt):
cxx_type = 'uint32_t'
ex_str = '512MB'
size = 32
unsigned = True
def __init__(self, value):
if isinstance(value, MemorySize):
self.value = value.value
else:
self.value = convert.toMemorySize(value)
self._check()
class Addr(CheckedInt):
cxx_type = 'Addr'
size = 64
unsigned = True
def __init__(self, value):
if isinstance(value, Addr):
self.value = value.value
else:
try:
# Often addresses are referred to with sizes. Ex: A device
# base address is at "512MB". Use toMemorySize() to convert
# these into addresses. If the address is not specified with a
# "size", an exception will occur and numeric translation will
# proceed below.
self.value = convert.toMemorySize(value)
except (TypeError, ValueError):
# Convert number to string and use long() to do automatic
# base conversion (requires base=0 for auto-conversion)
self.value = long(str(value), base=0)
self._check()
def __add__(self, other):
if isinstance(other, Addr):
return self.value + other.value
else:
return self.value + other
def pretty_print(self, value):
try:
val = convert.toMemorySize(value)
except TypeError:
val = long(value)
return "0x%x" % long(val)
class AddrRange(ParamValue):
cxx_type = 'AddrRange'
def __init__(self, *args, **kwargs):
# Disable interleaving and hashing by default
self.intlvHighBit = 0
self.xorHighBit = 0
self.intlvBits = 0
self.intlvMatch = 0
def handle_kwargs(self, kwargs):
# An address range needs to have an upper limit, specified
# either explicitly with an end, or as an offset using the
# size keyword.
if 'end' in kwargs:
self.end = Addr(kwargs.pop('end'))
elif 'size' in kwargs:
self.end = self.start + Addr(kwargs.pop('size')) - 1
else:
raise TypeError, "Either end or size must be specified"
# Now on to the optional bit
if 'intlvHighBit' in kwargs:
self.intlvHighBit = int(kwargs.pop('intlvHighBit'))
if 'xorHighBit' in kwargs:
self.xorHighBit = int(kwargs.pop('xorHighBit'))
if 'intlvBits' in kwargs:
self.intlvBits = int(kwargs.pop('intlvBits'))
if 'intlvMatch' in kwargs:
self.intlvMatch = int(kwargs.pop('intlvMatch'))
if len(args) == 0:
self.start = Addr(kwargs.pop('start'))
handle_kwargs(self, kwargs)
elif len(args) == 1:
if kwargs:
self.start = Addr(args[0])
handle_kwargs(self, kwargs)
elif isinstance(args[0], (list, tuple)):
self.start = Addr(args[0][0])
self.end = Addr(args[0][1])
else:
self.start = Addr(0)
self.end = Addr(args[0]) - 1
elif len(args) == 2:
self.start = Addr(args[0])
self.end = Addr(args[1])
else:
raise TypeError, "Too many arguments specified"
if kwargs:
raise TypeError, "Too many keywords: %s" % kwargs.keys()
def __str__(self):
return '%s:%s:%s:%s:%s:%s' \
% (self.start, self.end, self.intlvHighBit, self.xorHighBit,\
self.intlvBits, self.intlvMatch)
def size(self):
# Divide the size by the size of the interleaving slice
return (long(self.end) - long(self.start) + 1) >> self.intlvBits
@classmethod
def cxx_predecls(cls, code):
Addr.cxx_predecls(code)
code('#include "base/addr_range.hh"')
@classmethod
def swig_predecls(cls, code):
Addr.swig_predecls(code)
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('uint64_t _start, _end, _intlvHighBit = 0, _xorHighBit = 0;')
code('uint64_t _intlvBits = 0, _intlvMatch = 0;')
code('char _sep;')
code('std::istringstream _stream(${src});')
code('_stream >> _start;')
code('_stream.get(_sep);')
code('_stream >> _end;')
code('if (!_stream.fail() && !_stream.eof()) {')
code(' _stream.get(_sep);')
code(' _stream >> _intlvHighBit;')
code(' _stream.get(_sep);')
code(' _stream >> _xorHighBit;')
code(' _stream.get(_sep);')
code(' _stream >> _intlvBits;')
code(' _stream.get(_sep);')
code(' _stream >> _intlvMatch;')
code('}')
code('bool _ret = !_stream.fail() &&'
'_stream.eof() && _sep == \':\';')
code('if (_ret)')
code(' ${dest} = AddrRange(_start, _end, _intlvHighBit, \
_xorHighBit, _intlvBits, _intlvMatch);')
code('${ret} _ret;')
def getValue(self):
# Go from the Python class to the wrapped C++ class generated
# by swig
from m5.internal.range import AddrRange
return AddrRange(long(self.start), long(self.end),
int(self.intlvHighBit), int(self.xorHighBit),
int(self.intlvBits), int(self.intlvMatch))
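# A few equivalent ways to build ranges (a sketch):
#
#   AddrRange('1GB')                     # start=0, end=0x3FFFFFFF
#   AddrRange(0x1000, 0x1fff)            # explicit start and end
#   AddrRange(start=0x1000, size='4kB')  # end = start + size - 1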
# Boolean parameter type. Python doesn't let you subclass bool, since
# it doesn't want to let you create multiple instances of True and
# False. Thus this is a little more complicated than String.
class Bool(ParamValue):
cxx_type = 'bool'
cmd_line_settable = True
def __init__(self, value):
try:
self.value = convert.toBool(value)
except TypeError:
self.value = bool(value)
def __call__(self, value):
self.__init__(value)
return value
def getValue(self):
return bool(self.value)
def __str__(self):
return str(self.value)
# implement truth value testing for Bool parameters so that these params
# evaluate correctly during the python configuration phase
def __nonzero__(self):
return bool(self.value)
def ini_str(self):
if self.value:
return 'true'
return 'false'
def config_value(self):
return self.value
@classmethod
def cxx_ini_predecls(cls, code):
# Assume that base/str.hh will be included anyway
# code('#include "base/str.hh"')
pass
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('%s to_bool(%s, %s);' % (ret, src, dest))
def IncEthernetAddr(addr, val = 1):
bytes = map(lambda x: int(x, 16), addr.split(':'))
bytes[5] += val
for i in (5, 4, 3, 2, 1):
val,rem = divmod(bytes[i], 256)
bytes[i] = rem
if val == 0:
break
bytes[i - 1] += val
assert(bytes[0] <= 255)
return ':'.join(map(lambda x: '%02x' % x, bytes))
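# For example, the carry propagates across bytes (a sketch):
#
#   IncEthernetAddr("00:90:00:00:00:ff")  # -> "00:90:00:00:01:00"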
_NextEthernetAddr = "00:90:00:00:00:01"
def NextEthernetAddr():
global _NextEthernetAddr
value = _NextEthernetAddr
_NextEthernetAddr = IncEthernetAddr(_NextEthernetAddr, 1)
return value
class EthernetAddr(ParamValue):
cxx_type = 'Net::EthAddr'
ex_str = "00:90:00:00:00:01"
cmd_line_settable = True
@classmethod
def cxx_predecls(cls, code):
code('#include "base/inet.hh"')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/inet.i"')
def __init__(self, value):
if value == NextEthernetAddr:
self.value = value
return
if not isinstance(value, str):
raise TypeError, "expected an ethernet address and didn't get one"
bytes = value.split(':')
if len(bytes) != 6:
raise TypeError, 'invalid ethernet address %s' % value
for byte in bytes:
if not 0 <= int(byte, base=16) <= 0xff:
raise TypeError, 'invalid ethernet address %s' % value
self.value = value
def __call__(self, value):
self.__init__(value)
return value
def unproxy(self, base):
if self.value == NextEthernetAddr:
return EthernetAddr(self.value())
return self
def getValue(self):
from m5.internal.params import EthAddr
return EthAddr(self.value)
def __str__(self):
return self.value
def ini_str(self):
return self.value
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s = Net::EthAddr(%s);' % (dest, src))
code('%s true;' % ret)
# When initializing an IpAddress, pass in an existing IpAddress, a string of
# the form "a.b.c.d", or an integer representing an IP.
class IpAddress(ParamValue):
cxx_type = 'Net::IpAddress'
ex_str = "127.0.0.1"
cmd_line_settable = True
@classmethod
def cxx_predecls(cls, code):
code('#include "base/inet.hh"')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/inet.i"')
def __init__(self, value):
if isinstance(value, IpAddress):
self.ip = value.ip
else:
try:
self.ip = convert.toIpAddress(value)
except TypeError:
self.ip = long(value)
self.verifyIp()
def __call__(self, value):
self.__init__(value)
return value
def __str__(self):
tup = [(self.ip >> i) & 0xff for i in (24, 16, 8, 0)]
return '%d.%d.%d.%d' % tuple(tup)
def __eq__(self, other):
if isinstance(other, IpAddress):
return self.ip == other.ip
elif isinstance(other, str):
try:
return self.ip == convert.toIpAddress(other)
except:
return False
else:
return self.ip == other
def __ne__(self, other):
return not (self == other)
def verifyIp(self):
if self.ip < 0 or self.ip >= (1 << 32):
raise TypeError, "invalid ip address %#08x" % self.ip
def getValue(self):
from m5.internal.params import IpAddress
return IpAddress(self.ip)
# When initializing an IpNetmask, pass in an existing IpNetmask, a string of
# the form "a.b.c.d/n" or "a.b.c.d/e.f.g.h", or an ip and netmask as
# positional or keyword arguments.
class IpNetmask(IpAddress):
cxx_type = 'Net::IpNetmask'
ex_str = "127.0.0.0/24"
cmd_line_settable = True
@classmethod
def cxx_predecls(cls, code):
code('#include "base/inet.hh"')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/inet.i"')
def __init__(self, *args, **kwargs):
def handle_kwarg(self, kwargs, key, elseVal = None):
if key in kwargs:
setattr(self, key, kwargs.pop(key))
elif elseVal:
setattr(self, key, elseVal)
else:
raise TypeError, "No value set for %s" % key
if len(args) == 0:
handle_kwarg(self, kwargs, 'ip')
handle_kwarg(self, kwargs, 'netmask')
elif len(args) == 1:
if kwargs:
if not 'ip' in kwargs and not 'netmask' in kwargs:
raise TypeError, "Invalid arguments"
handle_kwarg(self, kwargs, 'ip', args[0])
handle_kwarg(self, kwargs, 'netmask', args[0])
elif isinstance(args[0], IpNetmask):
self.ip = args[0].ip
self.netmask = args[0].netmask
else:
(self.ip, self.netmask) = convert.toIpNetmask(args[0])
elif len(args) == 2:
self.ip = args[0]
self.netmask = args[1]
else:
raise TypeError, "Too many arguments specified"
if kwargs:
raise TypeError, "Too many keywords: %s" % kwargs.keys()
self.verify()
def __call__(self, value):
self.__init__(value)
return value
def __str__(self):
return "%s/%d" % (super(IpNetmask, self).__str__(), self.netmask)
def __eq__(self, other):
if isinstance(other, IpNetmask):
return self.ip == other.ip and self.netmask == other.netmask
elif isinstance(other, str):
try:
return (self.ip, self.netmask) == convert.toIpNetmask(other)
except:
return False
else:
return False
def verify(self):
self.verifyIp()
if self.netmask < 0 or self.netmask > 32:
raise TypeError, "invalid netmask %d" % netmask
def getValue(self):
from m5.internal.params import IpNetmask
return IpNetmask(self.ip, self.netmask)
# When initializing an IpWithPort, pass in an existing IpWithPort, a string of
# the form "a.b.c.d:p", or an ip and port as positional or keyword arguments.
class IpWithPort(IpAddress):
cxx_type = 'Net::IpWithPort'
ex_str = "127.0.0.1:80"
cmd_line_settable = True
@classmethod
def cxx_predecls(cls, code):
code('#include "base/inet.hh"')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/inet.i"')
def __init__(self, *args, **kwargs):
def handle_kwarg(self, kwargs, key, elseVal = None):
if key in kwargs:
setattr(self, key, kwargs.pop(key))
elif elseVal:
setattr(self, key, elseVal)
else:
raise TypeError, "No value set for %s" % key
if len(args) == 0:
handle_kwarg(self, kwargs, 'ip')
handle_kwarg(self, kwargs, 'port')
elif len(args) == 1:
if kwargs:
if not 'ip' in kwargs and not 'port' in kwargs:
raise TypeError, "Invalid arguments"
handle_kwarg(self, kwargs, 'ip', args[0])
handle_kwarg(self, kwargs, 'port', args[0])
elif isinstance(args[0], IpWithPort):
self.ip = args[0].ip
self.port = args[0].port
else:
(self.ip, self.port) = convert.toIpWithPort(args[0])
elif len(args) == 2:
self.ip = args[0]
self.port = args[1]
else:
raise TypeError, "Too many arguments specified"
if kwargs:
raise TypeError, "Too many keywords: %s" % kwargs.keys()
self.verify()
def __call__(self, value):
self.__init__(value)
return value
def __str__(self):
return "%s:%d" % (super(IpWithPort, self).__str__(), self.port)
def __eq__(self, other):
if isinstance(other, IpWithPort):
return self.ip == other.ip and self.port == other.port
elif isinstance(other, str):
try:
return (self.ip, self.port) == convert.toIpWithPort(other)
except:
return False
else:
return False
def verify(self):
self.verifyIp()
if self.port < 0 or self.port > 0xffff:
raise TypeError, "invalid port %d" % self.port
def getValue(self):
from m5.internal.params import IpWithPort
return IpWithPort(self.ip, self.port)
time_formats = [ "%a %b %d %H:%M:%S %Z %Y",
"%a %b %d %H:%M:%S %Y",
"%Y/%m/%d %H:%M:%S",
"%Y/%m/%d %H:%M",
"%Y/%m/%d",
"%m/%d/%Y %H:%M:%S",
"%m/%d/%Y %H:%M",
"%m/%d/%Y",
"%m/%d/%y %H:%M:%S",
"%m/%d/%y %H:%M",
"%m/%d/%y"]
def parse_time(value):
from time import gmtime, strptime, struct_time, time
from datetime import datetime, date
if isinstance(value, struct_time):
return value
if isinstance(value, (int, long)):
return gmtime(value)
if isinstance(value, (datetime, date)):
return value.timetuple()
if isinstance(value, str):
if value in ('Now', 'Today'):
            # 'time' here is the function imported above, not the module
            return gmtime(time())
for format in time_formats:
try:
return strptime(value, format)
except ValueError:
pass
raise ValueError, "Could not parse '%s' as a time" % value
class Time(ParamValue):
cxx_type = 'tm'
@classmethod
def cxx_predecls(cls, code):
code('#include <time.h>')
@classmethod
def swig_predecls(cls, code):
code('%include "python/swig/time.i"')
def __init__(self, value):
self.value = parse_time(value)
def __call__(self, value):
self.__init__(value)
return value
def getValue(self):
from m5.internal.params import tm
c_time = tm()
py_time = self.value
# UNIX is years since 1900
c_time.tm_year = py_time.tm_year - 1900;
# Python starts at 1, UNIX starts at 0
c_time.tm_mon = py_time.tm_mon - 1;
c_time.tm_mday = py_time.tm_mday;
c_time.tm_hour = py_time.tm_hour;
c_time.tm_min = py_time.tm_min;
c_time.tm_sec = py_time.tm_sec;
# Python has 0 as Monday, UNIX is 0 as sunday
c_time.tm_wday = py_time.tm_wday + 1
if c_time.tm_wday > 6:
c_time.tm_wday -= 7;
# Python starts at 1, Unix starts at 0
c_time.tm_yday = py_time.tm_yday - 1;
return c_time
def __str__(self):
return time.asctime(self.value)
def ini_str(self):
return str(self)
def get_config_as_dict(self):
        assert False
return str(self)
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <time.h>')
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('char *_parse_ret = strptime((${src}).c_str(),')
code(' "%a %b %d %H:%M:%S %Y", &(${dest}));')
code('${ret} _parse_ret && *_parse_ret == \'\\0\';');
# Enumerated types are a little more complex. The user specifies the
# type as Enum(foo) where foo is either a list or dictionary of
# alternatives (typically strings, but not necessarily so). (In the
# long run, the integer value of the parameter will be the list index
# or the corresponding dictionary value. For now, since we only check
# that the alternative is valid and then spit it into a .ini file,
# there's not much point in using the dictionary.)
# What Enum() must do is generate a new type encapsulating the
# provided list/dictionary so that specific values of the parameter
# can be instances of that type. We define two hidden internal
# classes (_ListEnum and _DictEnum) to serve as base classes, then
# derive the new type from the appropriate base class on the fly.
allEnums = {}
# Metaclass for Enum types
class MetaEnum(MetaParamValue):
def __new__(mcls, name, bases, dict):
assert name not in allEnums
cls = super(MetaEnum, mcls).__new__(mcls, name, bases, dict)
allEnums[name] = cls
return cls
def __init__(cls, name, bases, init_dict):
if init_dict.has_key('map'):
if not isinstance(cls.map, dict):
raise TypeError, "Enum-derived class attribute 'map' " \
"must be of type dict"
# build list of value strings from map
cls.vals = cls.map.keys()
cls.vals.sort()
elif init_dict.has_key('vals'):
if not isinstance(cls.vals, list):
raise TypeError, "Enum-derived class attribute 'vals' " \
"must be of type list"
# build string->value map from vals sequence
cls.map = {}
for idx,val in enumerate(cls.vals):
cls.map[val] = idx
else:
raise TypeError, "Enum-derived class must define "\
"attribute 'map' or 'vals'"
cls.cxx_type = 'Enums::%s' % name
super(MetaEnum, cls).__init__(name, bases, init_dict)
# Generate C++ class declaration for this enum type.
# Note that we wrap the enum in a class/struct to act as a namespace,
# so that the enum strings can be brief w/o worrying about collisions.
def cxx_decl(cls, code):
wrapper_name = cls.wrapper_name
wrapper = 'struct' if cls.wrapper_is_struct else 'namespace'
name = cls.__name__ if cls.enum_name is None else cls.enum_name
idem_macro = '__ENUM__%s__%s__' % (wrapper_name, name)
code('''\
#ifndef $idem_macro
#define $idem_macro
$wrapper $wrapper_name {
enum $name {
''')
code.indent(2)
for val in cls.vals:
code('$val = ${{cls.map[val]}},')
code('Num_$name = ${{len(cls.vals)}}')
code.dedent(2)
code(' };')
if cls.wrapper_is_struct:
code(' static const char *${name}Strings[Num_${name}];')
code('};')
else:
code('extern const char *${name}Strings[Num_${name}];')
code('}')
code()
code('#endif // $idem_macro')
def cxx_def(cls, code):
wrapper_name = cls.wrapper_name
file_name = cls.__name__
name = cls.__name__ if cls.enum_name is None else cls.enum_name
code('#include "enums/$file_name.hh"')
if cls.wrapper_is_struct:
code('const char *${wrapper_name}::${name}Strings'
'[Num_${name}] =')
else:
code('namespace Enums {')
code.indent(1)
code(' const char *${name}Strings[Num_${name}] =')
code('{')
code.indent(1)
for val in cls.vals:
code('"$val",')
code.dedent(1)
code('};')
if not cls.wrapper_is_struct:
code('} // namespace $wrapper_name')
code.dedent(1)
def swig_decl(cls, code):
name = cls.__name__
code('''\
%module(package="m5.internal") enum_$name
%{
#include "enums/$name.hh"
%}
%include "enums/$name.hh"
''')
# Base class for enum types.
class Enum(ParamValue):
__metaclass__ = MetaEnum
vals = []
cmd_line_settable = True
# The name of the wrapping namespace or struct
wrapper_name = 'Enums'
# If true, the enum is wrapped in a struct rather than a namespace
wrapper_is_struct = False
# If not None, use this as the enum name rather than this class name
enum_name = None
def __init__(self, value):
if value not in self.map:
raise TypeError, "Enum param got bad value '%s' (not in %s)" \
% (value, self.vals)
self.value = value
def __call__(self, value):
self.__init__(value)
return value
@classmethod
def cxx_predecls(cls, code):
code('#include "enums/$0.hh"', cls.__name__)
@classmethod
def swig_predecls(cls, code):
code('%import "python/m5/internal/enum_$0.i"', cls.__name__)
@classmethod
def cxx_ini_parse(cls, code, src, dest, ret):
code('if (false) {')
for elem_name in cls.map.iterkeys():
code('} else if (%s == "%s") {' % (src, elem_name))
code.indent()
code('%s = Enums::%s;' % (dest, elem_name))
code('%s true;' % ret)
code.dedent()
code('} else {')
code(' %s false;' % ret)
code('}')
def getValue(self):
return int(self.map[self.value])
def __str__(self):
return self.value
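# Defining a concrete enum is then a one-liner (a sketch with
# hypothetical alternatives):
#
#   class CoherenceProtocol(Enum): vals = ['MI', 'MESI', 'MOESI']
#
# The metaclass builds the string->value map (MI=0, MESI=1, MOESI=2)
# and sets cxx_type to 'Enums::CoherenceProtocol'.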
# how big does a rounding error need to be before we warn about it?
frequency_tolerance = 0.001 # 0.1%
class TickParamValue(NumericParamValue):
cxx_type = 'Tick'
ex_str = "1MHz"
cmd_line_settable = True
@classmethod
def cxx_predecls(cls, code):
code('#include "base/types.hh"')
@classmethod
def swig_predecls(cls, code):
code('%import "stdint.i"')
code('%import "base/types.hh"')
def __call__(self, value):
self.__init__(value)
return value
def getValue(self):
return long(self.value)
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
# Ticks are expressed in seconds in JSON files and in plain
# Ticks in .ini files. Switch based on a config flag
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('${ret} to_number(${src}, ${dest});')
class Latency(TickParamValue):
ex_str = "100ns"
def __init__(self, value):
if isinstance(value, (Latency, Clock)):
self.ticks = value.ticks
self.value = value.value
elif isinstance(value, Frequency):
self.ticks = value.ticks
self.value = 1.0 / value.value
elif value.endswith('t'):
self.ticks = True
self.value = int(value[:-1])
else:
self.ticks = False
self.value = convert.toLatency(value)
def __call__(self, value):
self.__init__(value)
return value
def __getattr__(self, attr):
if attr in ('latency', 'period'):
return self
if attr == 'frequency':
return Frequency(self)
raise AttributeError, "Latency object has no attribute '%s'" % attr
def getValue(self):
if self.ticks or self.value == 0:
value = self.value
else:
value = ticks.fromSeconds(self.value)
return long(value)
def config_value(self):
return self.getValue()
# convert latency to ticks
def ini_str(self):
return '%d' % self.getValue()
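# Latency and Frequency are interconvertible (a sketch):
#
#   Latency('10ns').frequency  # -> Frequency equivalent to 100MHz
#   Latency('5t')              # 5 raw ticks, via the 't' suffix above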
class Frequency(TickParamValue):
ex_str = "1GHz"
def __init__(self, value):
if isinstance(value, (Latency, Clock)):
if value.value == 0:
self.value = 0
else:
self.value = 1.0 / value.value
self.ticks = value.ticks
elif isinstance(value, Frequency):
self.value = value.value
self.ticks = value.ticks
else:
self.ticks = False
self.value = convert.toFrequency(value)
def __call__(self, value):
self.__init__(value)
return value
def __getattr__(self, attr):
if attr == 'frequency':
return self
if attr in ('latency', 'period'):
return Latency(self)
raise AttributeError, "Frequency object has no attribute '%s'" % attr
# convert latency to ticks
def getValue(self):
if self.ticks or self.value == 0:
value = self.value
else:
value = ticks.fromSeconds(1.0 / self.value)
return long(value)
def config_value(self):
return self.getValue()
def ini_str(self):
return '%d' % self.getValue()
# A generic Frequency and/or Latency value. Value is stored as a
# latency, just like Latency and Frequency.
class Clock(TickParamValue):
def __init__(self, value):
if isinstance(value, (Latency, Clock)):
self.ticks = value.ticks
self.value = value.value
elif isinstance(value, Frequency):
self.ticks = value.ticks
self.value = 1.0 / value.value
elif value.endswith('t'):
self.ticks = True
self.value = int(value[:-1])
else:
self.ticks = False
self.value = convert.anyToLatency(value)
def __call__(self, value):
self.__init__(value)
return value
def __str__(self):
return "%s" % Latency(self)
def __getattr__(self, attr):
if attr == 'frequency':
return Frequency(self)
if attr in ('latency', 'period'):
return Latency(self)
raise AttributeError, "Frequency object has no attribute '%s'" % attr
def getValue(self):
return self.period.getValue()
def config_value(self):
return self.period.config_value()
def ini_str(self):
return self.period.ini_str()
class Voltage(float,ParamValue):
cxx_type = 'double'
ex_str = "1V"
cmd_line_settable = True
def __new__(cls, value):
# convert to voltage
val = convert.toVoltage(value)
        return super(Voltage, cls).__new__(cls, val)
def __call__(self, value):
val = convert.toVoltage(value)
self.__init__(val)
return value
def __str__(self):
return str(self.getValue())
def getValue(self):
value = float(self)
return value
def ini_str(self):
return '%f' % self.getValue()
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest))
class Current(float, ParamValue):
cxx_type = 'double'
ex_str = "1mA"
cmd_line_settable = False
def __new__(cls, value):
# convert to current
val = convert.toCurrent(value)
        return super(Current, cls).__new__(cls, val)
def __call__(self, value):
val = convert.toCurrent(value)
self.__init__(val)
return value
def __str__(self):
return str(self.getValue())
def getValue(self):
value = float(self)
return value
def ini_str(self):
return '%f' % self.getValue()
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest))
class NetworkBandwidth(float,ParamValue):
cxx_type = 'float'
ex_str = "1Gbps"
cmd_line_settable = True
def __new__(cls, value):
# convert to bits per second
val = convert.toNetworkBandwidth(value)
        return super(NetworkBandwidth, cls).__new__(cls, val)
def __str__(self):
        # the converted value is the float itself; there is no .val attribute
        return str(float(self))
def __call__(self, value):
val = convert.toNetworkBandwidth(value)
self.__init__(val)
return value
def getValue(self):
# convert to seconds per byte
value = 8.0 / float(self)
# convert to ticks per byte
value = ticks.fromSeconds(value)
return float(value)
def ini_str(self):
return '%f' % self.getValue()
def config_value(self):
return '%f' % self.getValue()
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest))
class MemoryBandwidth(float,ParamValue):
cxx_type = 'float'
ex_str = "1GB/s"
cmd_line_settable = True
def __new__(cls, value):
# convert to bytes per second
val = convert.toMemoryBandwidth(value)
        return super(MemoryBandwidth, cls).__new__(cls, val)
def __call__(self, value):
val = convert.toMemoryBandwidth(value)
self.__init__(val)
return value
def getValue(self):
# convert to seconds per byte
value = float(self)
if value:
value = 1.0 / float(self)
# convert to ticks per byte
value = ticks.fromSeconds(value)
return float(value)
def ini_str(self):
return '%f' % self.getValue()
def config_value(self):
return '%f' % self.getValue()
@classmethod
def cxx_ini_predecls(cls, code):
code('#include <sstream>')
@classmethod
def cxx_ini_parse(self, code, src, dest, ret):
code('%s (std::istringstream(%s) >> %s).eof();' % (ret, src, dest))
#
# "Constants"... handy aliases for various values.
#
# Special class for NULL pointers. Note the special check in
# make_param_value() above that lets these be assigned where a
# SimObject is required.
# only one copy of a particular node
class NullSimObject(object):
__metaclass__ = Singleton
def __call__(cls):
return cls
def _instantiate(self, parent = None, path = ''):
pass
def ini_str(self):
return 'Null'
def unproxy(self, base):
return self
def set_path(self, parent, name):
pass
def __str__(self):
return 'Null'
def config_value(self):
return None
def getValue(self):
return None
# The only instance you'll ever need...
NULL = NullSimObject()
def isNullPointer(value):
return isinstance(value, NullSimObject)
# Some memory range specifications use this as a default upper bound.
MaxAddr = Addr.max
MaxTick = Tick.max
AllMemory = AddrRange(0, MaxAddr)
#####################################################################
#
# Port objects
#
# Ports are used to interconnect objects in the memory system.
#
#####################################################################
# Port reference: encapsulates a reference to a particular port on a
# particular SimObject.
class PortRef(object):
def __init__(self, simobj, name, role):
assert(isSimObject(simobj) or isSimObjectClass(simobj))
self.simobj = simobj
self.name = name
self.role = role
self.peer = None # not associated with another port yet
self.ccConnected = False # C++ port connection done?
self.index = -1 # always -1 for non-vector ports
def __str__(self):
return '%s.%s' % (self.simobj, self.name)
def __len__(self):
        # Return the number of connected ports, i.e. 0 if we have no
# peer and 1 if we do.
return int(self.peer != None)
# for config.ini, print peer's name (not ours)
def ini_str(self):
return str(self.peer)
# for config.json
def get_config_as_dict(self):
return {'role' : self.role, 'peer' : str(self.peer)}
def __getattr__(self, attr):
if attr == 'peerObj':
# shorthand for proxies
return self.peer.simobj
raise AttributeError, "'%s' object has no attribute '%s'" % \
(self.__class__.__name__, attr)
# Full connection is symmetric (both ways). Called via
# SimObject.__setattr__ as a result of a port assignment, e.g.,
# "obj1.portA = obj2.portB", or via VectorPortElementRef.__setitem__,
# e.g., "obj1.portA[3] = obj2.portB".
def connect(self, other):
if isinstance(other, VectorPortRef):
# reference to plain VectorPort is implicit append
other = other._get_next()
if self.peer and not proxy.isproxy(self.peer):
fatal("Port %s is already connected to %s, cannot connect %s\n",
self, self.peer, other);
self.peer = other
if proxy.isproxy(other):
other.set_param_desc(PortParamDesc())
elif isinstance(other, PortRef):
if other.peer is not self:
other.connect(self)
else:
raise TypeError, \
"assigning non-port reference '%s' to port '%s'" \
% (other, self)
# Allow a master/slave port pair to be spliced between
# a port and its connected peer. Useful operation for connecting
# instrumentation structures into a system when it is necessary
# to connect the instrumentation after the full system has been
# constructed.
def splice(self, new_master_peer, new_slave_peer):
if self.peer and not proxy.isproxy(self.peer):
if isinstance(new_master_peer, PortRef) and \
isinstance(new_slave_peer, PortRef):
old_peer = self.peer
if self.role == 'SLAVE':
self.peer = new_master_peer
old_peer.peer = new_slave_peer
new_master_peer.connect(self)
new_slave_peer.connect(old_peer)
elif self.role == 'MASTER':
self.peer = new_slave_peer
old_peer.peer = new_master_peer
new_slave_peer.connect(self)
new_master_peer.connect(old_peer)
else:
panic("Port %s has unknown role, "+\
"cannot splice in new peers\n", self)
else:
raise TypeError, \
"Splicing non-port references '%s','%s' to port '%s'"\
                  % (new_master_peer, new_slave_peer, self)
else:
fatal("Port %s not connected, cannot splice in new peers\n", self)
def clone(self, simobj, memo):
if memo.has_key(self):
return memo[self]
newRef = copy.copy(self)
memo[self] = newRef
newRef.simobj = simobj
assert(isSimObject(newRef.simobj))
if self.peer and not proxy.isproxy(self.peer):
peerObj = self.peer.simobj(_memo=memo)
newRef.peer = self.peer.clone(peerObj, memo)
assert(not isinstance(newRef.peer, VectorPortRef))
return newRef
def unproxy(self, simobj):
assert(simobj is self.simobj)
if proxy.isproxy(self.peer):
try:
realPeer = self.peer.unproxy(self.simobj)
except:
print "Error in unproxying port '%s' of %s" % \
(self.name, self.simobj.path())
raise
self.connect(realPeer)
# Call C++ to create corresponding port connection between C++ objects
def ccConnect(self):
from m5.internal.pyobject import connectPorts
if self.role == 'SLAVE':
# do nothing and let the master take care of it
return
if self.ccConnected: # already done this
return
peer = self.peer
if not self.peer: # nothing to connect to
return
# check that we connect a master to a slave
if self.role == peer.role:
raise TypeError, \
"cannot connect '%s' and '%s' due to identical role '%s'" \
% (peer, self, self.role)
try:
# self is always the master and peer the slave
connectPorts(self.simobj.getCCObject(), self.name, self.index,
peer.simobj.getCCObject(), peer.name, peer.index)
except:
print "Error connecting port %s.%s to %s.%s" % \
(self.simobj.path(), self.name,
peer.simobj.path(), peer.name)
raise
self.ccConnected = True
peer.ccConnected = True
# A reference to an individual element of a VectorPort... much like a
# PortRef, but has an index.
class VectorPortElementRef(PortRef):
def __init__(self, simobj, name, role, index):
PortRef.__init__(self, simobj, name, role)
self.index = index
def __str__(self):
return '%s.%s[%d]' % (self.simobj, self.name, self.index)
# A reference to a complete vector-valued port (not just a single element).
# Can be indexed to retrieve individual VectorPortElementRef instances.
class VectorPortRef(object):
def __init__(self, simobj, name, role):
assert(isSimObject(simobj) or isSimObjectClass(simobj))
self.simobj = simobj
self.name = name
self.role = role
self.elements = []
def __str__(self):
return '%s.%s[:]' % (self.simobj, self.name)
def __len__(self):
        # Return the number of connected peers, corresponding to the
        # length of the elements list.
return len(self.elements)
# for config.ini, print peer's name (not ours)
def ini_str(self):
return ' '.join([el.ini_str() for el in self.elements])
# for config.json
def get_config_as_dict(self):
return {'role' : self.role,
'peer' : [el.ini_str() for el in self.elements]}
def __getitem__(self, key):
if not isinstance(key, int):
raise TypeError, "VectorPort index must be integer"
if key >= len(self.elements):
# need to extend list
ext = [VectorPortElementRef(self.simobj, self.name, self.role, i)
for i in range(len(self.elements), key+1)]
self.elements.extend(ext)
return self.elements[key]
def _get_next(self):
return self[len(self.elements)]
def __setitem__(self, key, value):
if not isinstance(key, int):
raise TypeError, "VectorPort index must be integer"
self[key].connect(value)
def connect(self, other):
if isinstance(other, (list, tuple)):
# Assign list of port refs to vector port.
# For now, append them... not sure if that's the right semantics
# or if it should replace the current vector.
for ref in other:
self._get_next().connect(ref)
else:
# scalar assignment to plain VectorPort is implicit append
self._get_next().connect(other)
def clone(self, simobj, memo):
if memo.has_key(self):
return memo[self]
newRef = copy.copy(self)
memo[self] = newRef
newRef.simobj = simobj
assert(isSimObject(newRef.simobj))
newRef.elements = [el.clone(simobj, memo) for el in self.elements]
return newRef
def unproxy(self, simobj):
[el.unproxy(simobj) for el in self.elements]
def ccConnect(self):
[el.ccConnect() for el in self.elements]
# Port description object. Like a ParamDesc object, this represents a
# logical port in the SimObject class, not a particular port on a
# SimObject instance. The latter are represented by PortRef objects.
class Port(object):
# Generate a PortRef for this port on the given SimObject with the
# given name
def makeRef(self, simobj):
return PortRef(simobj, self.name, self.role)
# Connect an instance of this port (on the given SimObject with
# the given name) with the port described by the supplied PortRef
def connect(self, simobj, ref):
self.makeRef(simobj).connect(ref)
# No need for any pre-declarations at the moment as we merely rely
# on an unsigned int.
def cxx_predecls(self, code):
pass
# Declare an unsigned int with the same name as the port, that
# will eventually hold the number of connected ports (and thus the
# number of elements for a VectorPort).
def cxx_decl(self, code):
code('unsigned int port_${{self.name}}_connection_count;')
class MasterPort(Port):
# MasterPort("description")
def __init__(self, *args):
if len(args) == 1:
self.desc = args[0]
self.role = 'MASTER'
else:
raise TypeError, 'wrong number of arguments'
class SlavePort(Port):
# SlavePort("description")
def __init__(self, *args):
if len(args) == 1:
self.desc = args[0]
self.role = 'SLAVE'
else:
raise TypeError, 'wrong number of arguments'
# VectorPort description object. Like Port, but represents a vector
# of connections (e.g., as on a XBar).
class VectorPort(Port):
def __init__(self, *args):
self.isVec = True
def makeRef(self, simobj):
return VectorPortRef(simobj, self.name, self.role)
class VectorMasterPort(VectorPort):
# VectorMasterPort("description")
def __init__(self, *args):
if len(args) == 1:
self.desc = args[0]
self.role = 'MASTER'
VectorPort.__init__(self, *args)
else:
raise TypeError, 'wrong number of arguments'
class VectorSlavePort(VectorPort):
# VectorSlavePort("description")
def __init__(self, *args):
if len(args) == 1:
self.desc = args[0]
self.role = 'SLAVE'
VectorPort.__init__(self, *args)
else:
raise TypeError, 'wrong number of arguments'
# 'Fake' ParamDesc for Port references to assign to the _pdesc slot of
# proxy objects (via set_param_desc()) so that proxy error messages
# make sense.
class PortParamDesc(object):
__metaclass__ = Singleton
ptype_str = 'Port'
ptype = Port
baseEnums = allEnums.copy()
baseParams = allParams.copy()
def clear():
global allEnums, allParams
allEnums = baseEnums.copy()
allParams = baseParams.copy()
__all__ = ['Param', 'VectorParam',
'Enum', 'Bool', 'String', 'Float',
'Int', 'Unsigned', 'Int8', 'UInt8', 'Int16', 'UInt16',
'Int32', 'UInt32', 'Int64', 'UInt64',
'Counter', 'Addr', 'Tick', 'Percent',
'TcpPort', 'UdpPort', 'EthernetAddr',
'IpAddress', 'IpNetmask', 'IpWithPort',
'MemorySize', 'MemorySize32',
'Latency', 'Frequency', 'Clock', 'Voltage',
'NetworkBandwidth', 'MemoryBandwidth',
'AddrRange',
'MaxAddr', 'MaxTick', 'AllMemory',
'Time',
'NextEthernetAddr', 'NULL',
'MasterPort', 'SlavePort',
'VectorMasterPort', 'VectorSlavePort']
import SimObject
|
bsd-3-clause
|
h8rift/android_kernel_htc_msm8960-evita-1_85
|
tools/perf/scripts/python/check-perf-trace.py
|
11214
|
2503
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
mikebrevard/UnixAdministration
|
vagrant/etc/data/genData/venv/lib/python3.4/site-packages/setuptools/depends.py
|
462
|
6370
|
import sys
import imp
import marshal
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from distutils.version import StrictVersion
from setuptools import compat
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name,self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f,p,i = find_module(self.module,paths)
if f: f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
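    # Illustrative usage (a sketch with a hypothetical dependency):
    #
    #   req = Require('Django', '1.8', 'django')
    #   req.full_name()   # 'Django-1.8'
    #   req.is_present()  # True if the 'django' module can be found
    #   req.is_current()  # additionally checks django.__version__ >= 1.8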
def _iter_code(code):
"""Yield '(op,arg)' pair for each operation in code object 'code'"""
from array import array
from dis import HAVE_ARGUMENT, EXTENDED_ARG
bytes = array('b',code.co_code)
eof = len(code.co_code)
ptr = 0
extended_arg = 0
while ptr<eof:
op = bytes[ptr]
if op>=HAVE_ARGUMENT:
arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
ptr += 3
if op==EXTENDED_ARG:
extended_arg = arg * compat.long_type(65536)
continue
else:
arg = None
ptr += 1
yield op,arg
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix,mode,kind) = info = imp.find_module(part, paths)
if kind==PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts,module))
return info
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
try:
if kind==PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind==PY_FROZEN:
code = imp.get_frozen_object(module)
elif kind==PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
if module not in sys.modules:
imp.load_module(module, f, path, (suffix, mode, kind))
return getattr(sys.modules[module], symbol, None)
finally:
if f:
f.close()
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for op, arg in _iter_code(code):
if op==LOAD_CONST:
const = code.co_consts[arg]
elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL):
return const
else:
const = default
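# For example (a sketch):
#
#   code = compile("__version__ = '1.2'", '<example>', 'exec')
#   extract_constant(code, '__version__')  # -> '1.2'
#   extract_constant(code, 'missing')      # -> None (not in co_names)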
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
|
mit
|
arkmaxim/grpc
|
src/python/grpcio/grpc/framework/foundation/logging_pool.py
|
21
|
3030
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A thread pool that logs exceptions raised by tasks executed within it."""
import logging
from concurrent import futures
def _wrap(behavior):
"""Wraps an arbitrary callable behavior in exception-logging."""
def _wrapping(*args, **kwargs):
try:
return behavior(*args, **kwargs)
    except Exception:
logging.exception(
'Unexpected exception from %s executed in logging pool!', behavior)
raise
return _wrapping
class _LoggingPool(object):
"""An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""
def __init__(self, backing_pool):
self._backing_pool = backing_pool
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._backing_pool.shutdown(wait=True)
def submit(self, fn, *args, **kwargs):
return self._backing_pool.submit(_wrap(fn), *args, **kwargs)
def map(self, func, *iterables, **kwargs):
return self._backing_pool.map(
_wrap(func), *iterables, timeout=kwargs.get('timeout', None))
def shutdown(self, wait=True):
self._backing_pool.shutdown(wait=wait)
def pool(max_workers):
"""Creates a thread pool that logs exceptions raised by the tasks within it.
Args:
    max_workers: The maximum number of worker threads to allow in the pool.
Returns:
A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions
raised by the tasks executed within it.
"""
return _LoggingPool(futures.ThreadPoolExecutor(max_workers))
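# Editor's usage sketch (not part of the original file): any exception a task
# raises is logged by _wrap() before propagating through the returned future.
if __name__ == '__main__':
  with pool(max_workers=2) as executor:
    future = executor.submit(lambda x: x * x, 7)
    print(future.result())  # -> 49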
|
bsd-3-clause
|
pingpan2013/sensor-box-project
|
sensor_project/sensor_box.py
|
1
|
6325
|
#!/usr/bin/env python
#
# File Name: sensor_box.py
#
# Desc:
#     Control the sensors to get humidity and moisture information.
#     If the Internet is down, store the results in local files;
#     otherwise send the data to the database.
#
import os
import time
import datetime
import logging
import subprocess
import RPi.GPIO as GPIO
import conf
from led import turn_LED_on, turn_LED_off, LED_YELLOW, LED_GREEN
import server_conn
import log_management as log_m
import csv_data
import mois_sensor
import humi_sensor
import curr_sensor
import temp_sensor
import water_level
GPIO.setwarnings(False)
def main():
'''
The main process of this project:
1. Get data from sensors, including humidity/moisture/temperature
2. Store the data locally
3. Check if Internet is available; if so, send the data to the server
4. Wait to restart cycle
'''
# Some preparation work
# Use yellow LED to indicate code is running
turn_LED_on(LED_YELLOW)
internet_working = True
humidity = None
temp_f = None
moistures = None
temps = None
water_depth = None
# Time spent waiting for sensor values with reduced noise
sensor_reading_time = conf.water_level_interval
# Ensure required picture directories exist
if not os.path.exists(conf.online_pictures_folder):
os.makedirs(conf.online_pictures_folder)
if not os.path.exists(conf.offline_pictures_folder):
os.makedirs(conf.offline_pictures_folder)
# Initial stuff at the top of the log file.
log_m.start_log()
# Begin main program loop
while True:
# Get current timestamp first
now = datetime.datetime.now()
picture_filename_timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
mysql_timestamp = now.strftime('%Y-%m-%d %H:%M:%S')
csv_timestamp = now.strftime('%m/%d/%y %I:%M %p')
pic_filename = str(picture_filename_timestamp) + '.jpg'
pic_path = conf.offline_pictures_folder + pic_filename
logging.info('Beginning data collection cycle')
# STEP 1: Get all data from sensors
try:
if conf.using_humidity_sensor:
humidity, temp_f = humi_sensor.get_humidity_and_temp()
logging.debug("Humidity: %.1f %%" % humidity)
logging.debug("Internal Temperature: %.1f F" % temp_f)
if conf.num_moisture_sensors > 0:
moistures = mois_sensor.get_moistures(conf.num_moisture_sensors)
sensor_chr = 'A'
for moisture in moistures:
logging.debug("Moisture " + sensor_chr + " = " + str(moisture))
sensor_chr = chr(ord(sensor_chr) + 1)
if len(moistures) != conf.num_moisture_sensors:
logging.error("Number of moistures doesn't match conf")
if conf.num_temp_sensors > 0:
temps = temp_sensor.get_temp_data_f()
sensor_chr = 'A'
for temp in temps:
logging.debug("Temperature " + sensor_chr + " = " + str(temp))
sensor_chr = chr(ord(sensor_chr) + 1)
if len(temps) != conf.num_temp_sensors:
logging.error("Number of temperatures doesn't match conf")
if conf.using_water_level_sensor:
water_depth = water_level.get_inches(conf.water_level_interval)
        except Exception:
logging.exception("Exception occurred while reading sensors")
logging.info('Gathered data')
if conf.using_camera:
subprocess.call(['raspistill', '-o', pic_path])
logging.debug('Took picture ' + pic_filename)
# STEP 2: Store data locally
# Initialize CSV file if not present
csv_data.initialize()
# Add new line of data to CSV file
csv_data.write_data(temp_f,
humidity,
moistures,
temps,
water_depth,
csv_timestamp
)
logging.debug("Next reading to be collected in "
+ str(float(conf.period)/60.0) + " minutes")
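        # Editor's note: the cycle's idle time is split into two equal halves
        # (one sleep here, one at STEP 4), so a full iteration -- including
        # the sensor_reading_time spent sampling -- takes roughly conf.period.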
time.sleep((conf.period - sensor_reading_time)/2)
# STEP 3: Send data to the server and database if Internet is available
try:
if server_conn.internet_on():
if not internet_working:
logging.warning('Internet restored; sending data...')
internet_working = True
else:
logging.info('Sending data...')
# Turn on green LED to indicate Internet usage
turn_LED_on(LED_GREEN)
if conf.using_camera:
try:
# store pictures to FTP server
server_conn.store_data_to_ftp(pic_filename)
os.system("rm -f " + conf.offline_pictures_folder + '*')
except IOError:
logging.exception('Could not send picture file')
try:
# store data in database
server_conn.store_data_to_db(temp_f,
humidity,
moistures,
temps,
water_depth,
mysql_timestamp
)
                except Exception:
logging.exception('Exception occurred with database code')
logging.info('Data sent')
else:
if internet_working:
logging.warning('Internet is down; could not send data')
internet_working = False
        except Exception:
logging.exception('Exception occurred when trying to send data')
turn_LED_off(LED_GREEN)
# STEP 4: Wait until cycle starts over again
time.sleep((conf.period - sensor_reading_time)/2)
# End of main loop
turn_LED_off(LED_YELLOW)
logging.info('Exited main loop. Stopping data recording')
if __name__ == '__main__':
main()
|
gpl-3.0
|
ict-felix/stack
|
modules/resource/manager/transit-network/src/proxy_interface.py
|
2
|
1172
|
# Copyright 2014-2015 National Institute of Advanced Industrial Science and Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
from abc import ABCMeta, abstractmethod
# class Handler(SimpleXMLRPCRequestHandler):
class Proxy:
__metaclass__ = ABCMeta
    @abstractmethod
    def reserve(self, resv):
        pass
    @abstractmethod
    def modify(self, resv, end_time_sec):
        pass
    @abstractmethod
    def provision(self, resv):
        pass
    @abstractmethod
    def release(self, resv):
        pass
    @abstractmethod
    def terminate(self, resv):
        pass
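# Editor's sketch (not part of the original file): a minimal concrete Proxy.
# The class name and its echo bodies are hypothetical, shown only to
# illustrate how the abstract interface is meant to be implemented.
class _EchoProxy(Proxy):
    def reserve(self, resv):
        return resv
    def modify(self, resv, end_time_sec):
        return resv
    def provision(self, resv):
        return resv
    def release(self, resv):
        return resv
    def terminate(self, resv):
        return resv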
|
apache-2.0
|
apophys/freeipa
|
ipaclient/remote_plugins/2_156/netgroup.py
|
16
|
24373
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Netgroups
A netgroup is a group used for permission checking. It can contain both
user and host values.
EXAMPLES:
Add a new netgroup:
ipa netgroup-add --desc="NFS admins" admins
Add members to the netgroup:
ipa netgroup-add-member --users=tuser1 --users=tuser2 admins
Remove a member from the netgroup:
ipa netgroup-remove-member --users=tuser2 admins
Display information about a netgroup:
ipa netgroup-show admins
Delete a netgroup:
ipa netgroup-del admins
""")
register = Registry()
@register()
class netgroup(Object):
takes_params = (
parameters.Str(
'cn',
primary_key=True,
label=_(u'Netgroup name'),
),
parameters.Str(
'description',
required=False,
label=_(u'Description'),
doc=_(u'Netgroup description'),
),
parameters.Str(
'nisdomainname',
required=False,
label=_(u'NIS domain name'),
),
parameters.Str(
'ipauniqueid',
required=False,
label=_(u'IPA unique ID'),
doc=_(u'IPA unique ID'),
),
parameters.Str(
'usercategory',
required=False,
label=_(u'User category'),
doc=_(u'User category the rule applies to'),
),
parameters.Str(
'hostcategory',
required=False,
label=_(u'Host category'),
doc=_(u'Host category the rule applies to'),
),
parameters.Str(
'externalhost',
required=False,
multivalue=True,
label=_(u'External host'),
),
parameters.Str(
'member_netgroup',
required=False,
label=_(u'Member netgroups'),
),
parameters.Str(
'memberof_netgroup',
required=False,
label=_(u'Member of netgroups'),
),
parameters.Str(
'memberindirect_netgroup',
required=False,
label=_(u'Indirect Member netgroups'),
),
parameters.Str(
'memberuser_user',
required=False,
label=_(u'Member User'),
),
parameters.Str(
'memberuser_group',
required=False,
label=_(u'Member Group'),
),
parameters.Str(
'memberhost_host',
required=False,
label=_(u'Member Host'),
),
parameters.Str(
'memberhost_hostgroup',
required=False,
label=_(u'Member Hostgroup'),
),
)
@register()
class netgroup_add(Method):
__doc__ = _("Add a new netgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'Netgroup description'),
),
parameters.Str(
'nisdomainname',
required=False,
cli_name='nisdomain',
label=_(u'NIS domain name'),
),
parameters.Str(
'usercategory',
required=False,
cli_name='usercat',
cli_metavar="['all']",
label=_(u'User category'),
doc=_(u'User category the rule applies to'),
),
parameters.Str(
'hostcategory',
required=False,
cli_name='hostcat',
cli_metavar="['all']",
label=_(u'Host category'),
doc=_(u'Host category the rule applies to'),
),
parameters.Str(
'externalhost',
required=False,
multivalue=True,
label=_(u'External host'),
exclude=('cli', 'webui'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class netgroup_add_member(Method):
__doc__ = _("Add members to a netgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'user',
required=False,
multivalue=True,
cli_name='users',
label=_(u'member user'),
doc=_(u'users to add'),
alwaysask=True,
),
parameters.Str(
'group',
required=False,
multivalue=True,
cli_name='groups',
label=_(u'member group'),
doc=_(u'groups to add'),
alwaysask=True,
),
parameters.Str(
'host',
required=False,
multivalue=True,
cli_name='hosts',
label=_(u'member host'),
doc=_(u'hosts to add'),
alwaysask=True,
),
parameters.Str(
'hostgroup',
required=False,
multivalue=True,
cli_name='hostgroups',
label=_(u'member host group'),
doc=_(u'host groups to add'),
alwaysask=True,
),
parameters.Str(
'netgroup',
required=False,
multivalue=True,
cli_name='netgroups',
label=_(u'member netgroup'),
doc=_(u'netgroups to add'),
alwaysask=True,
),
)
has_output = (
output.Entry(
'result',
),
output.Output(
'failed',
dict,
doc=_(u'Members that could not be added'),
),
output.Output(
'completed',
int,
doc=_(u'Number of members added'),
),
)
@register()
class netgroup_del(Method):
__doc__ = _("Delete a netgroup.")
takes_args = (
parameters.Str(
'cn',
multivalue=True,
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.ListOfPrimaryKeys(
'value',
),
)
@register()
class netgroup_find(Method):
__doc__ = _("Search for a netgroup.")
takes_args = (
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'cn',
required=False,
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'Netgroup description'),
),
parameters.Str(
'nisdomainname',
required=False,
cli_name='nisdomain',
label=_(u'NIS domain name'),
),
parameters.Str(
'ipauniqueid',
required=False,
cli_name='uuid',
label=_(u'IPA unique ID'),
doc=_(u'IPA unique ID'),
),
parameters.Str(
'usercategory',
required=False,
cli_name='usercat',
cli_metavar="['all']",
label=_(u'User category'),
doc=_(u'User category the rule applies to'),
),
parameters.Str(
'hostcategory',
required=False,
cli_name='hostcat',
cli_metavar="['all']",
label=_(u'Host category'),
doc=_(u'Host category the rule applies to'),
),
parameters.Str(
'externalhost',
required=False,
multivalue=True,
label=_(u'External host'),
exclude=('cli', 'webui'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds (0 is unlimited)'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned (0 is unlimited)'),
),
parameters.Flag(
'private',
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Flag(
'managed',
doc=_(u'search for managed groups'),
default=False,
default_from=DefaultFrom(lambda private: private),
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("name")'),
default=False,
autofill=True,
),
parameters.Str(
'netgroup',
required=False,
multivalue=True,
cli_name='netgroups',
label=_(u'netgroup'),
doc=_(u'Search for netgroups with these member netgroups.'),
),
parameters.Str(
'no_netgroup',
required=False,
multivalue=True,
cli_name='no_netgroups',
label=_(u'netgroup'),
doc=_(u'Search for netgroups without these member netgroups.'),
),
parameters.Str(
'user',
required=False,
multivalue=True,
cli_name='users',
label=_(u'user'),
doc=_(u'Search for netgroups with these member users.'),
),
parameters.Str(
'no_user',
required=False,
multivalue=True,
cli_name='no_users',
label=_(u'user'),
doc=_(u'Search for netgroups without these member users.'),
),
parameters.Str(
'group',
required=False,
multivalue=True,
cli_name='groups',
label=_(u'group'),
doc=_(u'Search for netgroups with these member groups.'),
),
parameters.Str(
'no_group',
required=False,
multivalue=True,
cli_name='no_groups',
label=_(u'group'),
doc=_(u'Search for netgroups without these member groups.'),
),
parameters.Str(
'host',
required=False,
multivalue=True,
cli_name='hosts',
label=_(u'host'),
doc=_(u'Search for netgroups with these member hosts.'),
),
parameters.Str(
'no_host',
required=False,
multivalue=True,
cli_name='no_hosts',
label=_(u'host'),
doc=_(u'Search for netgroups without these member hosts.'),
),
parameters.Str(
'hostgroup',
required=False,
multivalue=True,
cli_name='hostgroups',
label=_(u'host group'),
doc=_(u'Search for netgroups with these member host groups.'),
),
parameters.Str(
'no_hostgroup',
required=False,
multivalue=True,
cli_name='no_hostgroups',
label=_(u'host group'),
doc=_(u'Search for netgroups without these member host groups.'),
),
parameters.Str(
'in_netgroup',
required=False,
multivalue=True,
cli_name='in_netgroups',
label=_(u'netgroup'),
doc=_(u'Search for netgroups with these member of netgroups.'),
),
parameters.Str(
'not_in_netgroup',
required=False,
multivalue=True,
cli_name='not_in_netgroups',
label=_(u'netgroup'),
doc=_(u'Search for netgroups without these member of netgroups.'),
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class netgroup_mod(Method):
__doc__ = _("Modify a netgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'Netgroup description'),
),
parameters.Str(
'nisdomainname',
required=False,
cli_name='nisdomain',
label=_(u'NIS domain name'),
),
parameters.Str(
'usercategory',
required=False,
cli_name='usercat',
cli_metavar="['all']",
label=_(u'User category'),
doc=_(u'User category the rule applies to'),
),
parameters.Str(
'hostcategory',
required=False,
cli_name='hostcat',
cli_metavar="['all']",
label=_(u'Host category'),
doc=_(u'Host category the rule applies to'),
),
parameters.Str(
'externalhost',
required=False,
multivalue=True,
label=_(u'External host'),
exclude=('cli', 'webui'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class netgroup_remove_member(Method):
__doc__ = _("Remove members from a netgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'user',
required=False,
multivalue=True,
cli_name='users',
label=_(u'member user'),
doc=_(u'users to remove'),
alwaysask=True,
),
parameters.Str(
'group',
required=False,
multivalue=True,
cli_name='groups',
label=_(u'member group'),
doc=_(u'groups to remove'),
alwaysask=True,
),
parameters.Str(
'host',
required=False,
multivalue=True,
cli_name='hosts',
label=_(u'member host'),
doc=_(u'hosts to remove'),
alwaysask=True,
),
parameters.Str(
'hostgroup',
required=False,
multivalue=True,
cli_name='hostgroups',
label=_(u'member host group'),
doc=_(u'host groups to remove'),
alwaysask=True,
),
parameters.Str(
'netgroup',
required=False,
multivalue=True,
cli_name='netgroups',
label=_(u'member netgroup'),
doc=_(u'netgroups to remove'),
alwaysask=True,
),
)
has_output = (
output.Entry(
'result',
),
output.Output(
'failed',
dict,
doc=_(u'Members that could not be removed'),
),
output.Output(
'completed',
int,
doc=_(u'Number of members removed'),
),
)
@register()
class netgroup_show(Method):
__doc__ = _("Display information about a netgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='name',
label=_(u'Netgroup name'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
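# Editor's usage sketch (not part of the generated plugin module); assumes a
# configured ipalib client environment and a reachable IPA server:
#
#     from ipalib import api
#     api.bootstrap(context='cli')
#     api.finalize()
#     api.Backend.rpcclient.connect()
#     result = api.Command.netgroup_add(u'admins', description=u'NFS admins')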
|
gpl-3.0
|
pjbull/mkdocs
|
mkdocs/commands/new.py
|
30
|
1433
|
# coding: utf-8
from __future__ import unicode_literals
import io
import logging
import os
config_text = 'site_name: My Docs\n'
index_text = """# Welcome to MkDocs
For full documentation visit [mkdocs.org](http://mkdocs.org).
## Commands
* `mkdocs new [dir-name]` - Create a new project.
* `mkdocs serve` - Start the live-reloading docs server.
* `mkdocs build` - Build the documentation site.
* `mkdocs help` - Print this help message.
## Project layout
mkdocs.yml # The configuration file.
docs/
index.md # The documentation homepage.
... # Other markdown pages, images and other files.
"""
log = logging.getLogger(__name__)
def new(output_dir):
docs_dir = os.path.join(output_dir, 'docs')
config_path = os.path.join(output_dir, 'mkdocs.yml')
index_path = os.path.join(docs_dir, 'index.md')
if os.path.exists(config_path):
log.info('Project already exists.')
return
if not os.path.exists(output_dir):
log.info('Creating project directory: %s', output_dir)
os.mkdir(output_dir)
log.info('Writing config file: %s', config_path)
    with io.open(config_path, 'w', encoding='utf-8') as f:
        f.write(config_text)
if os.path.exists(index_path):
return
log.info('Writing initial docs: %s', index_path)
if not os.path.exists(docs_dir):
os.mkdir(docs_dir)
    with io.open(index_path, 'w', encoding='utf-8') as f:
        f.write(index_text)
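if __name__ == '__main__':  # Editor's sketch, not part of the original module
    # Scaffold a project under ./my-docs: writes mkdocs.yml and docs/index.md
    # unless they already exist; 'my-docs' is a hypothetical directory name.
    new('my-docs')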
|
bsd-2-clause
|
xNovax/SickRage
|
lib/unidecode/x091.py
|
252
|
4655
|
data = (
'Ruo ', # 0x00
'Bei ', # 0x01
'E ', # 0x02
'Yu ', # 0x03
'Juan ', # 0x04
'Yu ', # 0x05
'Yun ', # 0x06
'Hou ', # 0x07
'Kui ', # 0x08
'Xiang ', # 0x09
'Xiang ', # 0x0a
'Sou ', # 0x0b
'Tang ', # 0x0c
'Ming ', # 0x0d
'Xi ', # 0x0e
'Ru ', # 0x0f
'Chu ', # 0x10
'Zi ', # 0x11
'Zou ', # 0x12
'Ju ', # 0x13
'Wu ', # 0x14
'Xiang ', # 0x15
'Yun ', # 0x16
'Hao ', # 0x17
'Yong ', # 0x18
'Bi ', # 0x19
'Mo ', # 0x1a
'Chao ', # 0x1b
'Fu ', # 0x1c
'Liao ', # 0x1d
'Yin ', # 0x1e
'Zhuan ', # 0x1f
'Hu ', # 0x20
'Qiao ', # 0x21
'Yan ', # 0x22
'Zhang ', # 0x23
'Fan ', # 0x24
'Qiao ', # 0x25
'Xu ', # 0x26
'Deng ', # 0x27
'Bi ', # 0x28
'Xin ', # 0x29
'Bi ', # 0x2a
'Ceng ', # 0x2b
'Wei ', # 0x2c
'Zheng ', # 0x2d
'Mao ', # 0x2e
'Shan ', # 0x2f
'Lin ', # 0x30
'Po ', # 0x31
'Dan ', # 0x32
'Meng ', # 0x33
'Ye ', # 0x34
'Cao ', # 0x35
'Kuai ', # 0x36
'Feng ', # 0x37
'Meng ', # 0x38
'Zou ', # 0x39
'Kuang ', # 0x3a
'Lian ', # 0x3b
'Zan ', # 0x3c
'Chan ', # 0x3d
'You ', # 0x3e
'Qi ', # 0x3f
'Yan ', # 0x40
'Chan ', # 0x41
'Zan ', # 0x42
'Ling ', # 0x43
'Huan ', # 0x44
'Xi ', # 0x45
'Feng ', # 0x46
'Zan ', # 0x47
'Li ', # 0x48
'You ', # 0x49
'Ding ', # 0x4a
'Qiu ', # 0x4b
'Zhuo ', # 0x4c
'Pei ', # 0x4d
'Zhou ', # 0x4e
'Yi ', # 0x4f
'Hang ', # 0x50
'Yu ', # 0x51
'Jiu ', # 0x52
'Yan ', # 0x53
'Zui ', # 0x54
'Mao ', # 0x55
'Dan ', # 0x56
'Xu ', # 0x57
'Tou ', # 0x58
'Zhen ', # 0x59
'Fen ', # 0x5a
'Sakenomoto ', # 0x5b
'[?] ', # 0x5c
'Yun ', # 0x5d
'Tai ', # 0x5e
'Tian ', # 0x5f
'Qia ', # 0x60
'Tuo ', # 0x61
'Zuo ', # 0x62
'Han ', # 0x63
'Gu ', # 0x64
'Su ', # 0x65
'Po ', # 0x66
'Chou ', # 0x67
'Zai ', # 0x68
'Ming ', # 0x69
'Luo ', # 0x6a
'Chuo ', # 0x6b
'Chou ', # 0x6c
'You ', # 0x6d
'Tong ', # 0x6e
'Zhi ', # 0x6f
'Xian ', # 0x70
'Jiang ', # 0x71
'Cheng ', # 0x72
'Yin ', # 0x73
'Tu ', # 0x74
'Xiao ', # 0x75
'Mei ', # 0x76
'Ku ', # 0x77
'Suan ', # 0x78
'Lei ', # 0x79
'Pu ', # 0x7a
'Zui ', # 0x7b
'Hai ', # 0x7c
'Yan ', # 0x7d
'Xi ', # 0x7e
'Niang ', # 0x7f
'Wei ', # 0x80
'Lu ', # 0x81
'Lan ', # 0x82
'Yan ', # 0x83
'Tao ', # 0x84
'Pei ', # 0x85
'Zhan ', # 0x86
'Chun ', # 0x87
'Tan ', # 0x88
'Zui ', # 0x89
'Chuo ', # 0x8a
'Cu ', # 0x8b
'Kun ', # 0x8c
'Ti ', # 0x8d
'Mian ', # 0x8e
'Du ', # 0x8f
'Hu ', # 0x90
'Xu ', # 0x91
'Xing ', # 0x92
'Tan ', # 0x93
'Jiu ', # 0x94
'Chun ', # 0x95
'Yun ', # 0x96
'Po ', # 0x97
'Ke ', # 0x98
'Sou ', # 0x99
'Mi ', # 0x9a
'Quan ', # 0x9b
'Chou ', # 0x9c
'Cuo ', # 0x9d
'Yun ', # 0x9e
'Yong ', # 0x9f
'Ang ', # 0xa0
'Zha ', # 0xa1
'Hai ', # 0xa2
'Tang ', # 0xa3
'Jiang ', # 0xa4
'Piao ', # 0xa5
'Shan ', # 0xa6
'Yu ', # 0xa7
'Li ', # 0xa8
'Zao ', # 0xa9
'Lao ', # 0xaa
'Yi ', # 0xab
'Jiang ', # 0xac
'Pu ', # 0xad
'Jiao ', # 0xae
'Xi ', # 0xaf
'Tan ', # 0xb0
'Po ', # 0xb1
'Nong ', # 0xb2
'Yi ', # 0xb3
'Li ', # 0xb4
'Ju ', # 0xb5
'Jiao ', # 0xb6
'Yi ', # 0xb7
'Niang ', # 0xb8
'Ru ', # 0xb9
'Xun ', # 0xba
'Chou ', # 0xbb
'Yan ', # 0xbc
'Ling ', # 0xbd
'Mi ', # 0xbe
'Mi ', # 0xbf
'Niang ', # 0xc0
'Xin ', # 0xc1
'Jiao ', # 0xc2
'Xi ', # 0xc3
'Mi ', # 0xc4
'Yan ', # 0xc5
'Bian ', # 0xc6
'Cai ', # 0xc7
'Shi ', # 0xc8
'You ', # 0xc9
'Shi ', # 0xca
'Shi ', # 0xcb
'Li ', # 0xcc
'Zhong ', # 0xcd
'Ye ', # 0xce
'Liang ', # 0xcf
'Li ', # 0xd0
'Jin ', # 0xd1
'Jin ', # 0xd2
'Qiu ', # 0xd3
'Yi ', # 0xd4
'Diao ', # 0xd5
'Dao ', # 0xd6
'Zhao ', # 0xd7
'Ding ', # 0xd8
'Po ', # 0xd9
'Qiu ', # 0xda
'He ', # 0xdb
'Fu ', # 0xdc
'Zhen ', # 0xdd
'Zhi ', # 0xde
'Ba ', # 0xdf
'Luan ', # 0xe0
'Fu ', # 0xe1
'Nai ', # 0xe2
'Diao ', # 0xe3
'Shan ', # 0xe4
'Qiao ', # 0xe5
'Kou ', # 0xe6
'Chuan ', # 0xe7
'Zi ', # 0xe8
'Fan ', # 0xe9
'Yu ', # 0xea
'Hua ', # 0xeb
'Han ', # 0xec
'Gong ', # 0xed
'Qi ', # 0xee
'Mang ', # 0xef
'Ri ', # 0xf0
'Di ', # 0xf1
'Si ', # 0xf2
'Xi ', # 0xf3
'Yi ', # 0xf4
'Chai ', # 0xf5
'Shi ', # 0xf6
'Tu ', # 0xf7
'Xi ', # 0xf8
'Nu ', # 0xf9
'Qian ', # 0xfa
'Ishiyumi ', # 0xfb
'Jian ', # 0xfc
'Pi ', # 0xfd
'Ye ', # 0xfe
'Yin ', # 0xff
)
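# Editor's note (not part of the generated table): unidecode resolves a
# character by splitting its codepoint into a 256-entry page and an offset;
# this module is page 0x91 (U+9100..U+91FF), so e.g. U+914D is looked up as
# data[0x4d] == 'Pei '.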
|
gpl-3.0
|
jameskdev/lge-kernel-batman_skt
|
tools/perf/python/twatch.py
|
3213
|
1338
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, sample_period = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
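    # Editor's note: the sample_type bitmask selects which fields each sampled
    # event carries (period, tid and cpu here); they are read back below via
    # event.sample_cpu / event.sample_pid / event.sample_tid.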
    evsel.open(cpus = cpus, threads = threads)
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
gpl-2.0
|
damonkohler/sl4a
|
python/xmpppy/doc/examples/xtalk.py
|
87
|
2951
|
#!/usr/bin/python
# $Id: xtalk.py,v 1.4 2008/08/09 17:00:18 normanr Exp $
import sys,os,xmpp,time,select
class Bot:
def __init__(self,jabber,remotejid):
self.jabber = jabber
self.remotejid = remotejid
def register_handlers(self):
self.jabber.RegisterHandler('message',self.xmpp_message)
def xmpp_message(self, con, event):
type = event.getType()
fromjid = event.getFrom().getStripped()
body = event.getBody()
if type in ['message', 'chat', None] and fromjid == self.remotejid and body:
sys.stdout.write(body + '\n')
def stdio_message(self, message):
m = xmpp.protocol.Message(to=self.remotejid,body=message,typ='chat')
self.jabber.send(m)
def xmpp_connect(self):
con=self.jabber.connect()
if not con:
sys.stderr.write('could not connect!\n')
return False
sys.stderr.write('connected with %s\n'%con)
auth=self.jabber.auth(jid.getNode(),jidparams['password'],resource=jid.getResource())
if not auth:
sys.stderr.write('could not authenticate!\n')
return False
sys.stderr.write('authenticated using %s\n'%auth)
self.register_handlers()
return con
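# Editor's note: xmpp_connect() above reads the module-level 'jid' and
# 'jidparams' objects created in the __main__ block below, so the Bot class
# is only usable from this script's entry point as written.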
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Syntax: xtalk JID"
sys.exit(0)
tojid=sys.argv[1]
jidparams={}
if os.access(os.environ['HOME']+'/.xtalk',os.R_OK):
for ln in open(os.environ['HOME']+'/.xtalk').readlines():
if not ln[0] in ('#',';'):
key,val=ln.strip().split('=',1)
jidparams[key.lower()]=val
for mandatory in ['jid','password']:
if mandatory not in jidparams.keys():
open(os.environ['HOME']+'/.xtalk','w').write('#Uncomment fields before use and type in correct credentials.\n#[email protected]/resource (/resource is optional)\n#PASSWORD=juliet\n')
print 'Please point ~/.xtalk config file to valid JID for sending messages.'
sys.exit(0)
jid=xmpp.protocol.JID(jidparams['jid'])
cl=xmpp.Client(jid.getDomain())#,debug=[])
bot=Bot(cl,tojid)
if not bot.xmpp_connect():
sys.stderr.write("Could not connect to server, or password mismatch!\n")
sys.exit(1)
#cl.SendInitPresence(requestRoster=0) # you may need to uncomment this for old server
socketlist = {cl.Connection._sock:'xmpp',sys.stdin:'stdio'}
online = 1
while online:
(i , o, e) = select.select(socketlist.keys(),[],[],1)
for each in i:
if socketlist[each] == 'xmpp':
cl.Process(1)
elif socketlist[each] == 'stdio':
msg = sys.stdin.readline().rstrip('\r\n')
bot.stdio_message(msg)
else:
raise Exception("Unknown socket type: %s" % repr(socketlist[each]))
#cl.disconnect()
|
apache-2.0
|
yesudeep/cmc
|
app/console/app/pygments/styles/native.py
|
23
|
1917
|
# -*- coding: utf-8 -*-
"""
pygments.styles.native
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "native" vim theme.
:copyright: 2006-2007 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
class NativeStyle(Style):
"""
Pygments version of the "native" vim theme.
"""
background_color = '#202020'
highlight_color = '#404040'
styles = {
Token: '#d0d0d0',
Whitespace: '#666666',
Comment: 'italic #999999',
Comment.Preproc: 'noitalic bold #cd2828',
Comment.Special: 'noitalic bold #e50808 bg:#520000',
Keyword: 'bold #6ab825',
Keyword.Pseudo: 'nobold',
Operator.Word: 'bold #6ab825',
String: '#ed9d13',
String.Other: '#ffa500',
Number: '#3677a9',
Name.Builtin: '#24909d',
Name.Variable: '#40ffff',
Name.Constant: '#40ffff',
Name.Class: 'underline #447fcf',
Name.Function: '#447fcf',
Name.Namespace: 'underline #447fcf',
Name.Exception: '#bbbbbb',
Name.Tag: 'bold #6ab825',
Name.Attribute: '#bbbbbb',
Name.Decorator: '#ffa500',
Generic.Heading: 'bold #ffffff',
Generic.Subheading: 'underline #ffffff',
Generic.Deleted: '#d22323',
Generic.Inserted: '#589819',
Generic.Error: '#d22323',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#aaaaaa',
Generic.Output: '#cccccc',
Generic.Traceback: '#d22323',
Error: 'bg:#e3d2d2 #a61717'
}
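# Editor's usage sketch (not part of the original module); assumes pygments
# is installed and importable:
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    # Render a small snippet as HTML using this style.
    print(highlight('print(1)', PythonLexer(), HtmlFormatter(style=NativeStyle)))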
|
mit
|
zzicewind/linux
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
|
1891
|
3300
|
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
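# Editor's worked example (not in the original): trace_flag_str(0x05) visits
# the set bits 0x01 and 0x04 and typically yields "IRQS_OFF | NEED_RESCHED"
# (dict key order is not guaranteed on Python 2, so the names may appear in
# either order).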
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
self.callchain = common_callchain
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
|
gpl-2.0
|
mindpin/mindpin_oppia
|
core/controllers/reader.py
|
1
|
11151
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the Oppia reader view."""
__author__ = 'Sean Lip'
from core.controllers import base
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import skins_services
from core.domain import stats_services
from core.domain import widget_registry
import feconf
import jinja_utils
import jinja2
def require_viewer(handler):
"""Decorator that checks if the user can view the given exploration."""
def test_can_view(self, exploration_id, **kwargs):
"""Checks if the user for the current session is logged in."""
if rights_manager.Actor(self.user_id).can_view(exploration_id):
return handler(self, exploration_id, **kwargs)
else:
raise self.PageNotFoundException
return test_can_view
class ExplorationPage(base.BaseHandler):
"""Page describing a single exploration."""
@require_viewer
def get(self, exploration_id):
"""Handles GET requests."""
version = self.request.get('v')
if not version:
# The default value for a missing parameter seems to be ''.
version = None
try:
exploration = exp_services.get_exploration_by_id(
exploration_id, version=version)
except Exception as e:
raise self.PageNotFoundException(e)
if not rights_manager.Actor(self.user_id).can_view(exploration_id):
raise self.PageNotFoundException
is_iframed = (self.request.get('iframed') == 'true')
# TODO(sll): Cache these computations.
interactive_widget_ids = exploration.get_interactive_widget_ids()
widget_js_directives = (
widget_registry.Registry.get_noninteractive_widget_js() +
widget_registry.Registry.get_interactive_widget_js(
interactive_widget_ids))
self.values.update({
'content': skins_services.get_skin_html(exploration.default_skin),
'exploration_version': version,
'iframed': is_iframed,
'is_private': rights_manager.is_exploration_private(exploration_id),
'nav_mode': feconf.NAV_MODE_EXPLORE,
'widget_js_directives': jinja2.utils.Markup(widget_js_directives),
})
if is_iframed:
self.render_template(
'reader/reader_exploration.html', iframe_restriction=None)
else:
self.render_template('reader/reader_exploration.html')
class ExplorationHandler(base.BaseHandler):
"""Provides the initial data for a single exploration."""
def get(self, exploration_id):
"""Populates the data on the individual exploration page."""
# TODO(sll): Maybe this should send a complete state machine to the
# frontend, and all interaction would happen client-side?
version = self.request.get('v')
if not version:
version = None
try:
exploration = exp_services.get_exploration_by_id(
exploration_id, version=version)
except Exception as e:
raise self.PageNotFoundException(e)
init_params = exploration.get_init_params()
reader_params = exploration.update_with_state_params(
exploration.init_state_name, init_params)
init_state = exploration.init_state
interactive_widget = widget_registry.Registry.get_widget_by_id(
feconf.INTERACTIVE_PREFIX, init_state.widget.widget_id)
interactive_html = interactive_widget.get_interactive_widget_tag(
init_state.widget.customization_args, reader_params)
self.values.update({
'block_number': 0,
'init_html': init_state.content[0].to_html(reader_params),
'interactive_html': interactive_html,
'params': reader_params,
'state_history': [exploration.init_state_name],
'state_name': exploration.init_state_name,
'title': exploration.title,
})
self.render_json(self.values)
stats_services.EventHandler.record_state_hit(
exploration_id, exploration.init_state_name, True)
class FeedbackHandler(base.BaseHandler):
"""Handles feedback to readers."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
def _append_answer_to_stats_log(
self, old_state, answer, exploration_id, old_state_name,
old_params, handler, rule):
"""Append the reader's answer to the statistics log."""
widget = widget_registry.Registry.get_widget_by_id(
feconf.INTERACTIVE_PREFIX, old_state.widget.widget_id)
recorded_answer = widget.get_stats_log_html(
old_state.widget.customization_args, old_params, answer)
stats_services.EventHandler.record_answer_submitted(
exploration_id, old_state_name, handler, str(rule),
recorded_answer)
def _append_content(self, exploration, sticky, finished, old_params,
new_state, new_state_name, state_has_changed,
html_output):
"""Appends content for the new state to the output variables."""
if finished:
return {}, html_output, ''
else:
# Populate new parameters.
new_params = exploration.update_with_state_params(
new_state_name, old_params)
if state_has_changed:
# Append the content for the new state.
state_html = exploration.states[
new_state_name].content[0].to_html(new_params)
if html_output and state_html:
html_output += '<br>'
html_output += state_html
interactive_html = (
'' if sticky else
widget_registry.Registry.get_widget_by_id(
feconf.INTERACTIVE_PREFIX, new_state.widget.widget_id
).get_interactive_widget_tag(
new_state.widget.customization_args, new_params)
)
return (new_params, html_output, interactive_html)
@require_viewer
def post(self, exploration_id, escaped_state_name):
"""Handles feedback interactions with readers."""
old_state_name = self.unescape_state_name(escaped_state_name)
# The reader's answer.
answer = self.payload.get('answer')
# The answer handler (submit, click, etc.)
handler = self.payload.get('handler')
# The 0-based index of the last content block already on the page.
block_number = self.payload.get('block_number') + 1
# Parameters associated with the reader.
old_params = self.payload.get('params', {})
old_params['answer'] = answer
# The reader's state history.
state_history = self.payload['state_history']
# The version of the exploration.
version = self.payload.get('version')
values = {}
exploration = exp_services.get_exploration_by_id(
exploration_id, version=version)
old_state = exploration.states[old_state_name]
rule = exploration.classify(
old_state_name, handler, answer, old_params)
feedback = rule.get_feedback_string()
new_state_name = rule.dest
new_state = (
None if new_state_name == feconf.END_DEST
else exploration.states[new_state_name])
stats_services.EventHandler.record_state_hit(
exploration_id, new_state_name,
(new_state_name not in state_history))
state_history.append(new_state_name)
# If the new state widget is the same as the old state widget, and the
# new state widget is sticky, do not render the reader response. The
# interactive widget in the frontend should take care of this.
# TODO(sll): This special-casing is not great; we should
# make the interface for updating the frontend more generic so that
# all the updates happen in the same place. Perhaps in the non-sticky
# case we should call a frontend method named appendFeedback() or
# similar.
sticky = (
new_state_name != feconf.END_DEST and
new_state.widget.sticky and
new_state.widget.widget_id == old_state.widget.widget_id
)
self._append_answer_to_stats_log(
old_state, answer, exploration_id, old_state_name, old_params,
handler, rule)
# Append the reader's answer to the response HTML.
old_widget = widget_registry.Registry.get_widget_by_id(
feconf.INTERACTIVE_PREFIX, old_state.widget.widget_id)
reader_response_html = old_widget.get_reader_response_html(
old_state.widget.customization_args, old_params, answer, sticky)
values['reader_response_html'] = reader_response_html
# Add Oppia's feedback to the response HTML.
html_output = '<div>%s</div>' % jinja_utils.parse_string(
feedback, old_params)
# Add the content for the new state to the response HTML.
finished = (new_state_name == feconf.END_DEST)
state_has_changed = (old_state_name != new_state_name)
new_params, html_output, interactive_html = (
self._append_content(
exploration, sticky, finished, old_params, new_state,
new_state_name, state_has_changed, html_output))
values.update({
'interactive_html': interactive_html,
'exploration_id': exploration_id,
'state_name': new_state_name,
'oppia_html': html_output,
'block_number': block_number,
'params': new_params,
'finished': finished,
'state_history': state_history,
})
self.render_json(values)
class ReaderFeedbackHandler(base.BaseHandler):
"""Submits feedback from the reader."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@require_viewer
def post(self, exploration_id, escaped_state_name):
"""Handles POST requests."""
state_name = self.unescape_state_name(escaped_state_name)
feedback = self.payload.get('feedback')
state_history = self.payload.get('state_history')
version = self.payload.get('version')
# TODO(sll): Add the reader's history log here.
stats_services.EventHandler.record_state_feedback_from_reader(
exploration_id, state_name, feedback,
{'state_history': state_history})
|
apache-2.0
|
madan96/sympy
|
sympy/printing/rcode.py
|
7
|
14467
|
"""
R code printer
The RCodePrinter converts single sympy expressions into single R expressions,
using the functions defined in math.h where possible.
"""
from __future__ import print_function, division
from sympy.core import S
from sympy.core.compatibility import string_types, range
from sympy.codegen.ast import Assignment
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence
from sympy.sets.fancysets import Range
# dictionary mapping sympy function to (argument_conditions, C_function).
# Used in RCodePrinter._print_Function(self)
known_functions = {
#"Abs": [(lambda x: not x.is_integer, "fabs")],
"Abs": "abs",
"gamma": "gamma",
"sin": "sin",
"cos": "cos",
"tan": "tan",
"asin": "asin",
"acos": "acos",
"atan": "atan",
"atan2": "atan2",
"exp": "exp",
"log": "log",
"erf": "erf",
"sinh": "sinh",
"cosh": "cosh",
"tanh": "tanh",
"asinh": "asinh",
"acosh": "acosh",
"atanh": "atanh",
"floor": "floor",
"ceiling": "ceiling",
"sign": "sign",
}
# These are the core reserved words in the R language. Taken from:
# https://cran.r-project.org/doc/manuals/r-release/R-lang.html#Reserved-words
reserved_words = ['if',
'else',
'repeat',
'while',
'function',
'for',
'in',
'next',
'break',
'TRUE',
'FALSE',
'NULL',
'Inf',
'NaN',
'NA',
'NA_integer_',
'NA_real_',
'NA_complex_',
'NA_character_',
'volatile']
class RCodePrinter(CodePrinter):
"""A printer to convert python expressions to strings of R code"""
printmethod = "_rcode"
language = "R"
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 15,
'user_functions': {},
'human': True,
'contract': True,
'dereference': set(),
'error_on_reserved': False,
'reserved_word_suffix': '_',
}
_operators = {
'and':'&',
'or': '|',
}
_relationals = {
}
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
self._dereference = set(settings.get('dereference', []))
self.reserved_words = set(reserved_words)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "// {0}".format(text)
def _declare_number_const(self, name, value):
return "{0} = {1};".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for i in range(rows) for j in range(cols))
def _get_loop_opening_ending(self, indices):
"""Returns a tuple (open_lines, close_lines) containing lists of codelines
"""
open_lines = []
close_lines = []
loopstart = "for (%(var)s in %(start)s:%(end)s){"
for i in indices:
# R arrays start at 1 and end at dimension
open_lines.append(loopstart % {
'var': self._print(i.label),
'start': self._print(i.lower+1),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
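    # Editor's note: for an Idx 'i' with Python-style bounds [0, n-1], the
    # opening line produced above is "for (i in 1:n){" -- shifted by one
    # because R vectors are 1-based and ranges are inclusive.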
def _print_Pow(self, expr):
if "Pow" in self.known_functions:
return self._print_Function(expr)
PREC = precedence(expr)
if expr.exp == -1:
return '1.0/%s' % (self.parenthesize(expr.base, PREC))
elif expr.exp == 0.5:
return 'sqrt(%s)' % self._print(expr.base)
else:
return '%s^%s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return '%d.0/%d.0' % (p, q)
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s[%s]" % (self._print(expr.base.label), ", ".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Exp1(self, expr):
return "exp(1)"
def _print_Pi(self, expr):
return 'pi'
def _print_Infinity(self, expr):
return 'Inf'
def _print_NegativeInfinity(self, expr):
return '-Inf'
def _print_Assignment(self, expr):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.tensor.indexed import IndexedBase
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
#if isinstance(expr.rhs, Piecewise):
# # Here we modify Piecewise so each expression is now
# # an Assignment, and then continue on the print.
# expressions = []
# conditions = []
# for (e, c) in rhs.args:
# expressions.append(Assignment(lhs, e))
# conditions.append(c)
# temp = Piecewise(*zip(expressions, conditions))
# return self._print(temp)
#elif isinstance(lhs, MatrixSymbol):
if isinstance(lhs, MatrixSymbol):
# Here we form an Assignment for each element in the array,
# printing each one.
lines = []
for (i, j) in self._traverse_matrix_indices(lhs):
temp = Assignment(lhs[i, j], rhs[i, j])
code0 = self._print(temp)
lines.append(code0)
return "\n".join(lines)
elif self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Piecewise(self, expr):
# This method is called only for inline if constructs
# Top level piecewise is handled in doprint()
if expr.args[-1].cond == True:
last_line = "%s" % self._print(expr.args[-1].expr)
else:
last_line = "ifelse(%s,%s,NA)" % (self._print(expr.args[-1].cond), self._print(expr.args[-1].expr))
        code = last_line
        for e, c in reversed(expr.args[:-1]):
            code = "ifelse(%s,%s," % (self._print(c), self._print(e)) + code + ")"
        return code
def _print_ITE(self, expr):
from sympy.functions import Piecewise
_piecewise = Piecewise((expr.args[1], expr.args[0]), (expr.args[2], True))
return self._print(_piecewise)
def _print_MatrixElement(self, expr):
return "{0}[{1}]".format(expr.parent, expr.j +
expr.i*expr.parent.shape[1])
def _print_Symbol(self, expr):
name = super(RCodePrinter, self)._print_Symbol(expr)
if expr in self._dereference:
return '(*{0})'.format(name)
else:
return name
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return ("{0} {1} {2}").format(lhs_code, op, rhs_code)
def _print_sinc(self, expr):
from sympy.functions.elementary.trigonometric import sin
from sympy.core.relational import Ne
from sympy.functions import Piecewise
_piecewise = Piecewise(
(sin(expr.args[0]) / expr.args[0], Ne(expr.args[0], 0)), (1, True))
return self._print(_piecewise)
def _print_AugmentedAssignment(self, expr):
lhs_code = self._print(expr.lhs)
op = expr.rel_op
rhs_code = self._print(expr.rhs)
return "{0} {1} {2};".format(lhs_code, op, rhs_code)
def _print_For(self, expr):
target = self._print(expr.target)
if isinstance(expr.iterable, Range):
start, stop, step = expr.iterable.args
else:
raise NotImplementedError("Only iterable currently supported is Range")
body = self._print(expr.body)
        # R has no C-style 'for'; iterate over an explicit integer sequence
        # with the same (exclusive-stop) semantics as the Range.
        return ('for ({target} in seq({start}, {stop} - 1, by = {step})) {{\n'
                '{body}\n}}').format(target=target, start=start,
                                     stop=stop, step=step, body=body)
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, string_types):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
decrease = [ int(any(map(line.startswith, dec_token)))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line == '' or line == '\n':
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def rcode(expr, assign_to=None, **settings):
"""Converts an expr to a string of r code
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where the keys are string representations of either
``FunctionClass`` or ``UndefinedFunction`` instances and the values
are their desired R string representations. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
rfunction_string)] or [(argument_test, rfunction_formater)]. See below
for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import rcode, symbols, Rational, sin, ceiling, Abs, Function
>>> x, tau = symbols("x, tau")
>>> rcode((2*tau)**Rational(7, 2))
'8*sqrt(2)*tau^(7.0/2.0)'
>>> rcode(sin(x), assign_to="s")
's = sin(x);'
Simple custom printing can be defined for certain types by passing a
dictionary of {"type" : "function"} to the ``user_functions`` kwarg.
Alternatively, the dictionary value can be a list of tuples i.e.
[(argument_test, cfunction_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")],
... "func": "f"
... }
>>> func = Function('func')
>>> rcode(func(Abs(x) + ceiling(x)), user_functions=custom_functions)
'f(fabs(x) + CEIL(x))'
or if the R-function takes a subset of the original arguments:
>>> rcode(2**x + 3**x, user_functions={'Pow': [
... (lambda b, e: b == 2, lambda b, e: 'exp2(%s)' % e),
... (lambda b, e: b != 2, 'pow')]})
'exp2(x) + pow(3, x)'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
    default term, represented by ``(expr, True)``, then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(rcode(expr, assign_to=tau))
tau = ifelse(x > 0,x + 1,x);
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> rcode(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(rcode(mat, A))
A[0] = x^2;
A[1] = ifelse(x > 0,x + 1,x);
A[2] = sin(x);
"""
return RCodePrinter(settings).doprint(expr, assign_to)
def print_rcode(expr, **settings):
"""Prints R representation of the given expression."""
print(rcode(expr, **settings))
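# A minimal usage sketch (mirrors the rcode examples above):
# >>> from sympy import symbols, sin
# >>> x = symbols('x')
# >>> print_rcode(sin(x), assign_to='s')
# s = sin(x);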
|
bsd-3-clause
|
jss-emr/openerp-7-src
|
openerp/addons/l10n_br/__init__.py
|
430
|
1403
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import account
|
agpl-3.0
|
siconos/siconos-deb
|
wrap/doxy2swig.py
|
1
|
17516
|
#!/usr/bin/env python
"""Doxygen XML to SWIG docstring converter.
Usage:
doxy2swig.py [options] input.xml output.i
Converts Doxygen generated XML files into a file containing docstrings
that can be used by SWIG-1.3.x. Note that you need to get SWIG
version > 1.3.23 or use Robin Dunn's docstring patch to be able to use
the resulting output.
input.xml is your doxygen generated XML file and output.i is where the
output will be written (the file will be clobbered).
"""
######################################################################
#
# This code is implemented using Mark Pilgrim's code as a guideline:
# http://www.faqs.org/docs/diveintopython/kgp_divein.html
#
# Author: Prabhu Ramachandran
# License: BSD style
#
# Thanks:
# Johan Hake: the include_function_definition feature
# Bill Spotz: bug reports and testing.
# Sebastian Henschel: Misc. enhancements.
#
######################################################################
import shlex
from xml.dom import minidom
import re
import textwrap
import sys
import types
import os.path
import optparse
def my_open_read(source):
if hasattr(source, "read"):
return source
else:
return open(source)
def my_open_write(dest):
if hasattr(dest, "write"):
return dest
else:
return open(dest, 'wb')
class Doxy2SWIG:
"""Converts Doxygen generated XML files into a file containing
    docstrings that can be used by SWIG-1.3.x versions that have support
    for feature("docstring"). Once the data is parsed it is stored in
self.pieces.
"""
def __init__(self, src, include_function_definition=True, quiet=False):
"""Initialize the instance given a source object. `src` can
be a file or filename. If you do not want to include function
definitions from doxygen then set
`include_function_definition` to `False`. This is handy since
        this allows you to use the SWIG-generated function definition
using %feature("autodoc", [0,1]).
"""
f = my_open_read(src)
self.src = src
self.my_dir = os.path.dirname(f.name)
self.xmldoc = minidom.parse(f).documentElement
f.close()
self.pieces = []
self.pieces.append('\n// File: %s\n'%\
os.path.basename(f.name))
self.space_re = re.compile(r'\s+')
self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)')
self.multi = 0
self.ignores = ['inheritancegraph', 'param', 'listofallmembers',
'innerclass', 'name', 'declname', 'incdepgraph',
'invincdepgraph', 'programlisting', 'type',
'references', 'referencedby', 'location',
'collaborationgraph', 'reimplements',
'reimplementedby', 'derivedcompoundref',
'basecompoundref']
#self.generics = []
self.include_function_definition = include_function_definition
if not include_function_definition:
self.ignores.append('argsstring')
self.quiet = quiet
def generate(self):
"""Parses the file set in the initialization. The resulting
data is stored in `self.pieces`.
"""
self.parse(self.xmldoc)
def parse(self, node):
"""Parse a given node. This function in turn calls the
`parse_<nodeType>` functions which handle the respective
nodes.
"""
pm = getattr(self, "parse_%s"%node.__class__.__name__)
pm(node)
def parse_Document(self, node):
self.parse(node.documentElement)
def parse_Text(self, node):
# replacements for swig docstrings and processed xml
txt = node.data
# txt = txt.replace('SP::', '')
# txt = txt.replace('SA::', '')
# txt = txt.replace('SPC::', '')
# txt = txt.replace('std::', '')
# txt = txt.replace('std11::', '')
# txt = txt.replace('boost::', '')
# txt = txt.replace('boost::', '')
# processed xml update
node.data = txt
# replacements for swig docstrings only
txt = txt.replace('\\', r'\\\\')
txt = txt.replace('"', r'\"')
# ignore pure whitespace
m = self.space_re.match(txt)
if m and len(m.group()) == len(txt):
pass
else:
self.add_text(textwrap.fill(txt, break_long_words=False))
def parse_Element(self, node):
"""Parse an `ELEMENT_NODE`. This calls specific
        `do_<tagName>` handlers for different elements. If no handler
is available the `generic_parse` method is called. All
tagNames specified in `self.ignores` are simply ignored.
"""
name = node.tagName
ignores = self.ignores
if name in ignores:
return
attr = "do_%s" % name
if hasattr(self, attr):
handlerMethod = getattr(self, attr)
handlerMethod(node)
else:
self.generic_parse(node)
#if name not in self.generics: self.generics.append(name)
def parse_Comment(self, node):
"""Parse a `COMMENT_NODE`. This does nothing for now."""
return
def add_text(self, value):
"""Adds text corresponding to `value` into `self.pieces`."""
if hasattr(value, '__iter__'):
self.pieces.extend(value)
else:
self.pieces.append(value)
def get_specific_nodes(self, node, names):
"""Given a node and a sequence of strings in `names`, return a
        dictionary containing the names as keys and the child
        `ELEMENT_NODE`s that have a `tagName` equal to the name.
"""
nodes = [(x.tagName, x) for x in node.childNodes \
if x.nodeType == x.ELEMENT_NODE and \
x.tagName in names]
return dict(nodes)
def generic_parse(self, node, pad=0):
"""A Generic parser for arbitrary tags in a node.
Parameters:
- node: A node in the DOM.
- pad: `int` (default: 0)
If 0 the node data is not padded with newlines. If 1 it
appends a newline after parsing the childNodes. If 2 it
pads before and after the nodes are processed. Defaults to
0.
"""
npiece = 0
if pad:
npiece = len(self.pieces)
if pad == 2:
self.add_text('\n')
for n in node.childNodes:
self.parse(n)
if pad:
if len(self.pieces) > npiece:
self.add_text('\n')
def space_parse(self, node):
self.add_text(' ')
self.generic_parse(node)
do_ref = space_parse
do_emphasis = space_parse
do_bold = space_parse
do_computeroutput = space_parse
def do_formula(self, node):
self.add_text(' ')
data = '{0}'.format(node.firstChild.data).strip().\
replace('\\', r'\\\\').\
replace('"', r'\"').replace('$', '').strip()
if len(data) <= 20:
self.add_text(' :math:`{0}` '.format(data))
else:
self.add_text("""
.. math::
:nowrap:
{0}
""".format(data))
def do_compoundname(self, node):
self.add_text('\n\n')
data = node.firstChild.data
self.add_text('%%feature("docstring") %s "\n'%data)
def do_compounddef(self, node):
kind = node.attributes['kind'].value
if kind in ('class', 'struct'):
prot = node.attributes['prot'].value
if prot != 'public':
return
names = ('compoundname', 'briefdescription',
'detaileddescription', 'includes')
first = self.get_specific_nodes(node, names)
for n in names:
if n in first:
self.parse(first[n])
self.add_text(['";','\n'])
for n in node.childNodes:
if n not in first.values():
self.parse(n)
elif kind in ('file', 'namespace'):
nodes = node.getElementsByTagName('sectiondef')
for n in nodes:
self.parse(n)
def do_includes(self, node):
pass
# self.add_text('C++ includes: ')
# self.generic_parse(node, pad=1)
def do_parameterlist(self, node):
text='unknown'
for key, val in node.attributes.items():
if key == 'kind':
if val == 'param': text = 'Parameters'
elif val == 'exception': text = 'Exceptions'
else: text = val
break
self.add_text(['\n', '\n', text, ':', '\n'])
self.generic_parse(node, pad=1)
def do_para(self, node):
self.add_text('\n')
self.generic_parse(node, pad=1)
def do_parametername(self, node):
self.add_text('\n')
try:
data = node.firstChild.data
except AttributeError: # perhaps a <ref> tag in it
if hasattr(node, 'firstChild'):
if hasattr(node.firstChild, 'firstChild'):
data = node.firstChild.firstChild.data
else:
return
else:
return
if data.find('Exception') != -1:
self.add_text(data)
else:
self.add_text("%s: "%data)
def do_parameterdefinition(self, node):
self.generic_parse(node, pad=1)
def do_detaileddescription(self, node):
self.generic_parse(node, pad=1)
def do_briefdescription(self, node):
self.generic_parse(node, pad=1)
def do_memberdef(self, node):
prot = node.attributes['prot'].value
id = node.attributes['id'].value
kind = node.attributes['kind'].value
tmp = node.parentNode.parentNode.parentNode
compdef = tmp.getElementsByTagName('compounddef')[0]
cdef_kind = compdef.attributes['kind'].value
if prot == 'public':
first = self.get_specific_nodes(node, ('definition', 'name'))
name = first['name'].firstChild.data
if name[:8] == 'operator': # Don't handle operators yet.
return
#if 'definition ' not in first or \
# kind in ['variable', 'typedef']:
# return
if self.include_function_definition:
defn = first['definition'].firstChild.data
# remove return type information
defn = '.'.join(shlex.split(defn)[-1].split('::'))
first['definition'].firstChild.data = defn
else:
defn = ""
self.add_text('\n')
self.add_text('%feature("docstring") ')
anc = node.parentNode.parentNode
if cdef_kind in ('file', 'namespace'):
ns_node = anc.getElementsByTagName('innernamespace')
if not ns_node and cdef_kind == 'namespace':
ns_node = anc.getElementsByTagName('compoundname')
if ns_node:
ns = ns_node[0].firstChild.data
self.add_text(' %s::%s "\n%s'%(ns, name, defn))
else:
self.add_text(' %s "\n%s'%(name, defn))
elif cdef_kind in ('class', 'struct'):
# Get the full function name.
anc_node = anc.getElementsByTagName('compoundname')
cname = anc_node[0].firstChild.data
self.add_text(' %s::%s "\n%s'%(cname, name, defn))
for n in node.childNodes:
if n not in first.values():
self.parse(n)
self.add_text(['";', '\n'])
def do_definition(self, node):
data = node.firstChild.data
self.add_text('%s "\n%s'%(data, data))
def do_sectiondef(self, node):
kind = node.attributes['kind'].value
if kind in ('public-func', 'func', 'user-defined', ''):
self.generic_parse(node)
def do_header(self, node):
"""For a user defined section def a header field is present
which should not be printed as such, so we comment it in the
output."""
data = node.firstChild.data
self.add_text('\n/*\n %s \n*/\n'%data)
# If our immediate sibling is a 'description' node then we
# should comment that out also and remove it from the parent
# node's children.
parent = node.parentNode
idx = parent.childNodes.index(node)
if len(parent.childNodes) >= idx + 2:
nd = parent.childNodes[idx+2]
if nd.nodeName == 'description':
nd = parent.removeChild(nd)
self.add_text('\n/*')
self.generic_parse(nd)
self.add_text('\n*/\n')
def do_simplesect(self, node):
kind = node.attributes['kind'].value
if kind in ('date', 'rcs', 'version'):
pass
elif kind == 'warning':
self.add_text(['\n', 'WARNING: '])
self.generic_parse(node)
elif kind == 'see':
self.add_text('\n')
self.add_text('See: ')
self.generic_parse(node)
else:
self.generic_parse(node)
def do_argsstring(self, node):
pass
# args = node.firstChild.data
        # remove chars after the closing paren
# a1 = args.split(')')[0]
# a2 = [ shlex.split(a) for a in a1.split('(')[1].split(',') ]
# a3 = []
# for l in a2:
# if len(l) > 0:
# a3 += [ l[-1].strip('&*').replace('false','False').replace('true','True') ]
# else:
# a3 += [ '' ]
# a4 = ', '.join(a3)
# node.firstChild.data = a4
# self.add_text('({0})'.format(a4))
def do_member(self, node):
kind = node.attributes['kind'].value
refid = node.attributes['refid'].value
if kind == 'function' and refid[:9] == 'namespace':
self.generic_parse(node)
def do_doxygenindex(self, node):
self.multi = 1
comps = node.getElementsByTagName('compound')
for c in comps:
refid = c.attributes['refid'].value
fname = refid + '.xml'
if not os.path.exists(fname):
fname = os.path.join(self.my_dir, fname)
if not self.quiet:
print("parsing file: %s"%fname)
p = Doxy2SWIG(fname, self.include_function_definition, self.quiet)
p.generate()
self.pieces.extend(self.clean_pieces(p.pieces))
def write(self, fname):
o = my_open_write(fname)
if self.multi:
o.write("".join(self.pieces).encode('ascii', 'ignore').strip())
else:
o.write("".join(self.clean_pieces(self.pieces)).encode('ascii', 'ignore').strip())
o.close()
def clean_pieces(self, pieces):
"""Cleans the list of strings given as `pieces`. It replaces
multiple newlines by a maximum of 2 and returns a new list.
It also wraps the paragraphs nicely.
"""
ret = []
count = 0
for i in pieces:
if i == '\n':
count = count + 1
else:
if i == '";':
if count:
ret.append('\n')
elif count > 2:
ret.append('\n\n')
elif count:
ret.append('\n'*count)
count = 0
ret.append(i)
_data = "".join(ret)
ret = []
for i in _data.split('\n\n'):
if i == 'Parameters:' or i == 'Exceptions:':
ret.extend([i, '\n-----------', '\n\n'])
elif i.find('// File:') > -1: # leave comments alone.
ret.extend([i, '\n'])
else:
_tmp = i.strip()
_tmp = self.lead_spc.sub(r'\1"\2', _tmp)
ret.extend([_tmp, '\n\n'])
return ret
def convert(input, output, include_function_definition=True, quiet=False):
p = Doxy2SWIG(input, include_function_definition, quiet)
p.generate()
dir_input = os.path.dirname(input)
pdir = os.path.join(dir_input, 'processed')
try:
os.mkdir(pdir)
    except OSError:
        # the directory may already exist
        pass
base_input = os.path.basename(input)
# try:
with open(os.path.join(pdir,'{0}'.format(base_input)), 'wb') as pxml_file:
pxml_file.write(p.xmldoc.toxml().encode('ascii', 'ignore').strip())
p.write(output)
#except Exception as e:
#print ('doxy2swig.py: {0}'.format(e))
def main():
usage = __doc__
parser = optparse.OptionParser(usage)
parser.add_option("-n", '--no-function-definition',
action='store_true',
default=False,
dest='func_def',
help='do not include doxygen function definitions')
parser.add_option("-q", '--quiet',
action='store_true',
default=False,
dest='quiet',
help='be quiet and minimize output')
options, args = parser.parse_args()
if len(args) != 2:
parser.error("error: no input and output specified")
convert(args[0], args[1], not options.func_def, options.quiet)
if __name__ == '__main__':
main()
|
apache-2.0
|
TechInvestLab/dot15926
|
editor_qt/iso15926/patterns/patterns_actions.py
|
1
|
15984
|
"""
.15925 Editor
Copyright 2014 TechInvestLab.ru [email protected]
.15925 Editor is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3.0 of the License, or (at your option) any later version.
.15925 Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with .15925 Editor.
"""
import iso15926.common.dialogs as dialogs
from framework.dialogs import Notify, Choice
from _ordereddict import ordereddict
def GenPartName(option, default = None):
if default:
i = 0
name_gen = default
else:
i = 1
name_gen = 'entity%i'%i
while True:
for p in option['parts']:
if 'self' in p and p['self']==name_gen:
break
else:
return name_gen
i += 1
name_gen = 'entity%i'%i
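# A minimal sketch of GenPartName (hypothetical option dict): with
# option = {'parts': [{'self': 'entity1'}]}, GenPartName(option) skips the
# taken name and returns 'entity2'; GenPartName(option, default='width')
# returns 'width' as long as no part already uses that name.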
class DocumentPropertyChange():
props_change = True
def __init__(self, doc, prop, value):
self.doc = doc
self.prop = prop
self.value = value
def Redo(self):
self.old_value = getattr(self.doc, self.prop, None)
if self.old_value == self.value:
return False
self.doc.UpdateProps({self.prop: self.value})
return True
def Undo(self):
self.doc.UpdateProps({self.prop: self.old_value})
self.doc.RefreshProps()
class DocumentModifyPatterns():
def __init__(self, doc, pattern, delete = False, new = True):
self.doc = doc
self.pattern = pattern
self.delete = delete
self.new = new
def Redo(self):
if self.delete:
if self.pattern['name'] not in self.doc.patterns or self.doc.patterns[self.pattern['name']] != self.pattern:
return False
del self.doc.patterns[self.pattern['name']]
wizard.W_PatternDeleted(self.pattern)
else:
if not self.new:
counter = 1
name = self.pattern['name']
while name:
for p in self.doc.patterns.iterkeys():
if name == p:
name = '%s%i'%(self.pattern['name'], counter)
counter += 1
break
else:
self.pattern['name'] = name
break
if self.pattern['name'] in self.doc.patterns:
Notify(tm.main.pattern_already_exist)
return False
self.doc.patterns[self.pattern['name']] = self.pattern
wizard.W_PatternAdded(self.doc, self.pattern, new = self.new)
self.new = False
return True
def Undo(self):
if self.delete:
self.doc.patterns[self.pattern['name']] = self.pattern
wizard.W_PatternAdded(self.doc, self.pattern)
else:
del self.doc.patterns[self.pattern['name']]
wizard.W_PatternDeleted(self.pattern)
class DocumentChangePatternName():
def __init__(self, doc, pattern, new_name):
self.doc = doc
self.pattern = pattern
self.new_name = new_name
def Redo(self):
self.old_name = self.pattern['name']
if not self.new_name:
Notify(tm.main.empty_pattern_name)
return False
if self.old_name == self.new_name:
return False
if self.new_name in self.doc.patterns:
Notify(tm.main.pattern_option_already_exist)
return False
self.pattern['name'] = self.new_name
del self.doc.patterns[self.old_name]
self.doc.patterns[self.new_name] = self.pattern
wizard.W_PatternNameChanged(self.pattern)
return True
def Undo(self):
self.pattern['name'] = self.old_name
del self.doc.patterns[self.new_name]
self.doc.patterns[self.old_name] = self.pattern
wizard.W_PatternNameChanged(self.pattern)
class DocumentChangePatternProperty():
def __init__(self, doc, pattern, prop, value):
self.doc = doc
self.pattern = pattern
self.prop = prop
self.value = value
def Redo(self):
self.old_value = self.pattern.get(self.prop, type(self.value)())
if self.value == self.old_value:
return False
self.pattern[self.prop] = self.value
wizard.W_PatternPropsChanged(self.pattern)
return True
def Undo(self):
self.pattern[self.prop] = self.old_value
wizard.W_PatternPropsChanged(self.pattern)
class DocumentChangePatternOptionName():
def __init__(self, pattern, option, new_name):
self.pattern = pattern
self.option = option
self.new_name = new_name
def Redo(self):
self.old_name = self.option.get('name')
if self.old_name == self.new_name:
return False
if self.new_name:
if self.new_name in (o.get('name') for o in self.pattern['options']):
Notify(tm.main.pattern_option_already_exist)
return False
self.option['name'] = self.new_name
elif self.old_name:
del self.option['name']
wizard.W_PatternOptionChanged(self.option)
return True
def Undo(self):
if self.old_name:
self.option['name'] = self.old_name
elif self.new_name:
del self.option['name']
wizard.W_PatternOptionChanged(self.option)
class DocumentChangePatternSignature():
def __init__(self, pattern, role, data, old_role = None):
self.pattern = pattern
self.role = role
self.data = data
self.old_role = old_role
def Redo(self):
if self.old_role:
self.old_data = self.pattern['signature'][self.old_role]
del self.pattern['signature'][self.old_role]
if self.role:
self.pattern['signature'][self.role] = self.data
wizard.W_PatternSignatureChanged(self.pattern, self.role, self.old_role)
return True
def Undo(self):
if self.role:
del self.pattern['signature'][self.role]
if self.old_role:
self.pattern['signature'][self.old_role] = self.old_data
wizard.W_PatternSignatureChanged(self.pattern, self.old_role, self.role)
class DocumentModifyPatternOptions():
def __init__(self, pattern, option, delete = False, new = True):
self.pattern = pattern
self.option = option
self.delete = delete
self.new = new
def Redo(self):
if self.delete:
if self.option not in self.pattern['options']:
return False
self.idx = self.pattern['options'].index(self.option)
del self.pattern['options'][self.idx]
wizard.W_PatternOptionDeleted(self.option)
else:
if self.option in self.pattern['options']:
return False
if not self.new:
counter = 1
name = self.option.get('name')
while name:
for o in self.pattern['options']:
if name == o.get('name'):
name = '%s%i'%(self.option['name'], counter)
counter += 1
break
else:
self.option['name'] = name
break
self.pattern['options'].append(self.option)
wizard.W_PatternOptionAdded(self.pattern, self.option, new = self.new)
self.new = False
return True
def Undo(self):
if self.delete:
self.pattern['options'].insert(self.idx, self.option)
wizard.W_PatternOptionAdded(self.pattern, self.option)
else:
self.pattern['options'].remove(self.option)
wizard.W_PatternOptionDeleted(self.option)
class DocumentModifyPatternOptionParts():
def __init__(self, option, part, delete = False):
self.option = option
self.part = part
self.delete = delete
def Redo(self):
if self.delete:
if self.part not in self.option['parts']:
return False
self.idx = self.option['parts'].index(self.part)
del self.option['parts'][self.idx]
wizard.W_PatternOptionPartDeleted(self.option, self.part)
else:
if self.part in self.option['parts']:
return False
if 'type' not in self.part or not self.part['type'].startswith('patterns.'):
name = self.part.get('self', None)
self.part['self'] = GenPartName(self.option, name)
self.option['parts'].append(self.part)
wizard.W_PatternOptionPartAdded(self.option, self.part)
return True
def Undo(self):
if self.delete:
self.option['parts'].insert(self.idx, self.part)
wizard.W_PatternOptionPartAdded(self.option, self.part)
else:
self.option['parts'].remove(self.part)
wizard.W_PatternOptionPartDeleted(self.option, self.part)
class DocumentChangePatternOptionPartIndex():
def __init__(self, option, part, new_idx):
self.option = option
self.part = part
self.new_idx = new_idx
def Redo(self):
if self.new_idx >= len(self.option['parts']):
return False
self.old_idx = self.option['parts'].index(self.part)
if self.old_idx == self.new_idx:
return False
del self.option['parts'][self.old_idx]
self.option['parts'].insert(self.new_idx, self.part)
wizard.W_PatternOptionPartIndexChanged(self.option, self.part)
return True
def Undo(self):
del self.option['parts'][self.new_idx]
self.option['parts'].insert(self.old_idx, self.part)
wizard.W_PatternOptionPartIndexChanged(self.option, self.part)
class DocumentModifyPatternOptionPartName():
def __init__(self, part, name):
self.part = part
self.name = name
def Redo(self):
self.old_name = self.part.get('self')
if self.old_name == self.name:
return False
if self.name:
self.part['self'] = self.name
else:
del self.part['self']
        wizard.W_PatternOptionPartNameChanged(self.part)
        return True
def Undo(self):
if self.old_name:
self.part['self'] = self.old_name
else:
del self.part['self']
wizard.W_PatternOptionPartNameChanged(self.part)
ROLE_ADD = 0
ROLE_DELETE = 1
ROLE_BIND = 2
ROLE_RENAME = 3
ROLE_MODIFY = 4
class DocumentModifyPatternOptionPartRoles():
def __init__(self, option, part, role, value = None, action = None):
self.option = option
self.part = part
self.role = role
self.value = value
self.action = action
self.other = None
def Redo(self):
if self.action == ROLE_DELETE:
if self.role not in self.part:
return False
self.value = self.part[self.role]
del self.part[self.role]
wizard.W_PatternOptionPartRoleDeleted(self.part, self.role)
elif self.action == ROLE_ADD:
if self.role in self.part:
Notify(tm.main.pattern_option_part_role_already_exist)
return False
self.part[self.role] = self.value
wizard.W_PatternOptionPartRoleAdded(self.part, self.role)
elif self.action == ROLE_BIND:
if self.value == self.part[self.role]:
return False
self.old_value = self.part[self.role]
self.part[self.role] = self.value
wizard.W_PatternOptionPartRoleChanged(self.part, self.role, self.role)
elif self.action == ROLE_RENAME:
if self.value == self.role:
return False
if self.value in self.part:
Notify(tm.main.pattern_option_part_role_already_exist)
return False
self.part[self.value] = self.part[self.role]
del self.part[self.role]
wizard.W_PatternOptionPartRoleChanged(self.part, self.role, self.value)
elif self.action == ROLE_MODIFY:
if self.role not in self.part:
return False
self.old_value = self.part[self.role]
if self.value[0] == self.role and self.value[1] == self.old_value:
return False
if self.value[0] != self.role:
if self.value[0] in self.part:
Notify(tm.main.pattern_option_part_role_already_exist)
return False
del self.part[self.role]
self.part[self.value[0]] = self.value[1]
wizard.W_PatternOptionPartRoleChanged(self.part, self.role, self.value[0])
if 'self' in self.part and 'type' in self.part and isinstance(self.part['type'], basestring) and self.part['type'].startswith('patterns.'):
self.other = DocumentModifyPatternOptionPartRoles(self.option, self.part, 'self', action = ROLE_DELETE)
elif 'self' not in self.part and ('type' not in self.part or not isinstance(self.part['type'], basestring) or not self.part['type'].startswith('patterns.')):
self.other = DocumentModifyPatternOptionPartRoles(self.option, self.part, 'self', value = GenPartName(self.option), action = ROLE_ADD)
if self.other and not self.other.Redo():
self.other = None
return True
def Undo(self):
if self.other:
self.other.Undo()
if self.action == ROLE_DELETE:
self.part[self.role] = self.value
wizard.W_PatternOptionPartRoleAdded(self.part, self.role)
elif self.action == ROLE_ADD:
del self.part[self.role]
wizard.W_PatternOptionPartRoleDeleted(self.part, self.role)
elif self.action == ROLE_BIND:
self.part[self.role] = self.old_value
wizard.W_PatternOptionPartRoleChanged(self.part, self.role, self.role)
elif self.action == ROLE_RENAME:
self.part[self.role] = self.part[self.value]
del self.part[self.value]
wizard.W_PatternOptionPartRoleChanged(self.part, self.value, self.role)
elif self.action == ROLE_MODIFY:
del self.part[self.value[0]]
self.part[self.role] = self.old_value
wizard.W_PatternOptionPartRoleChanged(self.part, self.value[0], self.role)
class DocumentChangePatternOptionPartRole():
def __init__(self, option, part, old_role, role, value):
self.option = option
self.part = part
self.old_role = old_role
self.role = role
self.value = value
self.name = None
def Redo(self):
self.old_value = self.part[self.old_role]
if self.old_value == self.value and self.old_role == self.role:
return False
del self.part[self.old_role]
self.part[self.role] = self.value
if self.role == 'type' and self.value.startswith('patterns.') and 'self' in self.part:
self.name = self.part['self']
del self.part['self']
wizard.W_PatternOptionPartRoleChanged(self.part, self.old_role, self.role)
return True
def Undo(self):
del self.part[self.role]
if self.name:
self.part['self'] = GenPartName(self.option, self.name)
self.part[self.old_role] = self.old_value
wizard.W_PatternOptionPartRoleChanged(self.part, self.role, self.old_role)
|
lgpl-3.0
|
vprime/puuuu
|
env/lib/python2.7/site-packages/django/utils/translation/trans_real.py
|
35
|
25577
|
"""Translation helper functions."""
from __future__ import unicode_literals
import locale
import os
import re
import sys
import gettext as gettext_module
from threading import local
import warnings
from django.utils.importlib import import_module
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import memoize
from django.utils._os import upath
from django.utils.safestring import mark_safe, SafeData
from django.utils import six
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
_checked_languages = {}
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
        (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)')
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p+1:]) > 2:
return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower()
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
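# A few illustrative values (these follow directly from the code above):
# >>> to_locale('en-us')
# 'en_US'
# >>> to_locale('sr-latn')  # more than two chars after the dash: title-cased
# 'sr_Latn'
# >>> to_language('en_US')
# 'en-us'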
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset.
"""
def __init__(self, *args, **kw):
gettext_module.GNUTranslations.__init__(self, *args, **kw)
self.set_output_charset('utf-8')
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
self.__to_language = to_language(language)
def language(self):
return self.__language
def to_language(self):
return self.__to_language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
def _fetch(lang, fallback=None):
global _translations
res = _translations.get(lang, None)
if res is not None:
return res
loc = to_locale(lang)
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in list(_translations)]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for appname in reversed(settings.INSTALLED_APPS):
app = import_module(appname)
apppath = os.path.join(os.path.dirname(upath(app.__file__)), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
for localepath in reversed(settings.LOCALE_PATHS):
if os.path.isdir(localepath):
res = _merge(localepath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
    Fetches the translation object for a given language and installs it as
    the current translation object for the current thread.
"""
_active.value = translation(language)
def deactivate():
"""
    Uninstalls the currently active translation object so that further _ calls
    will resolve against the default translation object again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
from django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
# str() is allowing a bytestring message to remain bytestring on Python 2
eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
t = getattr(_active, "value", None)
if t is not None:
result = getattr(t, translation_function)(eol_message)
else:
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
"""
Returns a string of the translation of the message.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_translate(message, 'gettext')
if six.PY3:
ugettext = gettext
else:
def ugettext(message):
return do_translate(message, 'ugettext')
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = ugettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
return result
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a string of the translation of either the singular or plural,
based on the number.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
if six.PY3:
ungettext = ngettext
else:
def ungettext(singular, plural, number):
"""
        Returns a unicode string of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number)
result = ungettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ungettext(singular, plural, number)
return result
def all_locale_paths():
"""
    Returns a list of paths to user-provided language files.
"""
from django.conf import settings
globalpath = os.path.join(
os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available. This is only used for language codes from either the cookies
or session and during format localization.
"""
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
check_for_language = memoize(check_for_language, _checked_languages, 1)
def get_supported_language_variant(lang_code, supported=None, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing found.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the currently checked is not found.
"""
if supported is None:
from django.conf import settings
supported = SortedDict(settings.LANGUAGES)
if lang_code:
# if fr-CA is not supported, try fr-ca; if that fails, fallback to fr.
generic_lang_code = lang_code.split('-')[0]
variants = (lang_code, lang_code.lower(), generic_lang_code,
generic_lang_code.lower())
for code in variants:
if code in supported and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported:
if supported_code.startswith((generic_lang_code + '-',
generic_lang_code.lower() + '-')):
return supported_code
raise LookupError(lang_code)
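# A minimal sketch (assuming 'fr' is listed in settings.LANGUAGES with a
# translation available): get_supported_language_variant('fr-CA') tries
# 'fr-CA', then 'fr-ca', then falls back to the generic 'fr'. With
# strict=False (the default), a sibling variant such as 'fr-fr' can also be
# returned when only that one is listed; strict=True raises LookupError
# instead.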
def get_language_from_path(path, supported=None, strict=False):
"""
Returns the language-code if there is a valid language-code
found in the `path`.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the currently checked is not found.
"""
if supported is None:
from django.conf import settings
supported = SortedDict(settings.LANGUAGES)
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match.group(1)
try:
return get_supported_language_variant(lang_code, supported, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
global _accepted
from django.conf import settings
supported = SortedDict(settings.LANGUAGES)
if check_path:
lang_code = get_language_from_path(request.path_info, supported)
if lang_code is not None:
return lang_code
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code, supported)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
try:
accept_lang = get_supported_language_variant(accept_lang, supported)
except LookupError:
continue
else:
_accepted[normalized] = accept_lang
return accept_lang
try:
return get_supported_language_variant(settings.LANGUAGE_CODE, supported)
except LookupError:
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""")
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
one_percent_re = re.compile(r"""(?<!%)%(?!%)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.conf import settings
from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
src = force_text(src, settings.FILE_CHARSET)
out = StringIO()
message_context = None
intrans = False
inplural = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
for t in Lexer(src, origin).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(' npgettext(%r, %r, %r,count) ' % (message_context, ''.join(singular), ''.join(plural)))
else:
out.write(' ngettext(%r, %r, count) ' % (''.join(singular), ''.join(plural)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(' pgettext(%r, %r) ' % (message_context, ''.join(singular)))
else:
out.write(' gettext(%r) ' % ''.join(singular))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = one_percent_re.sub('%%', t.contents)
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
if t.token_type != TOKEN_COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
warn_msg = ("The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't the last item "
"on the line.") % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = one_percent_re.sub('%%', g)
if imatch.group(2):
# A context is provided
context_match = context_re.match(imatch.group(2))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(' pgettext(%r, %r) ' % (message_context, g))
message_context = None
else:
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch.group(1):
# A context is provided
context_match = context_re.match(bmatch.group(1))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':',1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno,
[]).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, 'X'))
return force_str(out.getvalue())
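# A minimal sketch of the transformation (Python 2 repr shown, where %r of a
# unicode literal carries the u prefix):
# templatize('{% trans "Hello" %}') -> " gettext(u'Hello') "
# templatize('{% blocktrans %}Hi {{ name }}{% endblocktrans %}')
# -> " gettext(u'Hi %(name)s') " followed by the source blanked out with 'S'.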
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
Any format errors in lang_string results in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return []
if priority:
priority = float(priority)
if not priority: # if priority is 0.0 at this point make it 1.0
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
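# A minimal sketch of the parser above:
# >>> parse_accept_lang_header('en-au;q=0.8,es')
# [('es', 1.0), ('en-au', 0.8)]
# A missing q-value counts as 1.0, and a malformed header yields [].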
|
mit
|
towerjoo/mindsbook
|
django/contrib/localflavor/es/forms.py
|
309
|
7537
|
# -*- coding: utf-8 -*-
"""
Spanish-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
import re
class ESPostalCodeField(RegexField):
"""
    A form field that validates its input as a Spanish postal code.
    A Spanish postal code is a five-digit string whose first two digits,
    between 01 and 52, are the assigned province code.
"""
default_error_messages = {
'invalid': _('Enter a valid postal code in the range and format 01XXX - 52XXX.'),
}
def __init__(self, *args, **kwargs):
super(ESPostalCodeField, self).__init__(
r'^(0[1-9]|[1-4][0-9]|5[0-2])\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
class ESPhoneNumberField(RegexField):
"""
A form field that validates its input as a Spanish phone number.
    Information numbers are omitted.
    Spanish phone numbers are nine-digit numbers, where the first digit is
    6 (for cell phones), 8 (for special phones), or 9 (for landlines and
    special phones).
TODO: accept and strip characters like dot, hyphen... in phone number
"""
default_error_messages = {
'invalid': _('Enter a valid phone number in one of the formats 6XXXXXXXX, 8XXXXXXXX or 9XXXXXXXX.'),
}
def __init__(self, *args, **kwargs):
super(ESPhoneNumberField, self).__init__(r'^(6|8|9)\d{8}$',
max_length=None, min_length=None, *args, **kwargs)
class ESIdentityCardNumberField(RegexField):
"""
Spanish NIF/NIE/CIF (Fiscal Identification Number) code.
    Validates three different formats:
NIF (individuals): 12345678A
CIF (companies): A12345678
NIE (foreigners): X12345678A
according to a couple of simple checksum algorithms.
Value can include a space or hyphen separator between number and letters.
Number length is not checked for NIF (or NIE), old values start with a 1,
and future values can contain digits greater than 8. The CIF control digit
can be a number or a letter depending on company type. Algorithm is not
    public, and different authors have different opinions on which ones allow
letters, so both validations are assumed true for all types.
"""
default_error_messages = {
'invalid': _('Please enter a valid NIF, NIE, or CIF.'),
'invalid_only_nif': _('Please enter a valid NIF or NIE.'),
'invalid_nif': _('Invalid checksum for NIF.'),
'invalid_nie': _('Invalid checksum for NIE.'),
'invalid_cif': _('Invalid checksum for CIF.'),
}
def __init__(self, only_nif=False, *args, **kwargs):
self.only_nif = only_nif
self.nif_control = 'TRWAGMYFPDXBNJZSQVHLCKE'
self.cif_control = 'JABCDEFGHI'
self.cif_types = 'ABCDEFGHKLMNPQS'
self.nie_types = 'XT'
id_card_re = re.compile(r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$' % (self.cif_types + self.nie_types, self.nif_control + self.cif_control), re.IGNORECASE)
super(ESIdentityCardNumberField, self).__init__(id_card_re, max_length=None, min_length=None,
error_message=self.default_error_messages['invalid%s' % (self.only_nif and '_only_nif' or '')],
*args, **kwargs)
def clean(self, value):
super(ESIdentityCardNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
nif_get_checksum = lambda d: self.nif_control[int(d)%23]
value = value.upper().replace(' ', '').replace('-', '')
m = re.match(r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$' % (self.cif_types + self.nie_types, self.nif_control + self.cif_control), value)
letter1, number, letter2 = m.groups()
if not letter1 and letter2:
# NIF
if letter2 == nif_get_checksum(number):
return value
else:
raise ValidationError(self.error_messages['invalid_nif'])
elif letter1 in self.nie_types and letter2:
# NIE
if letter2 == nif_get_checksum(number):
return value
else:
raise ValidationError(self.error_messages['invalid_nie'])
elif not self.only_nif and letter1 in self.cif_types and len(number) in [7, 8]:
# CIF
if not letter2:
number, letter2 = number[:-1], int(number[-1])
checksum = cif_get_checksum(number)
if letter2 in (checksum, self.cif_control[checksum]):
return value
else:
raise ValidationError(self.error_messages['invalid_cif'])
else:
raise ValidationError(self.error_messages['invalid'])
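# A worked NIF checksum example (the well-known sample number 12345678Z, not
# a real person's ID): 12345678 % 23 == 14 and 'TRWAGMYFPDXBNJZSQVHLCKE'[14]
# is 'Z', so ESIdentityCardNumberField().clean(u'12345678Z') returns the value.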
class ESCCCField(RegexField):
"""
A form field that validates its input as a Spanish bank account or CCC
(Codigo Cuenta Cliente).
Spanish CCC is in format EEEE-OOOO-CC-AAAAAAAAAA where:
E = entity
O = office
C = checksum
A = account
It's also valid to use a space as delimiter, or to use no delimiter.
    The first checksum digit validates the entity and office, and the last
    one validates the account. Validation multiplies every digit of the
    10-digit value (left-padded with 0 if necessary) by the weight at its
    position in the sequence 1, 2, 4, 8, 5, 10, 9, 7, 3, 6, sums the
    products, and subtracts the sum modulo 11 from 11. The result is the
    checksum digit, except that 10 becomes 1 and 11 becomes 0.
TODO: allow IBAN validation too
"""
default_error_messages = {
'invalid': _('Please enter a valid bank account number in format XXXX-XXXX-XX-XXXXXXXXXX.'),
'checksum': _('Invalid checksum for bank account number.'),
}
def __init__(self, *args, **kwargs):
super(ESCCCField, self).__init__(r'^\d{4}[ -]?\d{4}[ -]?\d{2}[ -]?\d{10}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self, value):
super(ESCCCField, self).clean(value)
if value in EMPTY_VALUES:
return u''
control_str = [1, 2, 4, 8, 5, 10, 9, 7, 3, 6]
m = re.match(r'^(\d{4})[ -]?(\d{4})[ -]?(\d{2})[ -]?(\d{10})$', value)
entity, office, checksum, account = m.groups()
get_checksum = lambda d: str(11 - sum([int(digit) * int(control) for digit, control in zip(d, control_str)]) % 11).replace('10', '1').replace('11', '0')
if get_checksum('00' + entity + office) + get_checksum(account) == checksum:
return value
else:
raise ValidationError(self.error_messages['checksum'])
class ESRegionSelect(Select):
"""
    A Select widget that uses a list of Spanish regions as its choices.
"""
def __init__(self, attrs=None):
from es_regions import REGION_CHOICES
super(ESRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class ESProvinceSelect(Select):
"""
    A Select widget that uses a list of Spanish provinces as its choices.
"""
def __init__(self, attrs=None):
from es_provinces import PROVINCE_CHOICES
super(ESProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
def cif_get_checksum(number):
s1 = sum([int(digit) for pos, digit in enumerate(number) if int(pos) % 2])
s2 = sum([sum([int(unit) for unit in str(int(digit) * 2)]) for pos, digit in enumerate(number) if not int(pos) % 2])
return (10 - ((s1 + s2) % 10)) % 10
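# A minimal worked example (digits chosen arbitrarily): for '5881850' the odd
# positions give s1 = 8 + 1 + 5 = 14, the doubled-and-digit-summed even
# positions give s2 = 1 + 7 + 7 + 0 = 15, so the control digit is
# (10 - (29 % 10)) % 10 == 1.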
|
bsd-3-clause
|
GarciaPL/TrafficCity
|
Streets4MPI/utils.py
|
2
|
1073
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# utils.py
# Copyright 2012 Joachim Nitschke
#
# This file is part of Streets4MPI.
#
# Streets4MPI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Streets4MPI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Streets4MPI. If not, see <http://www.gnu.org/licenses/>.
#
from array import array
from itertools import repeat
def merge_arrays(arrays):
merged_array = array("I", repeat(0, len(arrays[0])))
for arr in arrays:
        if arr is not None:
for index in range(0, len(arr)):
merged_array[index] += arr[index]
return merged_array
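# A minimal usage sketch: merge_arrays([array('I', [1, 2, 3]), None,
# array('I', [4, 5, 6])]) sums element-wise into array('I', [5, 7, 9]);
# None entries are skipped.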
|
gpl-2.0
|
google-research/privateFM
|
privateFM/FM_simulate.py
|
1
|
4753
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulation (not actual implementation) for private FM sketch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import sqrt, log, exp, ceil
import numpy as np
import scipy.integrate as integrate
import scipy.special
from privateFM.utils import generate_max_geom, EasyDict
# ------------------------------------------------------------------------------
# FM sketch
# ------------------------------------------------------------------------------
def FM(k, gamma, eta, m, seed):
"""Non private FM.
Returns:
m rv ~ max{eta, max{Z_1,..., Z_k}} where Z_i~Geom(gamma/(1+gamma)).
"""
if k == 0:
print('FM gets k=0')
return -1
return generate_max_geom(k, gamma, eta, m, seed)
def set_k_p_eta(config):
"""A helper function for computing k_p and eta."""
epsilon, delta, m, gamma = config.epsilon, config.delta, config.m, config.gamma
if not 0 < epsilon < float('inf') or not 0 < delta < 1:
k_p = 0
eta = 0
else:
eps1 = epsilon / 4 / sqrt(m * log(1 / delta))
k_p = ceil(1 / (exp(eps1) - 1))
eta = ceil(-log(1 - exp(-eps1)) / log(1 + gamma))
if config.morePhantom:
k_p = max((1 + gamma)**eta, k_p)
return k_p, eta
def FMPrivate(k, config, seed, estimation_option='quantile'):
"""Private FM.
Args:
k: true # distinct
config: contains epsilon, delta, m, gamma
seed: random seed
estimation_option: quantile, mean_harmo, mean_geom
Returns:
estimation, i_max
"""
if config.epsilon > 0 and 0 < config.delta < 1:
assert config.epsilon <= 2 * log(1 / config.delta)
k_p, eta = set_k_p_eta(config)
I = FM(k + k_p, config.gamma, eta, config.m, seed)
param = EasyDict(config=config, k_p=k_p, factor=0)
return make_estimate(I, estimation_option, param), I
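# Illustrative call (sketch): estimating the cardinality of a set with one
# million distinct items. The config fields mirror those read by set_k_p_eta
# and FMPrivate above; the numeric values are made-up examples.
#
#   config = EasyDict(epsilon=1.0, delta=1e-6, m=1024, gamma=0.1,
#                     morePhantom=False)
#   estimate, i_max = FMPrivate(10**6, config, seed=0)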
# ------------------------------------------------------------------------------
# Estimation
# ------------------------------------------------------------------------------
def make_estimate(I, option, param):
"""Make the final cardinality estimation given I.
Args:
option: quantile, mean_harmo, mean_geom
param: a dictionary containing k_p and config and factor (if use quantile)
Returns:
estimation
"""
assert option in ['quantile', 'mean_harmo', 'mean_geom']
gamma = param.config.gamma
k_p = param.k_p
m = param.config.m
I = np.array(I)
if option == 'quantile':
factor = param.factor
return (1 + gamma)**np.quantile(I, exp(-1) - gamma * factor) - k_p
debias = get_debias(m, option, gamma)
    if option == 'mean_geom':  # Durand & Flajolet http://algo.inria.fr/flajolet/Publications/DuFl03.pdf
return (1 + gamma)**np.mean(I) * debias - k_p
if option == 'mean_harmo': # HLL https://en.wikipedia.org/wiki/HyperLogLog
return m / np.sum(np.power(1 + gamma, -I)) * debias - k_p
    raise ValueError('make_estimate gets wrong option.')
def get_debias(m, option, gamma):
if option == 'mean_geom':
return (scipy.special.gamma(-1 / m) *
((1 + gamma)**(-1 / m) - 1) / log(1 + gamma))**(-m) / (1 + gamma)
    if option == 'mean_harmo':
        if gamma == 1.0:
            # Standard HyperLogLog bias-correction constants; m > 64 uses the
            # asymptotic formula.
            if m <= 16:
                debias = 0.673
            elif m <= 32:
                debias = 0.697
            elif m <= 64:
                debias = 0.709
            else:
                debias = 0.7213 / (1 + 1.079 / m)
            return debias
else:
debias = 1 / integrate.quad(
lambda u: (log((u + 1 + gamma) /
(u + 1)) / log(1 + gamma))**m * m, 0, float('inf'))[0]
if debias > 2:
m = 10000
debias = 1 / integrate.quad(
lambda u:
(log((u + 1 + gamma) /
(u + 1)) / log(1 + gamma))**m * m, 0, float('inf'))[0]
            # A debias above 2 signals the integral misbehaved; recompute with a large m.
return debias
|
apache-2.0
|
adconk/grandmaangieskitchen
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/common.py
|
366
|
19638
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
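# Usage sketch (illustrative, not part of gyp): memoize works for any pure
# function whose positional arguments are hashable.
#
#   @memoize
#   def Fib(n):
#     return n if n < 2 else Fib(n - 1) + Fib(n - 2)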
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
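# For example (illustrative):
#   ParseQualifiedTarget('path/to/foo.gyp:bar#host')
#     -> ['path/to/foo.gyp', 'bar', 'host']
#   ParseQualifiedTarget('bar')
#     -> [None, 'bar', None]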
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
      # Further (to handle cases like ../cwd), make it relative to cwd.
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to normalized (and therefore absolute paths).
path = os.path.realpath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
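# For example (illustrative, POSIX separators, assuming no symlinks): with
# both paths relative to the current directory,
#   RelativePath('a/b/c', 'a/d')  ->  '../b/c'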
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
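# For example (illustrative):
#   EncodePOSIXShellArgument('hello world')  ->  '"hello world"'
#   EncodePOSIXShellArgument('a"b')          ->  'a\\"b'
#   EncodePOSIXShellArgument('')             ->  '""'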
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
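# Usage sketch (illustrative, not part of gyp): insertion order is kept and
# duplicates are dropped.
#
#   s = OrderedSet(['b', 'a', 'b'])
#   list(s)   ->  ['b', 'a']
#   s.add('c'); s.pop()  ->  'c'   (pops from the end by default)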
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
    A list containing all of the nodes in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]+)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
|
apache-2.0
|
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive2/structured/labs/serving/application/lib/click/_compat.py
|
19
|
23399
|
import re
import io
import os
import sys
import codecs
from weakref import WeakKeyDictionary
PY2 = sys.version_info[0] == 2
CYGWIN = sys.platform.startswith('cygwin')
# Determine local App Engine environment, per Google's own suggestion
APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
              'Development/' in os.environ.get('SERVER_SOFTWARE', ''))
WIN = sys.platform.startswith('win') and not APP_ENGINE
DEFAULT_COLUMNS = 80
_ansi_re = re.compile(r'\033\[((?:\d|;)*)([a-zA-Z])')
def get_filesystem_encoding():
return sys.getfilesystemencoding() or sys.getdefaultencoding()
def _make_text_stream(stream, encoding, errors,
force_readable=False, force_writable=False):
if encoding is None:
encoding = get_best_encoding(stream)
if errors is None:
errors = 'replace'
return _NonClosingTextIOWrapper(stream, encoding, errors,
line_buffering=True,
force_readable=force_readable,
force_writable=force_writable)
def is_ascii_encoding(encoding):
"""Checks if a given encoding is ascii."""
try:
return codecs.lookup(encoding).name == 'ascii'
except LookupError:
return False
def get_best_encoding(stream):
"""Returns the default stream encoding if not found."""
rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
if is_ascii_encoding(rv):
return 'utf-8'
return rv
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __init__(self, stream, encoding, errors,
force_readable=False, force_writable=False, **extra):
self._stream = stream = _FixupStream(stream, force_readable,
force_writable)
io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
# The io module is a place where the Python 3 text behavior
# was forced upon Python 2, so we need to unbreak
# it to look like Python 2.
if PY2:
def write(self, x):
if isinstance(x, str) or is_bytes(x):
try:
self.flush()
except Exception:
pass
return self.buffer.write(str(x))
return io.TextIOWrapper.write(self, x)
def writelines(self, lines):
for line in lines:
self.write(line)
def __del__(self):
try:
self.detach()
except Exception:
pass
def isatty(self):
# https://bitbucket.org/pypy/pypy/issue/1803
return self._stream.isatty()
class _FixupStream(object):
"""The new io interface needs more from streams than streams
traditionally implement. As such, this fix-up code is necessary in
some circumstances.
    The forcing of the readable and writable flags is there because some
    tools put badly patched objects on sys (one such offender is certain
    versions of jupyter notebook).
"""
def __init__(self, stream, force_readable=False, force_writable=False):
self._stream = stream
self._force_readable = force_readable
self._force_writable = force_writable
def __getattr__(self, name):
return getattr(self._stream, name)
def read1(self, size):
f = getattr(self._stream, 'read1', None)
if f is not None:
return f(size)
        # We only dispatch to readline instead of read in Python 2 as we
        # do not want to cause problems with the different implementation
        # of line buffering.
if PY2:
return self._stream.readline(size)
return self._stream.read(size)
def readable(self):
if self._force_readable:
return True
x = getattr(self._stream, 'readable', None)
if x is not None:
return x()
try:
self._stream.read(0)
except Exception:
return False
return True
def writable(self):
if self._force_writable:
return True
x = getattr(self._stream, 'writable', None)
if x is not None:
return x()
try:
self._stream.write('')
except Exception:
try:
self._stream.write(b'')
except Exception:
return False
return True
def seekable(self):
x = getattr(self._stream, 'seekable', None)
if x is not None:
return x()
try:
self._stream.seek(self._stream.tell())
except Exception:
return False
return True
if PY2:
text_type = unicode
bytes = str
raw_input = raw_input
string_types = (str, unicode)
int_types = (int, long)
iteritems = lambda x: x.iteritems()
range_type = xrange
def is_bytes(x):
return isinstance(x, (buffer, bytearray))
_identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
# For Windows, we need to force stdout/stdin/stderr to binary if it's
# fetched for that. This obviously is not the most correct way to do
# it as it changes global state. Unfortunately, there does not seem to
# be a clear better way to do it as just reopening the file in binary
# mode does not change anything.
#
# An option would be to do what Python 3 does and to open the file as
# binary only, patch it back to the system, and then use a wrapper
# stream that converts newlines. It's not quite clear what's the
# correct option here.
#
# This code also lives in _winconsole for the fallback to the console
# emulation stream.
#
# There are also Windows environments where the `msvcrt` module is not
# available (which is why we use try-catch instead of the WIN variable
# here), such as the Google App Engine development server on Windows. In
# those cases there is just nothing we can do.
def set_binary_mode(f):
return f
try:
import msvcrt
except ImportError:
pass
else:
def set_binary_mode(f):
try:
fileno = f.fileno()
except Exception:
pass
else:
msvcrt.setmode(fileno, os.O_BINARY)
return f
try:
import fcntl
except ImportError:
pass
else:
def set_binary_mode(f):
try:
fileno = f.fileno()
except Exception:
pass
else:
flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
return f
def isidentifier(x):
return _identifier_re.search(x) is not None
def get_binary_stdin():
return set_binary_mode(sys.stdin)
def get_binary_stdout():
_wrap_std_stream('stdout')
return set_binary_mode(sys.stdout)
def get_binary_stderr():
_wrap_std_stream('stderr')
return set_binary_mode(sys.stderr)
def get_text_stdin(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stdin, encoding, errors,
force_readable=True)
def get_text_stdout(encoding=None, errors=None):
_wrap_std_stream('stdout')
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stdout, encoding, errors,
force_writable=True)
def get_text_stderr(encoding=None, errors=None):
_wrap_std_stream('stderr')
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stderr, encoding, errors,
force_writable=True)
def filename_to_ui(value):
if isinstance(value, bytes):
value = value.decode(get_filesystem_encoding(), 'replace')
return value
else:
import io
text_type = str
raw_input = input
string_types = (str,)
int_types = (int,)
range_type = range
isidentifier = lambda x: x.isidentifier()
iteritems = lambda x: iter(x.items())
def is_bytes(x):
return isinstance(x, (bytes, memoryview, bytearray))
def _is_binary_reader(stream, default=False):
    try:
        return isinstance(stream.read(0), bytes)
    except Exception:
        # This happens in some cases where the stream was already
        # closed. In this case, we assume the default.
        return default
def _is_binary_writer(stream, default=False):
try:
stream.write(b'')
except Exception:
try:
stream.write('')
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream):
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_reader(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return buf
def _find_binary_writer(stream):
# We need to figure out if the given stream is already binary.
    # This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_writer(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_writer(buf, True):
return buf
def _stream_is_misconfigured(stream):
"""A stream is misconfigured if its encoding is ASCII."""
# If the stream does not have an encoding set, we assume it's set
# to ASCII. This appears to happen in certain unittest
# environments. It's not quite clear what the correct behavior is
# but this at least will force Click to recover somehow.
return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
def _is_compatible_text_stream(stream, encoding, errors):
stream_encoding = getattr(stream, 'encoding', None)
stream_errors = getattr(stream, 'errors', None)
# Perfect match.
if stream_encoding == encoding and stream_errors == errors:
return True
# Otherwise, it's only a compatible stream if we did not ask for
# an encoding.
if encoding is None:
return stream_encoding is not None
return False
def _force_correct_text_reader(text_reader, encoding, errors,
force_readable=False):
if _is_binary_reader(text_reader, False):
binary_reader = text_reader
else:
# If there is no target encoding set, we need to verify that the
# reader is not actually misconfigured.
if encoding is None and not _stream_is_misconfigured(text_reader):
return text_reader
if _is_compatible_text_stream(text_reader, encoding, errors):
return text_reader
# If the reader has no encoding, we try to find the underlying
# binary reader for it. If that fails because the environment is
# misconfigured, we silently go with the same reader because this
# is too common to happen. In that case, mojibake is better than
# exceptions.
binary_reader = _find_binary_reader(text_reader)
if binary_reader is None:
return text_reader
# At this point, we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _make_text_stream(binary_reader, encoding, errors,
force_readable=force_readable)
def _force_correct_text_writer(text_writer, encoding, errors,
force_writable=False):
if _is_binary_writer(text_writer, False):
binary_writer = text_writer
else:
# If there is no target encoding set, we need to verify that the
# writer is not actually misconfigured.
if encoding is None and not _stream_is_misconfigured(text_writer):
return text_writer
if _is_compatible_text_stream(text_writer, encoding, errors):
return text_writer
# If the writer has no encoding, we try to find the underlying
# binary writer for it. If that fails because the environment is
# misconfigured, we silently go with the same writer because this
# is too common to happen. In that case, mojibake is better than
# exceptions.
binary_writer = _find_binary_writer(text_writer)
if binary_writer is None:
return text_writer
# At this point, we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _make_text_stream(binary_writer, encoding, errors,
force_writable=force_writable)
def get_binary_stdin():
reader = _find_binary_reader(sys.stdin)
if reader is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stdin.')
return reader
def get_binary_stdout():
writer = _find_binary_writer(sys.stdout)
if writer is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stdout.')
return writer
def get_binary_stderr():
writer = _find_binary_writer(sys.stderr)
if writer is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stderr.')
return writer
def get_text_stdin(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_reader(sys.stdin, encoding, errors,
force_readable=True)
def get_text_stdout(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stdout, encoding, errors,
force_writable=True)
def get_text_stderr(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stderr, encoding, errors,
force_writable=True)
def filename_to_ui(value):
if isinstance(value, bytes):
value = value.decode(get_filesystem_encoding(), 'replace')
else:
value = value.encode('utf-8', 'surrogateescape') \
.decode('utf-8', 'replace')
return value
def get_streerror(e, default=None):
if hasattr(e, 'strerror'):
msg = e.strerror
else:
if default is not None:
msg = default
else:
msg = str(e)
if isinstance(msg, bytes):
msg = msg.decode('utf-8', 'replace')
return msg
def open_stream(filename, mode='r', encoding=None, errors='strict',
atomic=False):
# Standard streams first. These are simple because they don't need
# special handling for the atomic flag. It's entirely ignored.
if filename == '-':
if any(m in mode for m in ['w', 'a', 'x']):
if 'b' in mode:
return get_binary_stdout(), False
return get_text_stdout(encoding=encoding, errors=errors), False
if 'b' in mode:
return get_binary_stdin(), False
return get_text_stdin(encoding=encoding, errors=errors), False
# Non-atomic writes directly go out through the regular open functions.
if not atomic:
if encoding is None:
return open(filename, mode), True
return io.open(filename, mode, encoding=encoding, errors=errors), True
# Some usability stuff for atomic writes
if 'a' in mode:
raise ValueError(
'Appending to an existing file is not supported, because that '
'would involve an expensive `copy`-operation to a temporary '
'file. Open the file in normal `w`-mode and copy explicitly '
'if that\'s what you\'re after.'
)
if 'x' in mode:
raise ValueError('Use the `overwrite`-parameter instead.')
if 'w' not in mode:
raise ValueError('Atomic writes only make sense with `w`-mode.')
# Atomic writes are more complicated. They work by opening a file
# as a proxy in the same folder and then using the fdopen
# functionality to wrap it in a Python file. Then we wrap it in an
# atomic file that moves the file over on close.
import tempfile
fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
prefix='.__atomic-write')
if encoding is not None:
f = io.open(fd, mode, encoding=encoding, errors=errors)
else:
f = os.fdopen(fd, mode)
return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True
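# Usage sketch (illustrative): the returned boolean tells the caller whether
# it owns the stream and should close it ('-' maps to stdin/stdout, which the
# caller must not close).
#
#   f, should_close = open_stream('out.txt', 'w', atomic=True)
#   f.write('data')
#   f.close()  # the temporary file is moved over 'out.txt' here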
# Used in a destructor call, needs extra protection from interpreter cleanup.
if hasattr(os, 'replace'):
_replace = os.replace
_can_replace = True
else:
_replace = os.rename
_can_replace = not WIN
class _AtomicFile(object):
def __init__(self, f, tmp_filename, real_filename):
self._f = f
self._tmp_filename = tmp_filename
self._real_filename = real_filename
self.closed = False
@property
def name(self):
return self._real_filename
def close(self, delete=False):
if self.closed:
return
self._f.close()
if not _can_replace:
try:
os.remove(self._real_filename)
except OSError:
pass
_replace(self._tmp_filename, self._real_filename)
self.closed = True
def __getattr__(self, name):
return getattr(self._f, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close(delete=exc_type is not None)
def __repr__(self):
return repr(self._f)
auto_wrap_for_ansi = None
colorama = None
get_winterm_size = None
def strip_ansi(value):
return _ansi_re.sub('', value)
def should_strip_ansi(stream=None, color=None):
if color is None:
if stream is None:
stream = sys.stdin
return not isatty(stream)
return not color
# If we're on Windows, we provide transparent integration through
# colorama. This will make ANSI colors through the echo function
# work automatically.
if WIN:
# Windows has a smaller terminal
DEFAULT_COLUMNS = 79
from ._winconsole import _get_windows_console_stream, _wrap_std_stream
def _get_argv_encoding():
import locale
return locale.getpreferredencoding()
if PY2:
def raw_input(prompt=''):
sys.stderr.flush()
if prompt:
stdout = _default_text_stdout()
stdout.write(prompt)
stdin = _default_text_stdin()
return stdin.readline().rstrip('\r\n')
try:
import colorama
except ImportError:
pass
else:
_ansi_stream_wrappers = WeakKeyDictionary()
def auto_wrap_for_ansi(stream, color=None):
"""This function wraps a stream so that calls through colorama
are issued to the win32 console API to recolor on demand. It
also ensures to reset the colors if a write call is interrupted
to not destroy the console afterwards.
"""
try:
cached = _ansi_stream_wrappers.get(stream)
except Exception:
cached = None
if cached is not None:
return cached
strip = should_strip_ansi(stream, color)
ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
rv = ansi_wrapper.stream
_write = rv.write
def _safe_write(s):
try:
return _write(s)
except:
ansi_wrapper.reset_all()
raise
rv.write = _safe_write
try:
_ansi_stream_wrappers[stream] = rv
except Exception:
pass
return rv
def get_winterm_size():
win = colorama.win32.GetConsoleScreenBufferInfo(
colorama.win32.STDOUT).srWindow
return win.Right - win.Left, win.Bottom - win.Top
else:
def _get_argv_encoding():
return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
_get_windows_console_stream = lambda *x: None
_wrap_std_stream = lambda *x: None
def term_len(x):
return len(strip_ansi(x))
def isatty(stream):
try:
return stream.isatty()
except Exception:
return False
def _make_cached_stream_func(src_func, wrapper_func):
cache = WeakKeyDictionary()
def func():
stream = src_func()
try:
rv = cache.get(stream)
except Exception:
rv = None
if rv is not None:
return rv
rv = wrapper_func()
try:
stream = src_func() # In case wrapper_func() modified the stream
cache[stream] = rv
except Exception:
pass
return rv
return func
_default_text_stdin = _make_cached_stream_func(
lambda: sys.stdin, get_text_stdin)
_default_text_stdout = _make_cached_stream_func(
lambda: sys.stdout, get_text_stdout)
_default_text_stderr = _make_cached_stream_func(
lambda: sys.stderr, get_text_stderr)
binary_streams = {
'stdin': get_binary_stdin,
'stdout': get_binary_stdout,
'stderr': get_binary_stderr,
}
text_streams = {
'stdin': get_text_stdin,
'stdout': get_text_stdout,
'stderr': get_text_stderr,
}
|
apache-2.0
|
CCI-MOC/nova
|
nova/config.py
|
14
|
2488
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import options
from oslo_log import log
from nova import debugger
from nova import paths
from nova import rpc
from nova import version
CONF = cfg.CONF
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('nova.sqlite')
# NOTE(mikal): suds is used by the vmware driver, removing this will
# cause many extraneous log lines for their tempest runs. Refer to
# https://review.openstack.org/#/c/219225/ for details.
_DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo_messaging=INFO', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
'keystonemiddleware=WARN', 'routes.middleware=WARN',
'stevedore=WARN', 'glanceclient=WARN']
_DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d '
'%(levelname)s %(name)s [%(request_id)s '
'%(user_identity)s] %(instance)s'
'%(message)s')
def parse_args(argv, default_config_files=None):
log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
log.register_options(CONF)
options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='nova.sqlite')
rpc.set_defaults(control_exchange='nova')
debugger.register_cli_opts()
CONF(argv[1:],
project='nova',
version=version.version_string(),
default_config_files=default_config_files)
rpc.init(CONF)
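# Typical invocation from a service entry point (illustrative):
#
#   from nova import config
#   config.parse_args(sys.argv)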
|
apache-2.0
|
fx19880617/helix
|
helix-core/src/main/scripts/integration-test/script/pexpect.py
|
11
|
76727
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to Pexpect -- the function, run() and the class,
spawn. You can call the run() function to execute a command and return the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The more powerful interface is the spawn class. You can use this to spawn an
external child command and then interact with the child by sending lines and
expecting responses.
For example::
child = pexpect.spawn('scp foo [email protected]:.')
child.expect ('Password:')
child.sendline (mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Geoffrey Marshall, Francisco Lourenco, Glen Mabey, Karthik Gurusamy, Fernando
Perez, Corey Minyard, Jon Cohen, Guillaume Chazarain, Andrew Ryan, Nick
Craig-Wood, Andrew Stone, Jorgen Grahn (Let me know if I forgot anyone.)
Free, open source, and all that good stuff.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Pexpect Copyright (c) 2008 Noah Spurrier
http://pexpect.sourceforge.net/
$Id: pexpect.py 507 2007-12-27 02:40:52Z noah $
"""
try:
import os, sys, time
import select
import string
import re
import struct
import resource
import types
import pty
import tty
import termios
import fcntl
import errno
import traceback
import signal
except ImportError, e:
raise ImportError (str(e) + """
A critical module was not found. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.""")
__version__ = '2.3'
__revision__ = '$Revision: 399 $'
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'run', 'which',
'split_command_line', '__version__', '__revision__']
# Exception classes used by this module.
class ExceptionPexpect(Exception):
"""Base class for all exceptions raised by this module.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
"""This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. """
tblist = traceback.extract_tb(sys.exc_info()[2])
#tblist = filter(self.__filter_not_pexpect, tblist)
tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
def __filter_not_pexpect(self, trace_list_item):
"""This returns True if list item 0 the string 'pexpect.py' in it. """
if trace_list_item[0].find('pexpect.py') == -1:
return True
else:
return False
class EOF(ExceptionPexpect):
"""Raised when EOF is read from a child. This usually means the child has exited."""
class TIMEOUT(ExceptionPexpect):
"""Raised when a read time exceeds the timeout. """
##class TIMEOUT_PATTERN(TIMEOUT):
## """Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## """
##class MAXBUFFER(ExceptionPexpect):
## """Raised when a scan buffer fills before matching an expected pattern."""
def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None, logfile=None, cwd=None, env=None):
"""
This function runs the given command; waits for it to finish; then
returns all output as a string. STDERR is included in output. If the full
path to the command is not given then the path is searched.
Note that lines are terminated by CR/LF (\\r\\n) combination even on
UNIX-like systems because this is the standard for pseudo ttys. If you set
'withexitstatus' to true, then run will return a tuple of (command_output,
exitstatus). If 'withexitstatus' is false then this returns just
command_output.
The run() function can often be used instead of creating a spawn instance.
For example, the following code uses spawn::
from pexpect import *
child = spawn('scp foo [email protected]:.')
child.expect ('(?i)password')
child.sendline (mypassword)
    The previous code can be replaced with the following::
from pexpect import *
run ('scp foo [email protected]:.', events={'(?i)password': mypassword})
Examples
========
Start the apache daemon on the local machine::
from pexpect import *
run ("/usr/local/apache/bin/apachectl start")
Check in a file using SVN::
from pexpect import *
run ("svn ci -m 'automatic commit' my_file.py")
Run a command and capture exit status::
from pexpect import *
(command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)
Tricky Examples
===============
The following will run SSH and execute 'ls -l' on the remote machine. The
password 'secret' will be sent if the '(?i)password' pattern is ever seen::
run ("ssh [email protected] 'ls -l'", events={'(?i)password':'secret\\n'})
This will start mencoder to rip a video from DVD. This will also display
progress ticks every 5 seconds as it runs. For example::
from pexpect import *
def print_ticks(d):
print d['event_count'],
run ("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events={TIMEOUT:print_ticks}, timeout=5)
    The 'events' argument should be a dictionary of patterns and responses.
    Whenever one of the patterns is seen in the command output, run() will send
    the associated response string. Note that you should put newlines in your
    string if Enter is necessary. The responses may also contain callback
    functions. Any callback is a function that takes a dictionary as an
    argument. The dictionary contains all the locals from the run() function,
    so you can access the child spawn object or any other variable defined in
    run() (event_count, child, and extra_args are the most useful). A callback
    may return True to stop the current run process; otherwise run() continues
    until the next event. A callback may also return a string which will be
    sent to the child. 'extra_args' is not used directly by run(). It provides
    a way to pass data to a callback function through run(), via the locals
    dictionary passed to the callback. """
if timeout == -1:
child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env)
else:
child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile, cwd=cwd, env=env)
if events is not None:
patterns = events.keys()
responses = events.values()
else:
patterns=None # We assume that EOF or TIMEOUT will save us.
responses=None
child_result_list = []
event_count = 0
while 1:
try:
index = child.expect (patterns)
if type(child.after) in types.StringTypes:
child_result_list.append(child.before + child.after)
else: # child.after may have been a TIMEOUT or EOF, so don't cat those.
child_result_list.append(child.before)
if type(responses[index]) in types.StringTypes:
child.send(responses[index])
elif type(responses[index]) is types.FunctionType:
callback_result = responses[index](locals())
sys.stdout.flush()
if type(callback_result) in types.StringTypes:
child.send(callback_result)
elif callback_result:
break
else:
raise TypeError ('The callback must be a string or function type.')
event_count = event_count + 1
except TIMEOUT, e:
child_result_list.append(child.before)
break
except EOF, e:
child_result_list.append(child.before)
break
child_result = ''.join(child_result_list)
if withexitstatus:
child.close()
return (child_result, child.exitstatus)
else:
return child_result
class spawn (object):
"""This is the main class interface for Pexpect. Use this class to start
and control child applications. """
def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None, logfile=None, cwd=None, env=None):
"""This is the constructor. The command parameter may be a string that
includes a command and any arguments to the command. For example::
child = pexpect.spawn ('/usr/bin/ftp')
child = pexpect.spawn ('/usr/bin/ssh [email protected]')
child = pexpect.spawn ('ls -latr /tmp')
You may also construct it with a list of arguments like so::
child = pexpect.spawn ('/usr/bin/ftp', [])
child = pexpect.spawn ('/usr/bin/ssh', ['[email protected]'])
child = pexpect.spawn ('ls', ['-latr', '/tmp'])
After this the child application will be created and will be ready to
talk to. For normal use, see expect() and send() and sendline().
Remember that Pexpect does NOT interpret shell meta characters such as
redirect, pipe, or wild cards (>, |, or *). This is a common mistake.
If you want to run a command and pipe it through another command then
you must also start a shell. For example::
child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > log_list.txt"')
child.expect(pexpect.EOF)
The second form of spawn (where you pass a list of arguments) is useful
in situations where you wish to spawn a command and pass it its own
argument list. This can make syntax more clear. For example, the
following is equivalent to the previous example::
shell_cmd = 'ls -l | grep LOG > log_list.txt'
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
child.expect(pexpect.EOF)
The maxread attribute sets the read buffer size. This is maximum number
of bytes that Pexpect will try to read from a TTY at one time. Setting
the maxread size to 1 will turn off buffering. Setting the maxread
value higher may help performance in cases where large amounts of
output are read back from the child. This feature is useful in
conjunction with searchwindowsize.
        The searchwindowsize attribute sets how far back in the incoming
        search buffer Pexpect will search for pattern matches. Every time
        Pexpect reads some data from the child it will append the data to the
        incoming buffer. The default is to search from the beginning of the
        incoming buffer each time new data is read from the child. But this is
        very inefficient if you are running a command that generates a large
        amount of data where you only want to match near the end. The
        searchwindowsize does not affect the size of the incoming data buffer.
        You will still have access to the full buffer after expect() returns.
The logfile member turns on or off logging. All input and output will
be copied to the given file object. Set logfile to None to stop
logging. This is the default. Set logfile to sys.stdout to echo
everything to standard output. The logfile is flushed after each write.
Example log input and output to a file::
child = pexpect.spawn('some_command')
fout = file('mylog.txt','w')
child.logfile = fout
Example log to stdout::
child = pexpect.spawn('some_command')
child.logfile = sys.stdout
The logfile_read and logfile_send members can be used to separately log
the input from the child and output sent to the child. Sometimes you
don't want to see everything you write to the child. You only want to
log what the child sends back. For example::
child = pexpect.spawn('some_command')
child.logfile_read = sys.stdout
To separately log output sent to the child use logfile_send::
self.logfile_send = fout
The delaybeforesend helps overcome a weird behavior that many users
were experiencing. The typical problem was that a user would expect() a
"Password:" prompt and then immediately call sendline() to send the
password. The user would then see that their password was echoed back
to them. Passwords don't normally echo. The problem is caused by the
fact that most applications print out the "Password" prompt and then
turn off stdin echo, but if you send your password before the
application turned off echo, then you get your password echoed.
Normally this wouldn't be a problem when interacting with a human at a
real keyboard. If you introduce a slight delay just before writing then
this seems to clear up the problem. This was such a common problem for
many users that I decided that the default pexpect behavior should be
to sleep just before writing to the child application. 1/20th of a
second (50 ms) seems to be enough to clear up the problem. You can set
delaybeforesend to 0 to return to the old behavior. Most Linux machines
don't like this to be below 0.03. I don't know why.
Note that spawn is clever about finding commands on your path.
It uses the same logic that "which" uses to find executables.
If you wish to get the exit status of the child you must call the
close() method. The exit or signal status of the child will be stored
in self.exitstatus or self.signalstatus. If the child exited normally
then exitstatus will store the exit return code and signalstatus will
be None. If the child was terminated abnormally with a signal then
signalstatus will store the signal value and exitstatus will be None.
If you need more detail you can also read the self.status member which
stores the status returned by os.waitpid. You can interpret this using
        os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG. """
self.STDIN_FILENO = pty.STDIN_FILENO
self.STDOUT_FILENO = pty.STDOUT_FILENO
self.STDERR_FILENO = pty.STDERR_FILENO
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.searcher = None
self.ignorecase = False
self.before = None
self.after = None
self.match = None
self.match_index = None
self.terminated = True
self.exitstatus = None
self.signalstatus = None
self.status = None # status returned by os.waitpid
self.flag_eof = False
self.pid = None
self.child_fd = -1 # initially closed
self.timeout = timeout
self.delimiter = EOF
self.logfile = logfile
self.logfile_read = None # input from child (read_nonblocking)
self.logfile_send = None # output to send (send, sendline)
self.maxread = maxread # max bytes to read at one time into buffer
self.buffer = '' # This is the read buffer. See maxread.
self.searchwindowsize = searchwindowsize # Anything before searchwindowsize point is preserved, but not searched.
# Most Linux machines don't like delaybeforesend to be below 0.03 (30 ms).
self.delaybeforesend = 0.05 # Sets sleep time used just before sending data to child. Time in seconds.
self.delayafterclose = 0.1 # Sets delay in close() method to allow kernel time to update process status. Time in seconds.
self.delayafterterminate = 0.1 # Sets delay in terminate() method to allow kernel time to update process status. Time in seconds.
self.softspace = False # File-like object.
self.name = '<' + repr(self) + '>' # File-like object.
self.encoding = None # File-like object.
self.closed = True # File-like object.
self.cwd = cwd
self.env = env
self.__irix_hack = (sys.platform.lower().find('irix')>=0) # This flags if we are running on irix
# Solaris uses internal __fork_pty(). All others use pty.fork().
if (sys.platform.lower().find('solaris')>=0) or (sys.platform.lower().find('sunos5')>=0):
self.use_native_pty_fork = False
else:
self.use_native_pty_fork = True
# allow dummy instances for subclasses that may not use command or args.
if command is None:
self.command = None
self.args = None
self.name = '<pexpect factory incomplete>'
else:
self._spawn (command, args)
def __del__(self):
"""This makes sure that no system resources are left open. Python only
garbage collects Python objects. OS file descriptors are not Python
objects, so they must be handled explicitly. If the child file
descriptor was opened outside of this class (passed to the constructor)
then this does not close it. """
if not self.closed:
# It is possible for __del__ methods to execute during the
# teardown of the Python VM itself. Thus self.close() may
# trigger an exception because os.close may be None.
# -- Fernando Perez
try:
self.close()
except AttributeError:
pass
def __str__(self):
"""This returns a human-readable string that represents the state of
the object. """
s = []
s.append(repr(self))
s.append('version: ' + __version__ + ' (' + __revision__ + ')')
s.append('command: ' + str(self.command))
s.append('args: ' + str(self.args))
s.append('searcher: ' + str(self.searcher))
s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
s.append('before (last 100 chars): ' + str(self.before)[-100:])
s.append('after: ' + str(self.after))
s.append('match: ' + str(self.match))
s.append('match_index: ' + str(self.match_index))
s.append('exitstatus: ' + str(self.exitstatus))
s.append('flag_eof: ' + str(self.flag_eof))
s.append('pid: ' + str(self.pid))
s.append('child_fd: ' + str(self.child_fd))
s.append('closed: ' + str(self.closed))
s.append('timeout: ' + str(self.timeout))
s.append('delimiter: ' + str(self.delimiter))
s.append('logfile: ' + str(self.logfile))
s.append('logfile_read: ' + str(self.logfile_read))
s.append('logfile_send: ' + str(self.logfile_send))
s.append('maxread: ' + str(self.maxread))
s.append('ignorecase: ' + str(self.ignorecase))
s.append('searchwindowsize: ' + str(self.searchwindowsize))
s.append('delaybeforesend: ' + str(self.delaybeforesend))
s.append('delayafterclose: ' + str(self.delayafterclose))
s.append('delayafterterminate: ' + str(self.delayafterterminate))
return '\n'.join(s)
def _spawn(self,command,args=[]):
"""This starts the given command in a child process. This does all the
fork/exec type of stuff for a pty. This is called by __init__. If args
is empty then command will be parsed (split on spaces) and args will be
set to parsed arguments. """
# The pid and child_fd of this object get set by this method.
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
# That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
# If command is an int type then it may represent a file descriptor.
if type(command) == type(0):
raise ExceptionPexpect ('Command is an int type. If this is a file descriptor then maybe you want to use fdpexpect.fdspawn which takes an existing file descriptor instead of a command string.')
if type (args) != type([]):
raise TypeError ('The argument, args, must be a list.')
if args == []:
self.args = split_command_line(command)
self.command = self.args[0]
else:
self.args = args[:] # work with a copy
self.args.insert (0, command)
self.command = command
command_with_path = which(self.command)
if command_with_path is None:
raise ExceptionPexpect ('The command was not found or was not executable: %s.' % self.command)
self.command = command_with_path
self.args[0] = self.command
self.name = '<' + ' '.join (self.args) + '>'
assert self.pid is None, 'The pid member should be None.'
assert self.command is not None, 'The command member should not be None.'
if self.use_native_pty_fork:
try:
self.pid, self.child_fd = pty.fork()
except OSError, e:
raise ExceptionPexpect('Error! pty.fork() failed: ' + str(e))
else: # Use internal __fork_pty
self.pid, self.child_fd = self.__fork_pty()
if self.pid == 0: # Child
try:
self.child_fd = sys.stdout.fileno() # used by setwinsize()
self.setwinsize(24, 80)
except:
# Some platforms do not like setwinsize (Cygwin).
# This will cause problem when running applications that
# are very picky about window size.
# This is a serious limitation, but not a show stopper.
pass
# Do not allow child to inherit open file descriptors from parent.
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
for i in range (3, max_fd):
try:
os.close (i)
except OSError:
pass
# I don't know why this works, but ignoring SIGHUP fixes a
# problem when trying to start a Java daemon with sudo
# (specifically, Tomcat).
signal.signal(signal.SIGHUP, signal.SIG_IGN)
if self.cwd is not None:
os.chdir(self.cwd)
if self.env is None:
os.execv(self.command, self.args)
else:
os.execvpe(self.command, self.args, self.env)
# Parent
self.terminated = False
self.closed = False
def __fork_pty(self):
"""This implements a substitute for the forkpty system call. This
should be more portable than the pty.fork() function. Specifically,
this should work on Solaris.
Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
resolve the issue with Python's pty.fork() not supporting Solaris,
particularly ssh. Based on patch to posixmodule.c authored by Noah
Spurrier::
http://mail.python.org/pipermail/python-dev/2003-May/035281.html
"""
parent_fd, child_fd = os.openpty()
if parent_fd < 0 or child_fd < 0:
raise ExceptionPexpect, "Error! Could not open pty with os.openpty()."
pid = os.fork()
if pid < 0:
raise ExceptionPexpect, "Error! Failed os.fork()."
elif pid == 0:
# Child.
os.close(parent_fd)
self.__pty_make_controlling_tty(child_fd)
os.dup2(child_fd, 0)
os.dup2(child_fd, 1)
os.dup2(child_fd, 2)
if child_fd > 2:
os.close(child_fd)
else:
# Parent.
os.close(child_fd)
return pid, parent_fd
def __pty_make_controlling_tty(self, tty_fd):
"""This makes the pseudo-terminal the controlling tty. This should be
more portable than the pty.fork() function. Specifically, this should
work on Solaris. """
child_name = os.ttyname(tty_fd)
# Disconnect from controlling tty if still connected.
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
if fd >= 0:
os.close(fd)
os.setsid()
# Verify we are disconnected from controlling tty
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
if fd >= 0:
os.close(fd)
raise ExceptionPexpect, "Error! We are not disconnected from a controlling tty."
except:
# Good! We are disconnected from a controlling tty.
pass
# Verify we can open child pty.
fd = os.open(child_name, os.O_RDWR);
if fd < 0:
raise ExceptionPexpect, "Error! Could not open child pty, " + child_name
else:
os.close(fd)
# Verify we now have a controlling tty.
fd = os.open("/dev/tty", os.O_WRONLY)
if fd < 0:
raise ExceptionPexpect, "Error! Could not open controlling tty, /dev/tty"
else:
os.close(fd)
def fileno (self): # File-like object.
"""This returns the file descriptor of the pty for the child.
"""
return self.child_fd
def close (self, force=True): # File-like object.
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). """
if not self.closed:
self.flush()
os.close (self.child_fd)
time.sleep(self.delayafterclose) # Give kernel time to update process status.
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect ('close() could not terminate the child using terminate()')
self.child_fd = -1
self.closed = True
#self.pid = None
def flush (self): # File-like object.
"""This does nothing. It is here to support the interface for a
File-like object. """
pass
def isatty (self): # File-like object.
"""This returns True if the file descriptor is open and connected to a
tty(-like) device, else False. """
return os.isatty(self.child_fd)
def waitnoecho (self, timeout=-1):
"""This waits until the terminal ECHO flag is set False. This returns
True if the echo mode is off. This returns False if the ECHO flag was
not set False before the timeout. This can be used to detect when the
child is waiting for a password. Usually a child application will turn
off echo mode when it is waiting for the user to enter a password. For
example, instead of expecting the "password:" prompt you can wait for
the child to set ECHO off::
p = pexpect.spawn ('ssh [email protected]')
p.waitnoecho()
p.sendline(mypassword)
If timeout is None then this method will block forever until the ECHO
flag is False.
"""
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
while True:
if not self.getecho():
return True
if timeout < 0 and timeout is not None:
return False
if timeout is not None:
timeout = end_time - time.time()
time.sleep(0.1)
def getecho (self):
"""This returns the terminal echo mode. This returns True if echo is
on or False if echo is off. Child applications that are expecting you
to enter a password often set ECHO False. See waitnoecho(). """
attr = termios.tcgetattr(self.child_fd)
if attr[3] & termios.ECHO:
return True
return False
def setecho (self, state):
"""This sets the terminal echo mode on or off. Note that anything the
child sent before the echo will be lost, so you should be sure that
your input buffer is empty before you call setecho(). For example, the
following will work as expected::
p = pexpect.spawn('cat')
p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
p.expect (['1234'])
p.expect (['1234'])
p.setecho(False) # Turn off tty echo
p.sendline ('abcd') # We will see this only once (echoed by cat).
p.sendline ('wxyz') # We will see this only once (echoed by cat).
p.expect (['abcd'])
p.expect (['wxyz'])
The following WILL NOT WORK because the lines sent before the setecho
will be lost::
p = pexpect.spawn('cat')
p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
p.setecho(False) # Turn off tty echo
p.sendline ('abcd') # We will see this only once (echoed by cat).
p.sendline ('wxyz') # We will see this only once (echoed by cat).
p.expect (['1234'])
p.expect (['1234'])
p.expect (['abcd'])
p.expect (['wxyz'])
"""
attr = termios.tcgetattr(self.child_fd)
if state:
attr[3] = attr[3] | termios.ECHO
else:
attr[3] = attr[3] & ~termios.ECHO
# I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent
# and blocked on some platforms. TCSADRAIN is probably ideal if it worked.
termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
def read_nonblocking (self, size = 1, timeout = -1):
"""This reads at most size characters from the child application. It
includes a timeout. If the read does not complete within the timeout
period then a TIMEOUT exception is raised. If the end of file is read
then an EOF exception will be raised. If a logfile was set (via the
logfile attribute) then all data will also be written to the log file.
If timeout is None then the read may block indefinitely. If timeout is -1
then the self.timeout value is used. If timeout is 0 then the child is
polled and if there was no data immediately ready then this will raise
a TIMEOUT exception.
The timeout refers only to the amount of time to read at least one
character. This is not affected by the 'size' parameter, so if you call
read_nonblocking(size=100, timeout=30) and only one character is
available right away then one character will be returned immediately.
It will not wait for 30 seconds for another 99 characters to come in.
This is a wrapper around os.read(). It uses select.select() to
implement the timeout. """
if self.closed:
raise ValueError ('I/O operation on closed file in read_nonblocking().')
if timeout == -1:
timeout = self.timeout
# Note that some systems such as Solaris do not give an EOF when
# the child dies. In fact, you can still try to read
# from the child_fd -- it will block forever or until TIMEOUT.
# For this case, I test isalive() before doing any reading.
# If isalive() is false, then I pretend that this is the same as EOF.
if not self.isalive():
r,w,e = self.__select([self.child_fd], [], [], 0) # timeout of 0 means "poll"
if not r:
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Braindead platform.')
elif self.__irix_hack:
# This is a hack for Irix. It seems that Irix requires a long delay before checking isalive.
# This adds a 2 second delay, but only when the child is terminated.
r, w, e = self.__select([self.child_fd], [], [], 2)
if not r and not self.isalive():
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')
r,w,e = self.__select([self.child_fd], [], [], timeout)
if not r:
if not self.isalive():
# Some platforms, such as Irix, will claim that their processes are alive;
# then timeout on the select; and then finally admit that they are not alive.
self.flag_eof = True
raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
else:
raise TIMEOUT ('Timeout exceeded in read_nonblocking().')
if self.child_fd in r:
try:
s = os.read(self.child_fd, size)
except OSError, e: # Linux does this
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Exception style platform.')
if s == '': # BSD style
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Empty string style platform.')
if self.logfile is not None:
self.logfile.write (s)
self.logfile.flush()
if self.logfile_read is not None:
self.logfile_read.write (s)
self.logfile_read.flush()
return s
raise ExceptionPexpect ('Reached an unexpected state in read_nonblocking().')
def read (self, size = -1): # File-like object.
"""This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. """
if size == 0:
return ''
if size < 0:
self.expect (self.delimiter) # delimiter default is EOF
return self.before
# I could have done this more directly by not using expect(), but
# I deliberately decided to couple read() to expect() so that
# I would catch any bugs early and ensure consistent behavior.
# It's a little less efficient, but there is less for me to
# worry about if I have to later modify read() or expect().
# Note, it's OK if size==-1 in the regex. That just means it
# will never match anything in which case we stop only on EOF.
cre = re.compile('.{%d}' % size, re.DOTALL)
index = self.expect ([cre, self.delimiter]) # delimiter default is EOF
if index == 0:
return self.after ### self.before should be ''. Should I assert this?
return self.before
def readline (self, size = -1): # File-like object.
"""This reads and returns one entire line. A trailing newline is kept
in the string, but may be absent when a file ends with an incomplete
line. Note: This readline() looks for a \\r\\n pair even on UNIX
because this is what the pseudo tty device returns. So contrary to what
you may expect you will receive the newline as \\r\\n. An empty string
is returned when EOF is hit immediately. Currently, the size argument is
mostly ignored, so this behavior is not standard for a file-like
object. If size is 0 then an empty string is returned.
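For example, a hedged sketch::
    child = pexpect.spawn('/bin/ls')
    line = child.readline() # the line ends with '\\r\\n' from the pty
"""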
if size == 0:
return ''
index = self.expect (['\r\n', self.delimiter]) # delimiter default is EOF
if index == 0:
return self.before + '\r\n'
else:
return self.before
def __iter__ (self): # File-like object.
"""This is to support iterators over a file-like object.
"""
return self
def next (self): # File-like object.
"""This is to support iterators over a file-like object.
"""
result = self.readline()
if result == "":
raise StopIteration
return result
def readlines (self, sizehint = -1): # File-like object.
"""This reads until EOF using readline() and returns a list containing
the lines thus read. The optional "sizehint" argument is ignored. """
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def write(self, s): # File-like object.
"""This is similar to send() except that there is no return value.
"""
self.send (s)
def writelines (self, sequence): # File-like object.
"""This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
strings. This does not add line separators. There is no return value.
"""
for s in sequence:
self.write (s)
def send(self, s):
"""This sends a string to the child process. This returns the number of
bytes written. If a log file was set then the data is also written to
the log. """
time.sleep(self.delaybeforesend)
if self.logfile is not None:
self.logfile.write (s)
self.logfile.flush()
if self.logfile_send is not None:
self.logfile_send.write (s)
self.logfile_send.flush()
c = os.write(self.child_fd, s)
return c
def sendline(self, s=''):
"""This is like send(), but it adds a line feed (os.linesep). This
returns the number of bytes written. """
n = self.send(s)
n = n + self.send (os.linesep)
return n
def sendcontrol(self, char):
"""This sends a control character to the child such as Ctrl-C or
Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
child.sendcontrol('g')
See also, sendintr() and sendeof().
"""
char = char.lower()
a = ord(char)
if a>=97 and a<=122:
a = a - ord('a') + 1
return self.send (chr(a))
d = {'@':0, '`':0,
'[':27, '{':27,
'\\':28, '|':28,
']':29, '}': 29,
'^':30, '~':30,
'_':31,
'?':127}
if char not in d:
return 0
return self.send (chr(d[char]))
def sendeof(self):
"""This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
end-of-file. This means to work as expected a sendeof() has to be
called at the beginning of a line. This method does not send a newline.
It is the responsibility of the caller to ensure the eof is sent at the
beginning of a line. """
### Hmmm... how do I send an EOF?
###C if ((m = write(pty, *buf, p - *buf)) < 0)
###C return (errno == EWOULDBLOCK) ? n : -1;
#fd = sys.stdin.fileno()
#old = termios.tcgetattr(fd) # remember current state
#attr = termios.tcgetattr(fd)
#attr[3] = attr[3] | termios.ICANON # ICANON must be set to recognize EOF
#try: # use try/finally to ensure state gets restored
# termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# if hasattr(termios, 'CEOF'):
# os.write (self.child_fd, '%c' % termios.CEOF)
# else:
# # Silly platform does not define CEOF so assume CTRL-D
# os.write (self.child_fd, '%c' % 4)
#finally: # restore state
# termios.tcsetattr(fd, termios.TCSADRAIN, old)
if hasattr(termios, 'VEOF'):
char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
else:
# platform does not define VEOF so assume CTRL-D
char = chr(4)
self.send(char)
def sendintr(self):
"""This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. """
if hasattr(termios, 'VINTR'):
char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
else:
# platform does not define VINTR so assume CTRL-C
char = chr(3)
self.send (char)
def eof (self):
"""This returns True if the EOF exception was ever raised.
"""
return self.flag_eof
def terminate(self, force=False):
"""This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. """
if not self.isalive():
return True
try:
self.kill(signal.SIGHUP)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGCONT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
return False
except OSError, e:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
def wait(self):
"""This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
may have printed output then called exit(); but, technically, the child
is still alive until its output is read. """
if self.isalive():
pid, status = os.waitpid(self.pid, 0)
else:
raise ExceptionPexpect ('Cannot wait for dead child process.')
if os.WIFEXITED (status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED (status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED (status):
raise ExceptionPexpect ('Wait was called for a child process that is stopped. This is not supported. Is some other process attempting job control with our child pid?')
return self.exitstatus
def isalive(self):
"""This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not. It can take literally
SECONDS for Solaris to return the right status. """
if self.terminated:
return False
if self.flag_eof:
# This is for Linux, which requires the blocking form of waitpid to get
# status of a defunct process. This is super-lame. The flag_eof would have
# been set in read_nonblocking(), so this should be safe.
waitpid_options = 0
else:
waitpid_options = os.WNOHANG
try:
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError, e: # No child processes
if e[0] == errno.ECHILD:
raise ExceptionPexpect ('isalive() encountered condition where "terminated" is 0, but there was no child process. Did someone else call waitpid() on our process?')
else:
raise e
# I have to do this twice for Solaris. I can't even believe that I figured this out...
# If waitpid() returns 0 it means that no child process wishes to
# report, and the value of status is undefined.
if pid == 0:
try:
pid, status = os.waitpid(self.pid, waitpid_options) ### os.WNOHANG) # Solaris!
except OSError, e: # This should never happen...
if e[0] == errno.ECHILD:
raise ExceptionPexpect ('isalive() encountered condition that should never happen. There was no child process. Did someone else call waitpid() on our process?')
else:
raise e
# If pid is still 0 after two calls to waitpid() then
# the process really is alive. This seems to work on all platforms, except
# for Irix which seems to require a blocking call on waitpid or select, so I let read_nonblocking
# take care of this situation (unfortunately, this requires waiting through the timeout).
if pid == 0:
return True
if os.WIFEXITED (status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED (status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED (status):
raise ExceptionPexpect ('isalive() encountered condition where child process is stopped. This is not supported. Is some other process attempting job control with our child pid?')
return False
def kill(self, sig):
"""This sends the given signal to the child application. In keeping
with UNIX tradition it has a misleading name. It does not necessarily
kill the child unless you send the right signal. """
# Same as os.kill, but the pid is given for you.
if self.isalive():
os.kill(self.pid, sig)
def compile_pattern_list(self, patterns):
"""This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
This avoids repeated calls to compile_pattern_list() inside the loop::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
i = self.expect_list(cpl, timeout)
...
"""
if patterns is None:
return []
if type(patterns) is not types.ListType:
patterns = [patterns]
compile_flags = re.DOTALL # Allow dot to match \n
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for p in patterns:
if type(p) in types.StringTypes:
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif type(p) is type(re.compile('')):
compiled_pattern_list.append(p)
else:
raise TypeError ('Argument must be one of StringTypes, EOF, TIMEOUT, SRE_Pattern, or a list of those types. %s' % str(type(p)))
return compiled_pattern_list
def expect(self, pattern, timeout = -1, searchwindowsize=None):
"""This seeks through the stream until a pattern is matched. The
pattern is overloaded and may take several types. The pattern can be a
StringType, EOF, a compiled re, or a list of any of those types.
Strings will be compiled to re types. This returns the index into the
pattern list. If the pattern was not a list this returns index 0 on a
successful match. This may raise exceptions for EOF or TIMEOUT. To
avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
list. That will cause expect to match an EOF or TIMEOUT condition
instead of raising an exception.
If you pass a list of patterns and more than one matches, the first match
in the stream is chosen. If more than one pattern matches at that point,
the leftmost in the pattern list is chosen. For example::
# the input is 'foobar'
index = p.expect (['bar', 'foo', 'foobar'])
# returns 1 ('foo') even though 'foobar' is a "better" match
Please note, however, that buffering can affect this behavior, since
input arrives in unpredictable chunks. For example::
# the input is 'foobar'
index = p.expect (['foobar', 'foo'])
# returns 0 ('foobar') if all input is available at once,
# but returns 1 ('foo') if parts of the final 'bar' arrive late
After a match is found the instance attributes 'before', 'after' and
'match' will be set. You can see all the data read before the match in
'before'. You can see the data that was matched in 'after'. The
re.MatchObject used in the re match will be in 'match'. If an error
occurred then 'before' will be set to all the data read so far and
'after' and 'match' will be None.
If timeout is -1 then timeout will be set to the self.timeout value.
A list entry may be EOF or TIMEOUT instead of a string. This will
catch these exceptions and return the index of the list entry instead
of raising the exception. The attribute 'after' will be set to the
exception type. The attribute 'match' will be None. This allows you to
write code like this::
index = p.expect (['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
do_something()
elif index == 1:
do_something_else()
elif index == 2:
do_some_other_thing()
elif index == 3:
do_something_completely_different()
instead of code like this::
try:
index = p.expect (['good', 'bad'])
if index == 0:
do_something()
elif index == 1:
do_something_else()
except EOF:
do_some_other_thing()
except TIMEOUT:
do_something_completely_different()
These two forms are equivalent. It all depends on what you want. You
can also just expect the EOF if you are waiting for all output of a
child to finish. For example::
p = pexpect.spawn('/bin/ls')
p.expect (pexpect.EOF)
print p.before
If you are trying to optimize for speed then see expect_list().
"""
compiled_pattern_list = self.compile_pattern_list(pattern)
return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
def expect_list(self, pattern_list, timeout = -1, searchwindowsize = -1):
"""This takes a list of compiled regular expressions and returns the
index into the pattern_list that matched the child output. The list may
also contain EOF or TIMEOUT (which are not compiled regular
expressions). This method is similar to the expect() method except that
expect_list() does not recompile the pattern list on every call. This
may help if you are trying to optimize for speed, otherwise just use
the expect() method. This is called by expect(). If timeout==-1 then
the self.timeout value is used. If searchwindowsize==-1 then the
self.searchwindowsize value is used. """
return self.expect_loop(searcher_re(pattern_list), timeout, searchwindowsize)
def expect_exact(self, pattern_list, timeout = -1, searchwindowsize = -1):
"""This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match.
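For example, a hedged sketch (the prompt string is illustrative)::
    child.expect_exact('ftp> ') # matched literally; no regex escaping needed
"""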
if type(pattern_list) in types.StringTypes or pattern_list in (TIMEOUT, EOF):
pattern_list = [pattern_list]
return self.expect_loop(searcher_string(pattern_list), timeout, searchwindowsize)
def expect_loop(self, searcher, timeout = -1, searchwindowsize = -1):
"""This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and what
to search for in the input.
See expect() for other arguments, return value and exceptions. """
self.searcher = searcher
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
if searchwindowsize == -1:
searchwindowsize = self.searchwindowsize
try:
incoming = self.buffer
freshlen = len(incoming)
while True: # Keep reading until exception or return.
index = searcher.search(incoming, freshlen, searchwindowsize)
if index >= 0:
self.buffer = incoming[searcher.end : ]
self.before = incoming[ : searcher.start]
self.after = incoming[searcher.start : searcher.end]
self.match = searcher.match
self.match_index = index
return self.match_index
# No match at this point
if timeout < 0 and timeout is not None:
raise TIMEOUT ('Timeout exceeded in expect_any().')
# Still have time left, so read more data
c = self.read_nonblocking (self.maxread, timeout)
freshlen = len(c)
time.sleep (0.0001)
incoming = incoming + c
if timeout is not None:
timeout = end_time - time.time()
except EOF, e:
self.buffer = ''
self.before = incoming
self.after = EOF
index = searcher.eof_index
if index >= 0:
self.match = EOF
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise EOF (str(e) + '\n' + str(self))
except TIMEOUT, e:
self.buffer = incoming
self.before = incoming
self.after = TIMEOUT
index = searcher.timeout_index
if index >= 0:
self.match = TIMEOUT
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise TIMEOUT (str(e) + '\n' + str(self))
except:
self.before = incoming
self.after = None
self.match = None
self.match_index = None
raise
def getwinsize(self):
"""This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). """
TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912L)
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2]
def setwinsize(self, r, c):
"""This sets the terminal window size of the child tty. This will cause
a SIGWINCH signal to be sent to the child. This does not change the
physical window size. It changes the size reported to TTY-aware
applications like vi or curses -- applications that respond to the
SIGWINCH signal.
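For example (the dimensions are illustrative)::
    child.setwinsize(24, 80)
"""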
# Check for buggy platforms. Some Python versions on some platforms
# (notably OSF1 Alpha and RedHat 7.1) truncate the value for
# termios.TIOCSWINSZ. It is not clear why this happens.
# These platforms don't seem to handle the signed int very well;
# yet other platforms like OpenBSD have a large negative value for
# TIOCSWINSZ and they don't have a truncate problem.
# Newer versions of Linux have totally different values for TIOCSWINSZ.
# Note that this fix is a hack.
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
if TIOCSWINSZ == 2148037735L: # L is not required in Python >= 2.2.
TIOCSWINSZ = -2146929561 # Same bits, but with sign.
# Note, assume ws_xpixel and ws_ypixel are zero.
s = struct.pack('HHHH', r, c, 0, 0)
fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
def interact(self, escape_character = chr(29), input_filter = None, output_filter = None):
"""This gives control of the child process to the interactive user (the
human at the keyboard). Keystrokes are sent to the child process, and
the stdout and stderr output of the child process is printed. This
simply echos the child stdout and child stderr to the real stdout and
it echos the real stdin to the child stdin. When the user types the
escape_character this method will stop. The default for
escape_character is ^]. This should not be confused with ASCII 27 --
the ESC character. ASCII 29 was chosen for historical merit because
this is the character used by 'telnet' as the escape character. The
escape_character will not be sent to the child process.
You may pass in optional input and output filter functions. These
functions should take a string and return a string. The output_filter
will be passed all the output from the child process. The input_filter
will be passed all the keyboard input from the user. The input_filter
is run BEFORE the check for the escape_character.
Note that if you change the window size of the parent the SIGWINCH
signal will not be passed through to the child. If you want the child
window size to change when the parent's window size changes then do
something like the following example::
import pexpect, struct, fcntl, termios, signal, sys
def sigwinch_passthrough (sig, data):
s = struct.pack("HHHH", 0, 0, 0, 0)
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s))
global p
p.setwinsize(a[0],a[1])
p = pexpect.spawn('/bin/bash') # Note this is global and used in sigwinch_passthrough.
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
p.interact()
"""
# Flush the buffer.
self.stdout.write (self.buffer)
self.stdout.flush()
self.buffer = ''
mode = tty.tcgetattr(self.STDIN_FILENO)
tty.setraw(self.STDIN_FILENO)
try:
self.__interact_copy(escape_character, input_filter, output_filter)
finally:
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
"""This is used by the interact() method.
"""
while data != '' and self.isalive():
n = os.write(fd, data)
data = data[n:]
def __interact_read(self, fd):
"""This is used by the interact() method.
"""
return os.read(fd, 1000)
def __interact_copy(self, escape_character = None, input_filter = None, output_filter = None):
"""This is used by the interact() method.
"""
while self.isalive():
r,w,e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
if self.child_fd in r:
data = self.__interact_read(self.child_fd)
if output_filter: data = output_filter(data)
if self.logfile is not None:
self.logfile.write (data)
self.logfile.flush()
os.write(self.STDOUT_FILENO, data)
if self.STDIN_FILENO in r:
data = self.__interact_read(self.STDIN_FILENO)
if input_filter: data = input_filter(data)
i = data.rfind(escape_character)
if i != -1:
data = data[:i]
self.__interact_writen(self.child_fd, data)
break
self.__interact_writen(self.child_fd, data)
def __select (self, iwtd, owtd, ewtd, timeout=None):
"""This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). """
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select (iwtd, owtd, ewtd, timeout)
except select.error, e:
if e[0] == errno.EINTR:
# if we loop back we have to subtract the amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([],[],[])
else: # something else caused the select.error, so this really is an exception
raise
##############################################################################
# The following methods are no longer supported or allowed.
def setmaxread (self, maxread):
"""This method is no longer supported or allowed. I don't like getters
and setters without a good reason. """
raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the maxread member variable.')
def setlog (self, fileobject):
"""This method is no longer supported or allowed.
"""
raise ExceptionPexpect ('This method is no longer supported or allowed. Just assign a value to the logfile member variable.')
##############################################################################
# End of spawn class
##############################################################################
class searcher_string (object):
"""This is a plain string search helper for the spawn.expect_any() method.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the matching string itself
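A hedged usage sketch (the buffer contents are illustrative)::
    s = searcher_string(['password:', pexpect.EOF])
    i = s.search('login: password:', freshlen=16)
    # i == 0; s.start and s.end delimit 'password:' in the buffer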
"""
def __init__(self, strings):
"""This creates an instance of searcher_string. This argument 'strings'
may be a list; a sequence of strings; or the EOF or TIMEOUT types. """
self.eof_index = -1
self.timeout_index = -1
self._strings = []
for n, s in zip(range(len(strings)), strings):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._strings.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [ (ns[0],' %d: "%s"' % ns) for ns in self._strings ]
ss.append((-1,'searcher_string:'))
if self.eof_index >= 0:
ss.append ((self.eof_index,' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append ((self.timeout_index,' %d: TIMEOUT' % self.timeout_index))
ss.sort()
ss = zip(*ss)[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the search
strings. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before. It helps to avoid
searching the same, possibly big, buffer over and over again.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, this returns -1. """
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' helps a lot here. Further optimizations could
# possibly include:
#
# using something like the Boyer-Moore Fast String Searching
# Algorithm; pre-compiling the search through a list of
# strings into something that can scan the input once to
# search for all N strings; realize that if we search for
# ['bar', 'baz'] and the input is '...foo' we need not bother
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
# or at the very end of the old data
offset = -(freshlen+len(s))
else:
# better obey searchwindowsize
offset = -searchwindowsize
n = buffer.find(s, offset)
if n >= 0 and n < first_match:
first_match = n
best_index, best_match = index, s
if first_match == absurd_match:
return -1
self.match = best_match
self.start = first_match
self.end = self.start + len(self.match)
return best_index
class searcher_re (object):
"""This is regular expression string search helper for the
spawn.expect_any() method.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the re.match object returned by a successful re.search
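A hedged usage sketch (the pattern and buffer are illustrative)::
    s = searcher_re([re.compile('[0-9]+'), pexpect.TIMEOUT])
    i = s.search('abc 123', freshlen=7)
    # i == 0; s.match.group() == '123'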
"""
def __init__(self, patterns):
"""This creates an instance that searches for 'patterns' Where
'patterns' may be a list or other sequence of compiled regular
expressions, or the EOF or TIMEOUT types."""
self.eof_index = -1
self.timeout_index = -1
self._searches = []
for n, s in zip(range(len(patterns)), patterns):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._searches.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [ (n,' %d: re.compile("%s")' % (n,str(s.pattern))) for n,s in self._searches]
ss.append((-1,'searcher_re:'))
if self.eof_index >= 0:
ss.append ((self.eof_index,' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append ((self.timeout_index,' %d: TIMEOUT' % self.timeout_index))
ss.sort()
ss = zip(*ss)[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the regular
expressions. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, returns -1."""
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' doesn't help here -- we cannot predict the
# length of a match, and the re module provides no help.
if searchwindowsize is None:
searchstart = 0
else:
searchstart = max(0, len(buffer)-searchwindowsize)
for index, s in self._searches:
match = s.search(buffer, searchstart)
if match is None:
continue
n = match.start()
if n < first_match:
first_match = n
the_match = match
best_index = index
if first_match == absurd_match:
return -1
self.start = first_match
self.match = the_match
self.end = self.match.end()
return best_index
def which (filename):
"""This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None."""
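For example (the result path is illustrative)::
    which('ls') # e.g. '/bin/ls', or None if 'ls' is not on the PATH
"""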
# Special case where filename already contains a path.
if os.path.dirname(filename) != '':
if os.access (filename, os.X_OK):
return filename
if not os.environ.has_key('PATH') or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
# Oddly enough this was the one line that made Pexpect
# incompatible with Python 1.5.2.
#pathlist = p.split (os.pathsep)
pathlist = string.split (p, os.pathsep)
for path in pathlist:
f = os.path.join(path, filename)
if os.access(f, os.X_OK):
return f
return None
def split_command_line(command_line):
"""This splits a command line into a list of arguments. It splits arguments
on spaces, but handles embedded quotes, doublequotes, and escaped
characters. It's impossible to do this with a regular expression, so I
wrote a little state machine to parse the command line. """
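For example::
    split_command_line('ls -l "My Documents"')
    # -> ['ls', '-l', 'My Documents']
"""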
arg_list = []
arg = ''
# Constants to name the states we can be in.
state_basic = 0
state_esc = 1
state_singlequote = 2
state_doublequote = 3
state_whitespace = 4 # The state of consuming whitespace between commands.
state = state_basic
for c in command_line:
if state == state_basic or state == state_whitespace:
if c == '\\': # Escape the next character
state = state_esc
elif c == r"'": # Handle single quote
state = state_singlequote
elif c == r'"': # Handle double quote
state = state_doublequote
elif c.isspace():
# Add arg to arg_list if we aren't in the middle of whitespace.
if state == state_whitespace:
pass # Do nothing.
else:
arg_list.append(arg)
arg = ''
state = state_whitespace
else:
arg = arg + c
state = state_basic
elif state == state_esc:
arg = arg + c
state = state_basic
elif state == state_singlequote:
if c == r"'":
state = state_basic
else:
arg = arg + c
elif state == state_doublequote:
if c == r'"':
state = state_basic
else:
arg = arg + c
if arg != '':
arg_list.append(arg)
return arg_list
# vi:ts=4:sw=4:expandtab:ft=python:
|
apache-2.0
|
google/llvm-propeller
|
lldb/test/API/lang/objc/objc-struct-argument/TestObjCStructArgument.py
|
3
|
2329
|
"""Test passing structs to Objective-C methods."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestObjCStructArgument(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers to break inside main().
self.main_source = "test.m"
self.break_line = line_number(
self.main_source, '// Set breakpoint here.')
@skipUnlessDarwin
@add_test_categories(['pyapi'])
@skipIf(debug_info=no_match(["gmodules"]), oslist=['ios', 'watchos', 'tvos', 'bridgeos'], archs=['armv7', 'arm64']) # this test program only builds for ios with -gmodules
def test_with_python_api(self):
"""Test passing structs to Objective-C methods."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
bpt = target.BreakpointCreateByLocation(
self.main_source, self.break_line)
self.assertTrue(bpt, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
thread_list = lldbutil.get_threads_stopped_at_breakpoint(process, bpt)
# Make sure we stopped at the first breakpoint.
self.assertTrue(
len(thread_list) != 0,
"No thread stopped at our breakpoint.")
self.assertEquals(len(thread_list), 1,
"More than one thread stopped at our breakpoint.")
frame = thread_list[0].GetFrameAtIndex(0)
self.assertTrue(frame, "Got a valid frame 0.")
self.expect("p [summer sumThings:tts]", substrs=['9'])
self.expect(
"po [NSValue valueWithRect:rect]",
substrs=['NSRect: {{0, 0}, {10, 20}}'])
# Now make sure we can call a method that returns a struct without
# crashing.
cmd_value = frame.EvaluateExpression("[provider getRange]")
self.assertTrue(cmd_value.IsValid())
|
apache-2.0
|
sharifulgeo/networkx
|
networkx/algorithms/components/tests/test_semiconnected.py
|
64
|
1901
|
from itertools import chain
import networkx as nx
from nose.tools import *
class TestIsSemiconnected(object):
def test_undirected(self):
assert_raises(nx.NetworkXNotImplemented, nx.is_semiconnected,
nx.Graph())
assert_raises(nx.NetworkXNotImplemented, nx.is_semiconnected,
nx.MultiGraph())
def test_empty(self):
assert_raises(nx.NetworkXPointlessConcept, nx.is_semiconnected,
nx.DiGraph())
assert_raises(nx.NetworkXPointlessConcept, nx.is_semiconnected,
nx.MultiDiGraph())
def test_single_node_graph(self):
G = nx.DiGraph()
G.add_node(0)
ok_(nx.is_semiconnected(G))
def test_path(self):
G = nx.path_graph(100, create_using=nx.DiGraph())
ok_(nx.is_semiconnected(G))
G.add_edge(100, 99)
ok_(not nx.is_semiconnected(G))
def test_cycle(self):
G = nx.cycle_graph(100, create_using=nx.DiGraph())
ok_(nx.is_semiconnected(G))
G = nx.path_graph(100, create_using=nx.DiGraph())
G.add_edge(0, 99)
ok_(nx.is_semiconnected(G))
def test_tree(self):
G = nx.DiGraph()
G.add_edges_from(chain.from_iterable([(i, 2 * i + 1), (i, 2 * i + 2)]
for i in range(100)))
ok_(not nx.is_semiconnected(G))
def test_dumbbell(self):
G = nx.cycle_graph(100, create_using=nx.DiGraph())
G.add_edges_from((i + 100, (i + 1) % 100 + 100) for i in range(100))
ok_(not nx.is_semiconnected(G)) # G is disconnected.
G.add_edge(100, 99)
ok_(nx.is_semiconnected(G))
def test_alternating_path(self):
G = nx.DiGraph(chain.from_iterable([(i, i - 1), (i, i + 1)]
for i in range(0, 100, 2)))
ok_(not nx.is_semiconnected(G))
|
bsd-3-clause
|
oliviertilmans/ipmininet
|
ipmininet/host/config/named.py
|
1
|
15108
|
"""Base classes to configure a Named daemon"""
import os
from typing import List, Union, Sequence, Optional
from ipaddress import IPv4Address, IPv6Address, ip_address
from mininet.log import lg
from ipmininet.overlay import Overlay
from ipmininet.utils import realIntfList, find_node, has_cmd
from ipmininet.router.config.utils import ConfigDict
from .base import HostDaemon
DNS_REFRESH = 86400
DNS_RETRY = 7200
DNS_EXPIRE = 3600000
DNS_MIN_TTL = 172800
class Named(HostDaemon):
NAME = 'named'
KILL_PATTERNS = (NAME,)
def __init__(self, node, **kwargs):
# Check if apparmor is enabled in the distribution
self.apparmor = has_cmd("aa-exec")
super().__init__(node, **kwargs)
@property
def startup_line(self):
# This runs the daemon outside of AppArmor's restrictions
return '{apparmor}{name} -c {cfg} -f -u root -p {port}' \
.format(apparmor="aa-exec -p unconfined " if self.apparmor else "",
name=self.NAME,
cfg=self.cfg_filename,
port=self.options.dns_server_port)
@property
def dry_run(self):
return '{name} {cfg}' \
.format(name='named-checkconf', cfg=self.cfg_filename)
def build(self):
cfg = super().build()
cfg.log_severity = self.options.log_severity
cfg.abs_logfile = os.path.abspath(cfg.logfile)
cfg.zones = ConfigDict()
for zone in self._node.get('dns_zones', []):
cfg.zones[self.zone_filename(zone.name)] = self.build_zone(zone)
self.build_reverse_zone(cfg.zones)
return cfg
def build_zone(self, zone: 'DNSZone') -> ConfigDict:
master_ips = []
for s_name in zone.servers + [zone.dns_master] + zone.dns_slaves:
server_itf = find_node(self._node, s_name)
if server_itf is None:
lg.error("Cannot find the server node {name} of DNS zone"
" {zone}. Are you sure that they are connected to "
"the current node {current}?"
.format(name=s_name, zone=zone.name,
current=self._node.name))
continue
server = server_itf.node
for itf in realIntfList(server):
for ip in itf.ips():
if ".arpa" not in zone.name: # Not a Reverse zone
zone.soa_record.add_record(ARecord(s_name,
ip.ip.compressed))
if s_name == zone.dns_master:
master_ips.append(ip.ip.compressed)
for ip6 in itf.ip6s(exclude_lls=True):
if ".arpa" not in zone.name: # Not a Reverse zone
zone.soa_record.add_record(
AAAARecord(s_name, ip6.ip.compressed))
if s_name == zone.dns_master:
master_ips.append(ip6.ip.compressed)
return ConfigDict(name=zone.soa_record.domain_name,
soa_record=zone.soa_record,
records=zone.soa_record.records,
master=self._node.name == zone.dns_master,
master_ips=master_ips)
def build_reverse_zone(self, cfg_zones: ConfigDict):
"""
Build missing PTR records, then add them to an existing reverse zone
when one matches. The remaining records are inserted into a new reverse
zone that is added to the cfg_zones dictionary.
"""
# Build PTR records
ptr_records = []
for zone in cfg_zones.values():
for record in zone.soa_record.records:
if record.rtype != "A" and record.rtype != "AAAA":
continue
domain_name = record.domain_name if record.full_domain_name \
else record.domain_name + "." + zone.name
ptr_records.append(PTRRecord(record.address, domain_name,
ttl=record.ttl))
existing_records = [record for zone in cfg_zones.values()
for record in zone.soa_record.records
if record.rtype == "PTR"]
ptr_v6_records = []
ptr_v4_records = []
for record in ptr_records:
# Filter out existing PTR records
if record in existing_records:
continue
# Try to place the rest in existing reverse DNS zones
found = False
for zone in cfg_zones.values():
if zone.name in record.domain_name:
zone.soa_record.records.append(record)
found = True
break
# The rest needs a new DNS zone
if not found:
if record.v6:
ptr_v6_records.append(record)
else:
ptr_v4_records.append(record)
# Create new reverse DNS zones for remaining PTR records
if len(ptr_v6_records) > 0:
self.build_largest_reverse_zone(cfg_zones, ptr_v6_records)
if len(ptr_v4_records) > 0:
self.build_largest_reverse_zone(cfg_zones, ptr_v4_records)
def build_largest_reverse_zone(self, cfg_zones: ConfigDict,
records: List[Union['PTRRecord',
'NSRecord']]):
"""
Create the ConfigDict object representing a new reverse zone whose
prefix is the largest one that includes all the PTR records.
Then it adds it to the cfg_zones dict.
:param cfg_zones: The dict of ConfigDict representing existing zones
:param records: The list of PTR records to place in a new reverse zone
"""
if len(records) == 0:
return
# Find common prefix between all records
common = records[0].domain_name.split(".")
for i in range(1, len(records)):
prefix = records[i].domain_name.split(".")
for j in range(1, len(common)):
if prefix[len(prefix)-j] != common[len(common)-j]:
common = common[len(prefix)+1-j:]
break
domain_name = ".".join(common)
# Retrieve the NS Record for the new zone
ns_record = None
for zone in cfg_zones.values():
if "arpa" in zone.name:
continue
for record in zone.soa_record.records:
if record.rtype == "NS" \
and self._node.name in record.name_server:
ns_record = NSRecord(record.domain_name, self._node.name)
ns_record.domain_name = domain_name
if ns_record is None:
lg.warning("Cannot forge a DNS reverse zone because there is no"
" NS Record for this node in regular zones.\n")
return
records.append(ns_record)
# Build the reverse zone
soa_record = SOARecord(domain_name=domain_name, records=records)
reverse_zone = ConfigDict(name=soa_record.domain_name,
soa_record=soa_record,
records=soa_record.records,
master=True,
master_ips=[])
self._node.params.setdefault('dns_zones', []).append(reverse_zone)
cfg_zones[self.zone_filename(reverse_zone.name)] = reverse_zone
def set_defaults(self, defaults):
""":param log_severity: It controls the logging levels and may take the
values defined. Logging will occur for any message equal to or
higher than the level specified (=>) lower levels will not be
logged. These levels are 'critical', 'error', 'warning',
'notice', 'info', 'debug' and 'dynamic'.
:param dns_server_port: The port number of the dns server"""
defaults.log_severity = "warning"
defaults.dns_server_port = 53
super().set_defaults(defaults)
def zone_filename(self, domain_name: str) -> str:
return self._file(suffix='%s.cfg' % domain_name)
@property
def cfg_filenames(self):
return super().cfg_filenames + \
[self.zone_filename(z.name)
for z in self._node.get('dns_zones', [])]
@property
def template_filenames(self):
return super().template_filenames + \
["%s-zone.mako" % self.NAME
for _ in self._node.get('dns_zones', [])]
class DNSRecord:
def __init__(self, rtype: str, domain_name: str, ttl=60):
self.rtype = rtype
self.domain_name = domain_name
self.ttl = ttl
if self.domain_name[-1] != "." and "." in self.domain_name:
# Full DNS names should be ended by a dot in the config
self.domain_name = self.domain_name + "."
@property
def rdata(self) -> str:
return ""
@property
def full_domain_name(self) -> bool:
return "." in self.domain_name
def __eq__(self, other):
return self.rtype == other.rtype \
and self.domain_name == other.domain_name \
and self.rdata == other.rdata
class ARecord(DNSRecord):
def __init__(self, domain_name,
address: Union[str, IPv4Address, IPv6Address], ttl=60):
self.address = ip_address(str(address))
rtype = "A" if self.address.version == 4 else "AAAA"
super().__init__(rtype=rtype, domain_name=domain_name, ttl=ttl)
@property
def rdata(self):
return self.address.compressed
class AAAARecord(ARecord):
pass # ARecord already handles IPv6 addresses
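# A minimal usage sketch (hypothetical names, not part of the original
# module): the record type is inferred from the address family.
#
#     ARecord("h1.example.org", "192.0.2.1").rtype    # -> "A"
#     ARecord("h1.example.org", "2001:db8::1").rtype  # -> "AAAA"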
class PTRRecord(DNSRecord):
def __init__(self, address: Union[str, IPv4Address, IPv6Address],
domain_name: str, ttl=60):
self.address = ip_address(str(address))
self.mapped_domain_name = domain_name
if self.mapped_domain_name[-1] != "." \
and "." in self.mapped_domain_name:
# Full DNS names should be ended by a dot in the config
self.mapped_domain_name = self.mapped_domain_name + "."
super().__init__("PTR", self.address.reverse_pointer, ttl=ttl)
@property
def v6(self):
return self.address.version == 6
@property
def rdata(self):
return self.mapped_domain_name
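# Illustration (hypothetical values): the record's own domain name is the
# reverse pointer of the address, while rdata is the mapped forward name.
#
#     ptr = PTRRecord("192.0.2.1", "h1.example.org")
#     ptr.domain_name  # -> "1.2.0.192.in-addr.arpa."
#     ptr.rdata        # -> "h1.example.org."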
class NSRecord(DNSRecord):
def __init__(self, domain_name, name_server: str, ttl=60):
super().__init__(rtype="NS", domain_name=domain_name, ttl=ttl)
self.name_server = name_server
if "." not in self.name_server:
self.name_server = self.name_server + "." + self.domain_name
if self.name_server[-1] != ".":
# Full DNS names should be ended by a dot in the config
self.name_server = self.name_server + "."
@property
def rdata(self):
return self.name_server
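# Sketch (hypothetical names): a bare server name is qualified with the
# zone's domain, while an already-qualified name only gains the trailing dot.
#
#     NSRecord("example.org", "dns1").name_server              # -> "dns1.example.org."
#     NSRecord("example.org", "dns1.example.org").name_server  # -> "dns1.example.org."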
class SOARecord(DNSRecord):
def __init__(self, domain_name, refresh_time=DNS_REFRESH,
retry_time=DNS_RETRY, expire_time=DNS_EXPIRE,
min_ttl=DNS_MIN_TTL, records: Sequence[DNSRecord] = ()):
super().__init__(rtype="SOA", domain_name=domain_name, ttl=min_ttl)
self.refresh_time = refresh_time
self.retry_time = retry_time
self.expire_time = expire_time
self._records = list(records)
@property
def rdata(self):
return "{domain_name} sysadmin.{domain_name} (\n1 ; serial\n{refresh}" \
" ; refresh timer\n{retry} ; retry timer\n{expire}" \
" ; retry timer\n{min_ttl} ; minimum ttl\n)"\
.format(domain_name=self.domain_name, refresh=self.refresh_time,
retry=self.retry_time, expire=self.expire_time,
min_ttl=self.ttl)
@property
def records(self):
return self._records
def add_record(self, record: DNSRecord):
if record not in self._records:
self._records.append(record)
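# Sketch (hypothetical records): add_record relies on DNSRecord.__eq__ to
# silently drop duplicates.
#
#     soa = SOARecord("example.org")
#     soa.add_record(ARecord("h1", "192.0.2.1"))
#     soa.add_record(ARecord("h1", "192.0.2.1"))  # same rtype/name/rdata: ignored
#     len(soa.records)  # -> 1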
class DNSZone(Overlay):
def __init__(self, name: str, dns_master: str,
dns_slaves: Sequence[str] = (),
records: Sequence[DNSRecord] = (), nodes: Sequence[str] = (),
refresh_time=DNS_REFRESH, retry_time=DNS_RETRY,
expire_time=DNS_EXPIRE, min_ttl=DNS_MIN_TTL,
ns_domain_name: Optional[str] = None):
"""
:param name: The domain name of the zone
:param dns_master: The name of the master DNS server
:param dns_slaves: The list of names of DNS slaves
:param records: The list of DNS Records to be included in the zone
        :param nodes: The list of nodes for which an A/AAAA record is created
                      for each of their IPv4/IPv6 addresses
:param refresh_time: The number of seconds before the zone should be
refreshed
:param retry_time: The number of seconds before a failed refresh should
be retried
:param expire_time: The upper limit in seconds before a zone is
considered no longer authoritative
:param min_ttl: The negative result TTL
        :param ns_domain_name: If defined, the suffix of the name servers'
                               domain; otherwise, parameter 'name' is used.
"""
self.name = name
self.dns_master = dns_master
self.dns_slaves = list(dns_slaves)
self.records = records
self.servers = list(nodes)
self.soa_record = SOARecord(name, refresh_time=refresh_time,
retry_time=retry_time,
expire_time=expire_time, min_ttl=min_ttl,
records=records)
super().__init__(nodes=[dns_master] + list(dns_slaves))
self.consistent = True
for node_name in [dns_master] + self.dns_slaves + self.servers:
if "." in node_name:
lg.error("Cannot create zone {name} because the node name"
" {node_name} contains a '.'"
.format(name=name, node_name=node_name))
self.consistent = False
self.ns_domain_name = ns_domain_name if ns_domain_name is not None \
else self.name
def check_consistency(self, topo):
return super().check_consistency(topo) and self.consistent
def apply(self, topo):
super().apply(topo)
if not self.consistent:
return
# Add NS Records (if not already present)
for n in self.nodes:
self.soa_record.add_record(NSRecord(self.name,
n + "." + self.ns_domain_name))
for n in self.nodes:
topo.nodeInfo(n).setdefault("dns_zones", []).append(self)
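# A hypothetical topology snippet (node names invented, and assuming an
# IPTopo-style addOverlay helper) showing how the overlay is typically
# attached when a topology is built:
#
#     topo.addOverlay(DNSZone(name="example.org", dns_master="dns1",
#                             dns_slaves=["dns2"], nodes=["h1", "h2"]))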
|
gpl-2.0
|
a113n/bcbio-nextgen
|
bcbio/distributed/multi.py
|
4
|
3761
|
"""Run tasks in parallel on a single machine using multiple cores.
"""
import functools
try:
import joblib
except ImportError:
    joblib = None
from bcbio.distributed import resources
from bcbio.log import logger, setup_local_logging
from bcbio.pipeline import config_utils
from bcbio.provenance import diagnostics, system
def runner(parallel, config):
"""Run functions, provided by string name, on multiple cores on the current machine.
"""
def run_parallel(fn_name, items):
items = [x for x in items if x is not None]
if len(items) == 0:
return []
items = diagnostics.track_parallel(items, fn_name)
fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (get_fn(fn_name, parallel), fn_name)
logger.info("multiprocessing: %s" % fn_name)
if "wrapper" in parallel:
wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "checkpointed"])}
items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items]
return run_multicore(fn, items, config, parallel=parallel)
return run_parallel
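# A hypothetical usage sketch (configuration values invented): the returned
# closure is what pipeline code calls with a task name and a list of
# per-item argument lists.
#
#     run_parallel = runner({"type": "local", "cores": 4}, config)
#     out = run_parallel("process_alignment", [[data1], [data2]])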
def get_fn(fn_name, parallel):
taskmod = "multitasks"
imodule = parallel.get("module", "bcbio.distributed")
import_fn_name = parallel.get("wrapper", fn_name)
return getattr(__import__("{base}.{taskmod}".format(base=imodule, taskmod=taskmod),
fromlist=[taskmod]),
import_fn_name)
def zeromq_aware_logging(f):
"""Ensure multiprocessing logging uses ZeroMQ queues.
ZeroMQ and local stdout/stderr do not behave nicely when intertwined. This
ensures the local logging uses existing ZeroMQ logging queues.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
config = None
for arg in args:
if config_utils.is_std_config_arg(arg):
config = arg
break
elif config_utils.is_nested_config_arg(arg):
config = arg["config"]
elif isinstance(arg, (list, tuple)) and config_utils.is_nested_config_arg(arg[0]):
config = arg[0]["config"]
break
assert config, "Could not find config dictionary in function arguments."
if config.get("parallel", {}).get("log_queue") and not config.get("parallel", {}).get("wrapper"):
handler = setup_local_logging(config, config["parallel"])
else:
handler = None
try:
out = f(*args, **kwargs)
finally:
if handler and hasattr(handler, "close"):
handler.close()
return out
return wrapper
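# Sketch of typical use (hypothetical task): one positional argument must
# carry the config dictionary so the decorator can find the logging queue.
#
#     @zeromq_aware_logging
#     def my_task(data):          # data is expected to contain data["config"]
#         logger.info("working")
#         return [data]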
def run_multicore(fn, items, config, parallel=None):
"""Run the function using multiple cores on the given items to process.
"""
if len(items) == 0:
return []
if parallel is None or "num_jobs" not in parallel:
if parallel is None:
parallel = {"type": "local", "cores": config["algorithm"].get("num_cores", 1)}
sysinfo = system.get_info({}, parallel)
parallel = resources.calculate(parallel, items, sysinfo, config,
parallel.get("multiplier", 1),
max_multicore=int(parallel.get("max_multicore", sysinfo["cores"])))
items = [config_utils.add_cores_to_config(x, parallel["cores_per_job"]) for x in items]
if joblib is None:
raise ImportError("Need joblib for multiprocessing parallelization")
out = []
for data in joblib.Parallel(parallel["num_jobs"], batch_size=1, backend="multiprocessing")(joblib.delayed(fn)(*x) for x in items):
if data:
out.extend(data)
return out
|
mit
|
quizlet/grpc
|
tools/run_tests/python_utils/upload_test_results.py
|
7
|
4239
|
#!/usr/bin/env python
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to upload Jenkins test results to BQ"""
from __future__ import print_function
import os
import six
import sys
import time
import uuid
gcp_utils_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
_DATASET_ID = 'jenkins_test_results'
_DESCRIPTION = 'Test results from master job run on Jenkins'
# 90 days in milliseconds
_EXPIRATION_MS = 90 * 24 * 60 * 60 * 1000
_PARTITION_TYPE = 'DAY'
_PROJECT_ID = 'grpc-testing'
_RESULTS_SCHEMA = [
('job_name', 'STRING', 'Name of Jenkins job'),
('build_id', 'INTEGER', 'Build ID of Jenkins job'),
('build_url', 'STRING', 'URL of Jenkins job'),
('test_name', 'STRING', 'Individual test name'),
('language', 'STRING', 'Language of test'),
('platform', 'STRING', 'Platform used for test'),
('config', 'STRING', 'Config used for test'),
('compiler', 'STRING', 'Compiler used for test'),
('iomgr_platform', 'STRING', 'Iomgr used for test'),
('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
('elapsed_time', 'FLOAT', 'How long test took to run'),
('cpu_estimated', 'FLOAT', 'Estimated CPU usage of test'),
('cpu_measured', 'FLOAT', 'Actual CPU usage of test'),
('return_code', 'INTEGER', 'Exit code of test'),
]
def _get_build_metadata(test_results):
"""Add Jenkins/Kokoro build metadata to test_results based on environment
variables set by Jenkins/Kokoro.
"""
build_id = os.getenv('BUILD_ID') or os.getenv('KOKORO_BUILD_NUMBER')
build_url = os.getenv('BUILD_URL') or os.getenv('KOKORO_BUILD_URL')
job_name = os.getenv('JOB_BASE_NAME') or os.getenv('KOKORO_JOB_NAME')
if build_id:
test_results['build_id'] = build_id
if build_url:
test_results['build_url'] = build_url
if job_name:
test_results['job_name'] = job_name
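# Example (hypothetical values): with BUILD_ID=1234, BUILD_URL=http://... and
# JOB_BASE_NAME=grpc_master exported by Jenkins, the dict gains
#     {'build_id': '1234', 'build_url': 'http://...', 'job_name': 'grpc_master'}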
def upload_results_to_bq(resultset, bq_table, args, platform):
"""Upload test results to a BQ table.
Args:
resultset: dictionary generated by jobset.run
bq_table: string name of table to create/upload results to in BQ
args: args in run_tests.py, generated by argparse
platform: string name of platform tests were run on
"""
bq = big_query_utils.create_big_query()
big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _RESULTS_SCHEMA, _DESCRIPTION,
                                             partition_type=_PARTITION_TYPE, expiration_ms=_EXPIRATION_MS)
for shortname, results in six.iteritems(resultset):
for result in results:
test_results = {}
_get_build_metadata(test_results)
test_results['compiler'] = args.compiler
test_results['config'] = args.config
test_results['cpu_estimated'] = result.cpu_estimated
test_results['cpu_measured'] = result.cpu_measured
test_results['elapsed_time'] = '%.2f' % result.elapsed_time
test_results['iomgr_platform'] = args.iomgr_platform
            # args.language is a list, but it always has exactly one element
            # in the contexts in which this function is used.
test_results['language'] = args.language[0]
test_results['platform'] = platform
test_results['result'] = result.state
test_results['return_code'] = result.returncode
test_results['test_name'] = shortname
test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
print('Error uploading result to bigquery.')
sys.exit(1)
|
apache-2.0
|
tcheehow/MissionPlanner
|
Lib/email/_parseaddr.py
|
53
|
16241
|
# Copyright (C) 2002-2007 Python Software Foundation
# Contact: [email protected]
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
__all__ = [
'mktime_tz',
'parsedate',
'parsedate_tz',
'quote',
]
import time
SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '
# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec',
'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500, # Central
'MST': -700, 'MDT': -600, # Mountain
'PST': -800, 'PDT': -700 # Pacific
}
def parsedate_tz(data):
"""Convert a date string to a time tuple.
Accounts for military timezones.
"""
data = data.split()
# The FWS after the comma after the day-of-week is optional, so search and
# adjust for this.
if data[0].endswith(',') or data[0].lower() in _daynames:
# There's a dayname here. Skip it
del data[0]
else:
i = data[0].rfind(',')
if i >= 0:
data[0] = data[0][i+1:]
if len(data) == 3: # RFC 850 date, deprecated
stuff = data[0].split('-')
if len(stuff) == 3:
data = stuff + data[1:]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('') # Dummy tz
if len(data) < 5:
return None
data = data[:5]
[dd, mm, yy, tm, tz] = data
mm = mm.lower()
if mm not in _monthnames:
dd, mm = mm, dd.lower()
if mm not in _monthnames:
return None
mm = _monthnames.index(mm) + 1
if mm > 12:
mm -= 12
if dd[-1] == ',':
dd = dd[:-1]
i = yy.find(':')
if i > 0:
yy, tm = tm, yy
if yy[-1] == ',':
yy = yy[:-1]
if not yy[0].isdigit():
yy, tz = tz, yy
if tm[-1] == ',':
tm = tm[:-1]
tm = tm.split(':')
if len(tm) == 2:
[thh, tmm] = tm
tss = '0'
elif len(tm) == 3:
[thh, tmm, tss] = tm
else:
return None
try:
yy = int(yy)
dd = int(dd)
thh = int(thh)
tmm = int(tmm)
tss = int(tss)
except ValueError:
return None
# Check for a yy specified in two-digit format, then convert it to the
# appropriate four-digit format, according to the POSIX standard. RFC 822
# calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
# mandates a 4-digit yy. For more information, see the documentation for
# the time module.
if yy < 100:
# The year is between 1969 and 1999 (inclusive).
if yy > 68:
yy += 1900
# The year is between 2000 and 2068 (inclusive).
else:
yy += 2000
tzoffset = None
tz = tz.upper()
if tz in _timezones:
tzoffset = _timezones[tz]
else:
try:
tzoffset = int(tz)
except ValueError:
pass
    # Convert a timezone offset into seconds; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
# Daylight Saving Time flag is set to -1, since DST is unknown.
return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
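# Example (a minimal sketch of the return value):
#     parsedate_tz('Mon, 20 Nov 1995 19:12:08 -0500')
#     -> (1995, 11, 20, 19, 12, 8, 0, 1, -1, -18000)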
def parsedate(data):
"""Convert a time string to a time tuple."""
t = parsedate_tz(data)
if isinstance(t, tuple):
return t[:9]
else:
return t
def mktime_tz(data):
"""Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = time.mktime(data[:8] + (0,))
return t - data[9] - time.timezone
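# Example (continuing the sketch above; the -0500 offset is folded into UTC):
#     mktime_tz(parsedate_tz('Mon, 20 Nov 1995 19:12:08 -0500'))
#     -> 816912728.0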
def quote(str):
"""Prepare string to be used in a quoted string.
Turns backslash and double quote characters into quoted pairs. These
are the only characters that need to be quoted inside a quoted string.
Does not add the surrounding double quotes.
"""
return str.replace('\\', '\\\\').replace('"', '\\"')
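# Example: only backslashes and double quotes are escaped.
#     quote(r'say "hi"')  # -> 'say \\"hi\\"'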
class AddrlistClass:
"""Address parser class by Ben Escoto.
To understand what this class does, it helps to have a copy of RFC 2822 in
front of you.
Note: this class interface is deprecated and may be removed in the future.
Use rfc822.AddressList instead.
"""
def __init__(self, field):
"""Initialize a new instance.
`field' is an unparsed address header field, containing
one or more addresses.
"""
self.specials = '()<>@,:;.\"[]'
self.pos = 0
self.LWS = ' \t'
self.CR = '\r\n'
self.FWS = self.LWS + self.CR
self.atomends = self.specials + self.LWS + self.CR
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
# syntax, so allow dots in phrases.
self.phraseends = self.atomends.replace('.', '')
self.field = field
self.commentlist = []
def gotonext(self):
"""Parse up to the start of the next address."""
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS + '\n\r':
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
else:
break
def getaddrlist(self):
"""Parse all addresses.
Returns a list containing all of the addresses.
"""
result = []
while self.pos < len(self.field):
ad = self.getaddress()
if ad:
result += ad
else:
result.append(('', ''))
return result
def getaddress(self):
"""Parse the next address."""
self.commentlist = []
self.gotonext()
oldpos = self.pos
oldcl = self.commentlist
plist = self.getphraselist()
self.gotonext()
returnlist = []
if self.pos >= len(self.field):
# Bad email address technically, no domain.
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in '.@':
# email address is just an addrspec
# this isn't very efficient since we start over
self.pos = oldpos
self.commentlist = oldcl
addrspec = self.getaddrspec()
returnlist = [(SPACE.join(self.commentlist), addrspec)]
elif self.field[self.pos] == ':':
# address is a group
returnlist = []
fieldlen = len(self.field)
self.pos += 1
while self.pos < len(self.field):
self.gotonext()
if self.pos < fieldlen and self.field[self.pos] == ';':
self.pos += 1
break
returnlist = returnlist + self.getaddress()
elif self.field[self.pos] == '<':
# Address is a phrase then a route addr
routeaddr = self.getrouteaddr()
if self.commentlist:
returnlist = [(SPACE.join(plist) + ' (' +
' '.join(self.commentlist) + ')', routeaddr)]
else:
returnlist = [(SPACE.join(plist), routeaddr)]
else:
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in self.specials:
self.pos += 1
self.gotonext()
if self.pos < len(self.field) and self.field[self.pos] == ',':
self.pos += 1
return returnlist
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != '<':
return
expectroute = False
self.pos += 1
self.gotonext()
adlist = ''
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = False
elif self.field[self.pos] == '>':
self.pos += 1
break
elif self.field[self.pos] == '@':
self.pos += 1
expectroute = True
elif self.field[self.pos] == ':':
self.pos += 1
else:
adlist = self.getaddrspec()
self.pos += 1
break
self.gotonext()
return adlist
def getaddrspec(self):
"""Parse an RFC 2822 addr-spec."""
aslist = []
self.gotonext()
while self.pos < len(self.field):
if self.field[self.pos] == '.':
aslist.append('.')
self.pos += 1
elif self.field[self.pos] == '"':
aslist.append('"%s"' % quote(self.getquote()))
elif self.field[self.pos] in self.atomends:
break
else:
aslist.append(self.getatom())
self.gotonext()
if self.pos >= len(self.field) or self.field[self.pos] != '@':
return EMPTYSTRING.join(aslist)
aslist.append('@')
self.pos += 1
self.gotonext()
return EMPTYSTRING.join(aslist) + self.getdomain()
def getdomain(self):
"""Get the complete domain name from an address."""
sdlist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] == '[':
sdlist.append(self.getdomainliteral())
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
elif self.field[self.pos] in self.atomends:
break
else:
sdlist.append(self.getatom())
return EMPTYSTRING.join(sdlist)
def getdelimited(self, beginchar, endchars, allowcomments=True):
"""Parse a header fragment delimited by special characters.
`beginchar' is the start character for the fragment.
If self is not looking at an instance of `beginchar' then
getdelimited returns the empty string.
`endchars' is a sequence of allowable end-delimiting characters.
Parsing stops when one of these is encountered.
If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
within the parsed fragment.
"""
if self.field[self.pos] != beginchar:
return ''
slist = ['']
quote = False
self.pos += 1
while self.pos < len(self.field):
if quote:
slist.append(self.field[self.pos])
quote = False
elif self.field[self.pos] in endchars:
self.pos += 1
break
elif allowcomments and self.field[self.pos] == '(':
slist.append(self.getcomment())
continue # have already advanced pos from getcomment
elif self.field[self.pos] == '\\':
quote = True
else:
slist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(slist)
def getquote(self):
"""Get a quote-delimited fragment from self's field."""
return self.getdelimited('"', '"\r', False)
def getcomment(self):
"""Get a parenthesis-delimited fragment from self's field."""
return self.getdelimited('(', ')\r', True)
def getdomainliteral(self):
"""Parse an RFC 2822 domain-literal."""
return '[%s]' % self.getdelimited('[', ']\r', False)
def getatom(self, atomends=None):
"""Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases)."""
atomlist = ['']
if atomends is None:
atomends = self.atomends
while self.pos < len(self.field):
if self.field[self.pos] in atomends:
break
else:
atomlist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(atomlist)
def getphraselist(self):
"""Parse a sequence of RFC 2822 phrases.
A phrase is a sequence of words, which are in turn either RFC 2822
atoms or quoted-strings. Phrases are canonicalized by squeezing all
runs of continuous whitespace into one space.
"""
plist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.FWS:
self.pos += 1
elif self.field[self.pos] == '"':
plist.append(self.getquote())
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] in self.phraseends:
break
else:
plist.append(self.getatom(self.phraseends))
return plist
class AddressList(AddrlistClass):
"""An AddressList encapsulates a list of parsed RFC 2822 addresses."""
def __init__(self, field):
AddrlistClass.__init__(self, field)
if field:
self.addresslist = self.getaddrlist()
else:
self.addresslist = []
def __len__(self):
return len(self.addresslist)
def __add__(self, other):
# Set union
newaddr = AddressList(None)
newaddr.addresslist = self.addresslist[:]
for x in other.addresslist:
if not x in self.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __iadd__(self, other):
# Set union, in-place
for x in other.addresslist:
if not x in self.addresslist:
self.addresslist.append(x)
return self
def __sub__(self, other):
# Set difference
newaddr = AddressList(None)
for x in self.addresslist:
if not x in other.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __isub__(self, other):
# Set difference, in-place
for x in other.addresslist:
if x in self.addresslist:
self.addresslist.remove(x)
return self
def __getitem__(self, index):
# Make indexing, slices, and 'in' work
return self.addresslist[index]
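# A short usage sketch (hypothetical addresses):
#     AddressList('Jane Doe <jane@x.org>, bob@y.org').addresslist
#     -> [('Jane Doe', 'jane@x.org'), ('', 'bob@y.org')]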
|
gpl-3.0
|
don-github/edx-platform
|
lms/djangoapps/lms_xblock/test/test_runtime.py
|
92
|
6099
|
"""
Tests of the LMS XBlock Runtime and associated utilities
"""
from django.contrib.auth.models import User
from django.conf import settings
from ddt import ddt, data
from mock import Mock
from unittest import TestCase
from urlparse import urlparse
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from lms.djangoapps.lms_xblock.runtime import quote_slashes, unquote_slashes, LmsModuleSystem
from xblock.fields import ScopeIds
TEST_STRINGS = [
'',
'foobar',
'foo/bar',
'foo/bar;',
'foo;;bar',
'foo;_bar',
'foo/',
'/bar',
'foo//bar',
'foo;;;bar',
]
@ddt
class TestQuoteSlashes(TestCase):
"""Test the quote_slashes and unquote_slashes functions"""
@data(*TEST_STRINGS)
def test_inverse(self, test_string):
        self.assertEqual(test_string, unquote_slashes(quote_slashes(test_string)))
@data(*TEST_STRINGS)
def test_escaped(self, test_string):
self.assertNotIn('/', quote_slashes(test_string))
class TestHandlerUrl(TestCase):
"""Test the LMS handler_url"""
def setUp(self):
super(TestHandlerUrl, self).setUp()
self.block = Mock(name='block', scope_ids=ScopeIds(None, None, None, 'dummy'))
self.course_key = SlashSeparatedCourseKey("org", "course", "run")
self.runtime = LmsModuleSystem(
static_url='/static',
track_function=Mock(),
get_module=Mock(),
render_template=Mock(),
replace_urls=str,
course_id=self.course_key,
descriptor_runtime=Mock(),
)
def test_trailing_characters(self):
self.assertFalse(self.runtime.handler_url(self.block, 'handler').endswith('?'))
self.assertFalse(self.runtime.handler_url(self.block, 'handler').endswith('/'))
self.assertFalse(self.runtime.handler_url(self.block, 'handler', 'suffix').endswith('?'))
self.assertFalse(self.runtime.handler_url(self.block, 'handler', 'suffix').endswith('/'))
self.assertFalse(self.runtime.handler_url(self.block, 'handler', 'suffix', 'query').endswith('?'))
self.assertFalse(self.runtime.handler_url(self.block, 'handler', 'suffix', 'query').endswith('/'))
self.assertFalse(self.runtime.handler_url(self.block, 'handler', query='query').endswith('?'))
self.assertFalse(self.runtime.handler_url(self.block, 'handler', query='query').endswith('/'))
def _parsed_query(self, query_string):
"""Return the parsed query string from a handler_url generated with the supplied query_string"""
return urlparse(self.runtime.handler_url(self.block, 'handler', query=query_string)).query
def test_query_string(self):
self.assertIn('foo=bar', self._parsed_query('foo=bar'))
self.assertIn('foo=bar&baz=true', self._parsed_query('foo=bar&baz=true'))
self.assertIn('foo&bar&baz', self._parsed_query('foo&bar&baz'))
def _parsed_path(self, handler_name='handler', suffix=''):
"""Return the parsed path from a handler_url with the supplied handler_name and suffix"""
return urlparse(self.runtime.handler_url(self.block, handler_name, suffix=suffix)).path
def test_suffix(self):
self.assertTrue(self._parsed_path(suffix="foo").endswith('foo'))
self.assertTrue(self._parsed_path(suffix="foo/bar").endswith('foo/bar'))
self.assertTrue(self._parsed_path(suffix="/foo/bar").endswith('/foo/bar'))
def test_handler_name(self):
self.assertIn('handler1', self._parsed_path('handler1'))
self.assertIn('handler_a', self._parsed_path('handler_a'))
def test_thirdparty_fq(self):
"""Testing the Fully-Qualified URL returned by thirdparty=True"""
parsed_fq_url = urlparse(self.runtime.handler_url(self.block, 'handler', thirdparty=True))
self.assertEqual(parsed_fq_url.scheme, 'https')
self.assertEqual(parsed_fq_url.hostname, settings.SITE_NAME)
def test_not_thirdparty_rel(self):
"""Testing the Fully-Qualified URL returned by thirdparty=False"""
parsed_fq_url = urlparse(self.runtime.handler_url(self.block, 'handler', thirdparty=False))
self.assertEqual(parsed_fq_url.scheme, '')
self.assertIsNone(parsed_fq_url.hostname)
class TestUserServiceAPI(TestCase):
"""Test the user service interface"""
def setUp(self):
super(TestUserServiceAPI, self).setUp()
self.course_id = SlashSeparatedCourseKey("org", "course", "run")
self.user = User(username='runtime_robot', email='[email protected]', password='test', first_name='Robot')
self.user.save()
def mock_get_real_user(_anon_id):
"""Just returns the test user"""
return self.user
self.runtime = LmsModuleSystem(
static_url='/static',
track_function=Mock(),
get_module=Mock(),
render_template=Mock(),
replace_urls=str,
course_id=self.course_id,
get_real_user=mock_get_real_user,
descriptor_runtime=Mock(),
)
self.scope = 'course'
self.key = 'key1'
self.mock_block = Mock()
self.mock_block.service_declaration.return_value = 'needs'
def test_get_set_tag(self):
# test for when we haven't set the tag yet
tag = self.runtime.service(self.mock_block, 'user_tags').get_tag(self.scope, self.key)
self.assertIsNone(tag)
# set the tag
set_value = 'value'
self.runtime.service(self.mock_block, 'user_tags').set_tag(self.scope, self.key, set_value)
tag = self.runtime.service(self.mock_block, 'user_tags').get_tag(self.scope, self.key)
self.assertEqual(tag, set_value)
# Try to set tag in wrong scope
with self.assertRaises(ValueError):
self.runtime.service(self.mock_block, 'user_tags').set_tag('fake_scope', self.key, set_value)
# Try to get tag in wrong scope
with self.assertRaises(ValueError):
self.runtime.service(self.mock_block, 'user_tags').get_tag('fake_scope', self.key)
|
agpl-3.0
|
Sjors/bitcoin
|
test/functional/wallet_backup.py
|
22
|
8943
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from decimal import Decimal
import os
from random import randint
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class WalletBackupTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
        # nodes 1, 2, 3 are spenders, let's give them a keypool=100
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
["[email protected]", "-keypool=100"],
["[email protected]", "-keypool=100"],
["[email protected]", "-keypool=100"],
["[email protected]"],
]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
self.connect_nodes(0, 3)
self.connect_nodes(1, 3)
self.connect_nodes(2, 3)
self.connect_nodes(2, 0)
self.sync_all()
def one_send(self, from_node, to_address):
        if randint(1, 2) == 1:
            amount = Decimal(randint(1, 10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
self.sync_mempools()
self.nodes[3].generate(1)
self.sync_blocks()
# As above, this mirrors the original bash test.
def start_three(self, args=()):
self.start_node(0, self.extra_args[0] + list(args))
self.start_node(1, self.extra_args[1] + list(args))
self.start_node(2, self.extra_args[2] + list(args))
self.connect_nodes(0, 3)
self.connect_nodes(1, 3)
self.connect_nodes(2, 3)
self.connect_nodes(2, 0)
def stop_three(self):
self.stop_node(0)
self.stop_node(1)
self.stop_node(2)
def erase_three(self):
os.remove(os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
os.remove(os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
os.remove(os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
def init_three(self):
self.init_wallet(0)
self.init_wallet(1)
self.init_wallet(2)
def run_test(self):
self.log.info("Generating initial blockchain")
self.nodes[0].generate(1)
self.sync_blocks()
self.nodes[1].generate(1)
self.sync_blocks()
self.nodes[2].generate(1)
self.sync_blocks()
self.nodes[3].generate(100)
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
for _ in range(5):
self.do_one_round()
self.log.info("Backing up")
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
if not self.options.descriptors:
self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.log.info("More transactions")
for _ in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
self.log.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
# Restore wallets from backup
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
self.log.info("Re-starting nodes")
self.start_three()
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if not self.options.descriptors:
self.log.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
self.start_three(["-nowallet"])
self.init_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
# Backup to source wallet file must fail
sourcePaths = [
os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename),
os.path.join(self.nodes[0].datadir, self.chain, '.', 'wallets', self.default_wallet_name, self.wallet_data_filename),
os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name),
os.path.join(self.nodes[0].datadir, self.chain, 'wallets')]
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
if __name__ == '__main__':
WalletBackupTest().main()
|
mit
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/test/test_error.py
|
20
|
4619
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.internet import error
import socket
class TestStringification(unittest.TestCase):
"""Test that the exceptions have useful stringifications.
"""
listOfTests = [
#(output, exception[, args[, kwargs]]),
("An error occurred binding to an interface.",
error.BindError),
("An error occurred binding to an interface: foo.",
error.BindError, ['foo']),
("An error occurred binding to an interface: foo bar.",
error.BindError, ['foo', 'bar']),
("Couldn't listen on eth0:4242: Foo.",
error.CannotListenError,
('eth0', 4242, socket.error('Foo'))),
("Message is too long to send.",
error.MessageLengthError),
("Message is too long to send: foo bar.",
error.MessageLengthError, ['foo', 'bar']),
("DNS lookup failed.",
error.DNSLookupError),
("DNS lookup failed: foo bar.",
error.DNSLookupError, ['foo', 'bar']),
("An error occurred while connecting.",
error.ConnectError),
("An error occurred while connecting: someOsError.",
error.ConnectError, ['someOsError']),
("An error occurred while connecting: foo.",
error.ConnectError, [], {'string': 'foo'}),
("An error occurred while connecting: someOsError: foo.",
error.ConnectError, ['someOsError', 'foo']),
("Couldn't bind.",
error.ConnectBindError),
("Couldn't bind: someOsError.",
error.ConnectBindError, ['someOsError']),
("Couldn't bind: someOsError: foo.",
error.ConnectBindError, ['someOsError', 'foo']),
("Hostname couldn't be looked up.",
error.UnknownHostError),
("No route to host.",
error.NoRouteError),
("Connection was refused by other side.",
error.ConnectionRefusedError),
("TCP connection timed out.",
error.TCPTimedOutError),
("File used for UNIX socket is no good.",
error.BadFileError),
("Service name given as port is unknown.",
error.ServiceNameUnknownError),
("User aborted connection.",
error.UserError),
("User timeout caused connection failure.",
error.TimeoutError),
("An SSL error occurred.",
error.SSLError),
("Connection to the other side was lost in a non-clean fashion.",
error.ConnectionLost),
("Connection to the other side was lost in a non-clean fashion: foo bar.",
error.ConnectionLost, ['foo', 'bar']),
("Connection was closed cleanly.",
error.ConnectionDone),
("Connection was closed cleanly: foo bar.",
error.ConnectionDone, ['foo', 'bar']),
("Uh.", #TODO nice docstring, you've got there.
error.ConnectionFdescWentAway),
("Tried to cancel an already-called event.",
error.AlreadyCalled),
("Tried to cancel an already-called event: foo bar.",
error.AlreadyCalled, ['foo', 'bar']),
("Tried to cancel an already-cancelled event.",
error.AlreadyCancelled),
("A process has ended without apparent errors: process finished with exit code 0.",
error.ProcessDone,
[None]),
("A process has ended with a probable error condition: process ended.",
error.ProcessTerminated),
("A process has ended with a probable error condition: process ended with exit code 42.",
error.ProcessTerminated,
[],
{'exitCode': 42}),
("A process has ended with a probable error condition: process ended by signal SIGBUS.",
error.ProcessTerminated,
[],
{'signal': 'SIGBUS'}),
("The Connector was not connecting when it was asked to stop connecting.",
error.NotConnectingError),
("The Port was not listening when it was asked to stop listening.",
error.NotListeningError),
]
def testThemAll(self):
for entry in self.listOfTests:
output = entry[0]
exception = entry[1]
try:
args = entry[2]
except IndexError:
args = ()
try:
kwargs = entry[3]
except IndexError:
kwargs = {}
self.failUnlessEqual(
str(exception(*args, **kwargs)),
output)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
amyvmiwei/neon
|
neon/util/param.py
|
4
|
1219
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Contains various functions for checking and setting required and optional
parameters.
"""
def req_param(obj, paramlist):
for param in paramlist:
if not hasattr(obj, param):
raise ValueError("req param %s missing for %s" % (param,
obj.__class__.__name__))
def opt_param(obj, paramlist, default_value=None):
for param in paramlist:
if not hasattr(obj, param):
setattr(obj, param, default_value)
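# A hypothetical usage sketch (names invented):
#     class Layer(object):
#         pass
#     layer = Layer()
#     opt_param(layer, ['batch_size'], default_value=128)  # sets the default
#     req_param(layer, ['batch_size'])                     # now passes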
|
apache-2.0
|
tysonclugg/django
|
tests/template_loader/tests.py
|
153
|
7435
|
from django.template import TemplateDoesNotExist
from django.template.loader import (
get_template, render_to_string, select_template,
)
from django.test import SimpleTestCase, override_settings
from django.test.client import RequestFactory
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
}])
class TemplateLoaderTests(SimpleTestCase):
def test_get_template_first_engine(self):
template = get_template("template_loader/hello.html")
self.assertEqual(template.render(), "Hello! (template strings)\n")
def test_get_template_second_engine(self):
template = get_template("template_loader/goodbye.html")
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_get_template_using_engine(self):
template = get_template("template_loader/hello.html", using="django")
self.assertEqual(template.render(), "Hello! (Django templates)\n")
def test_get_template_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
get_template("template_loader/unknown.html")
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[-1].backend.name, 'django')
def test_select_template_first_engine(self):
template = select_template(["template_loader/unknown.html",
"template_loader/hello.html"])
self.assertEqual(template.render(), "Hello! (template strings)\n")
def test_select_template_second_engine(self):
template = select_template(["template_loader/unknown.html",
"template_loader/goodbye.html"])
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_select_template_using_engine(self):
template = select_template(["template_loader/unknown.html",
"template_loader/hello.html"], using="django")
self.assertEqual(template.render(), "Hello! (Django templates)\n")
def test_select_template_empty(self):
with self.assertRaises(TemplateDoesNotExist):
select_template([])
def test_select_template_string(self):
with self.assertRaisesMessage(
TypeError,
"select_template() takes an iterable of template names but got a "
"string: 'template_loader/hello.html'. Use get_template() if you "
"want to load a single template by name."
):
select_template('template_loader/hello.html')
def test_select_template_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
select_template(["template_loader/unknown.html",
"template_loader/missing.html"])
self.assertEqual(
e.exception.chain[0].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[0].backend.name, 'dummy')
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
'template_loader/missing.html',
)
self.assertEqual(e.exception.chain[-1].backend.name, 'django')
def test_select_template_tries_all_engines_before_names(self):
template = select_template(["template_loader/goodbye.html",
"template_loader/hello.html"])
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_render_to_string_first_engine(self):
content = render_to_string("template_loader/hello.html")
self.assertEqual(content, "Hello! (template strings)\n")
def test_render_to_string_second_engine(self):
content = render_to_string("template_loader/goodbye.html")
self.assertEqual(content, "Goodbye! (Django templates)\n")
def test_render_to_string_with_request(self):
request = RequestFactory().get('/foobar/')
content = render_to_string("template_loader/request.html", request=request)
self.assertEqual(content, "/foobar/\n")
def test_render_to_string_using_engine(self):
content = render_to_string("template_loader/hello.html", using="django")
self.assertEqual(content, "Hello! (Django templates)\n")
def test_render_to_string_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
render_to_string("template_loader/unknown.html")
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[-1].backend.name, 'django')
def test_render_to_string_with_list_first_engine(self):
content = render_to_string(["template_loader/unknown.html",
"template_loader/hello.html"])
self.assertEqual(content, "Hello! (template strings)\n")
def test_render_to_string_with_list_second_engine(self):
content = render_to_string(["template_loader/unknown.html",
"template_loader/goodbye.html"])
self.assertEqual(content, "Goodbye! (Django templates)\n")
def test_render_to_string_with_list_using_engine(self):
content = render_to_string(["template_loader/unknown.html",
"template_loader/hello.html"], using="django")
self.assertEqual(content, "Hello! (Django templates)\n")
def test_render_to_string_with_list_empty(self):
with self.assertRaises(TemplateDoesNotExist):
render_to_string([])
def test_render_to_string_with_list_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
render_to_string(["template_loader/unknown.html",
"template_loader/missing.html"])
self.assertEqual(
e.exception.chain[0].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[0].backend.name, 'dummy')
self.assertEqual(
e.exception.chain[1].tried[0][0].template_name,
'template_loader/unknown.html',
)
self.assertEqual(e.exception.chain[1].backend.name, 'django')
self.assertEqual(
e.exception.chain[2].tried[0][0].template_name,
'template_loader/missing.html',
)
self.assertEqual(e.exception.chain[2].backend.name, 'dummy')
self.assertEqual(
e.exception.chain[3].tried[0][0].template_name,
'template_loader/missing.html',
)
self.assertEqual(e.exception.chain[3].backend.name, 'django')
def test_render_to_string_with_list_tries_all_engines_before_names(self):
content = render_to_string(["template_loader/goodbye.html",
"template_loader/hello.html"])
self.assertEqual(content, "Goodbye! (Django templates)\n")
|
bsd-3-clause
|
wearpants/osf.io
|
website/addons/box/tests/test_serializer.py
|
11
|
1096
|
# -*- coding: utf-8 -*-
"""Serializer tests for the Box addon."""
import mock
from nose.tools import * # noqa (PEP8 asserts)
from website.addons.base.testing.serializers import StorageAddonSerializerTestSuiteMixin
from website.addons.box.tests.utils import MockBox
from website.addons.box.tests.factories import BoxAccountFactory
from website.addons.box.serializer import BoxSerializer
from tests.base import OsfTestCase
mock_client = MockBox()
class TestBoxSerializer(StorageAddonSerializerTestSuiteMixin, OsfTestCase):
addon_short_name = 'box'
Serializer = BoxSerializer
ExternalAccountFactory = BoxAccountFactory
client = mock_client
def setUp(self):
self.mock_valid = mock.patch.object(
BoxSerializer,
'credentials_are_valid',
return_value=True
)
self.mock_valid.start()
super(TestBoxSerializer, self).setUp()
def tearDown(self):
self.mock_valid.stop()
super(TestBoxSerializer, self).tearDown()
def set_provider_id(self, pid):
self.node_settings.folder_id = pid
|
apache-2.0
|