Dataset columns:
    repo_name: string (6-100 chars)
    path: string (4-294 chars)
    copies: string (1-5 chars)
    size: string (4-6 chars)
    content: string (606-896k chars)
    license: string (15 classes)
Instagram/django
tests/modeltests/reserved_names/tests.py
92
1671
import datetime

from django.test import TestCase

from models import Thing


class ReservedNameTests(TestCase):
    def generate(self):
        day1 = datetime.date(2005, 1, 1)
        t = Thing.objects.create(when='a', join='b', like='c', drop='d',
            alter='e', having='f', where=day1, has_hyphen='h')
        day2 = datetime.date(2006, 2, 2)
        u = Thing.objects.create(when='h', join='i', like='j', drop='k',
            alter='l', having='m', where=day2)

    def test_simple(self):
        day1 = datetime.date(2005, 1, 1)
        t = Thing.objects.create(when='a', join='b', like='c', drop='d',
            alter='e', having='f', where=day1, has_hyphen='h')
        self.assertEqual(t.when, 'a')

        day2 = datetime.date(2006, 2, 2)
        u = Thing.objects.create(when='h', join='i', like='j', drop='k',
            alter='l', having='m', where=day2)
        self.assertEqual(u.when, 'h')

    def test_order_by(self):
        self.generate()
        things = [t.when for t in Thing.objects.order_by('when')]
        self.assertEqual(things, ['a', 'h'])

    def test_fields(self):
        self.generate()
        v = Thing.objects.get(pk='a')
        self.assertEqual(v.join, 'b')
        self.assertEqual(v.where, datetime.date(year=2005, month=1, day=1))

    def test_dates(self):
        self.generate()
        resp = Thing.objects.dates('where', 'year')
        self.assertEqual(list(resp), [
            datetime.datetime(2005, 1, 1, 0, 0),
            datetime.datetime(2006, 1, 1, 0, 0),
        ])

    def test_month_filter(self):
        self.generate()
        self.assertEqual(Thing.objects.filter(where__month=1)[0].when, 'a')
bsd-3-clause
stephane-martin/salt-debian-packaging
salt-2016.3.2/salt/states/apache_module.py
1
3640
# -*- coding: utf-8 -*-
'''
Manage Apache Modules

.. versionadded:: 2014.7.0

Enable and disable apache modules.

.. code-block:: yaml

    Enable cgi module:
      apache_module.enabled:
        - name: cgi

    Disable cgi module:
      apache_module.disabled:
        - name: cgi
'''
from __future__ import absolute_import
from salt.ext.six import string_types

# Import salt libs
import salt.utils


def __virtual__():
    '''
    Only load if a2enmod is available.
    '''
    return 'apache_module' if 'apache.a2enmod' in __salt__ else False


def enabled(name):
    '''
    Ensure an Apache module is enabled.

    name
        Name of the Apache module
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    is_enabled = __salt__['apache.check_mod_enabled'](name)
    if not is_enabled:
        if __opts__['test']:
            msg = 'Apache module {0} is set to be enabled.'.format(name)
            ret['comment'] = msg
            ret['changes']['old'] = None
            ret['changes']['new'] = name
            ret['result'] = None
            return ret
        status = __salt__['apache.a2enmod'](name)['Status']
        if isinstance(status, string_types) and 'enabled' in status:
            ret['result'] = True
            ret['changes']['old'] = None
            ret['changes']['new'] = name
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to enable {0} Apache module'.format(name)
            if isinstance(status, string_types):
                ret['comment'] = ret['comment'] + ' ({0})'.format(status)
        return ret
    else:
        ret['comment'] = '{0} already enabled.'.format(name)
    return ret


def enable(name):
    '''
    Ensure an Apache module is enabled.

    .. warning::

        This function is deprecated and will be removed in Salt Nitrogen.

    name
        Name of the Apache module
    '''
    salt.utils.warn_until(
        'Nitrogen',
        'This functionality has been deprecated; use "apache_module.enabled" '
        'instead.'
    )
    return enabled(name)


def disabled(name):
    '''
    Ensure an Apache module is disabled.

    name
        Name of the Apache module
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    is_enabled = __salt__['apache.check_mod_enabled'](name)
    if is_enabled:
        if __opts__['test']:
            msg = 'Apache module {0} is set to be disabled.'.format(name)
            ret['comment'] = msg
            ret['changes']['old'] = name
            ret['changes']['new'] = None
            ret['result'] = None
            return ret
        status = __salt__['apache.a2dismod'](name)['Status']
        if isinstance(status, string_types) and 'disabled' in status:
            ret['result'] = True
            ret['changes']['old'] = name
            ret['changes']['new'] = None
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to disable {0} Apache module'.format(name)
            if isinstance(status, string_types):
                ret['comment'] = ret['comment'] + ' ({0})'.format(status)
        return ret
    else:
        ret['comment'] = '{0} already disabled.'.format(name)
    return ret


def disable(name):
    '''
    Ensure an Apache module is disabled.

    .. warning::

        This function is deprecated and will be removed in Salt Nitrogen.

    name
        Name of the Apache module
    '''
    salt.utils.warn_until(
        'Nitrogen',
        'This functionality has been deprecated; use "apache_module.disabled" '
        'instead.'
    )
    return disabled(name)
apache-2.0
kajgan/stbgui
lib/python/Components/ConditionalWidget.py
49
1710
from GUIComponent import GUIComponent
from enigma import eTimer


class ConditionalWidget(GUIComponent):
    def __init__(self, withTimer = True):
        GUIComponent.__init__(self)
        self.setConnect(None)
        if (withTimer):
            self.conditionCheckTimer = eTimer()
            self.conditionCheckTimer.callback.append(self.update)
            self.conditionCheckTimer.start(1000)

    def postWidgetCreate(self, instance):
        self.visible = 0

    def setConnect(self, conditionalFunction):
        self.conditionalFunction = conditionalFunction

    def activateCondition(self, condition):
        if condition:
            self.visible = 1
        else:
            self.visible = 0

    def update(self):
        if (self.conditionalFunction != None):
            try:
                self.activateCondition(self.conditionalFunction())
            except:
                self.conditionalFunction = None
                self.activateCondition(False)


class BlinkingWidget(GUIComponent):
    def __init__(self):
        GUIComponent.__init__(self)
        self.blinking = False
        self.setBlinkTime(500)
        self.timer = eTimer()
        self.timer.callback.append(self.blink)

    def setBlinkTime(self, time):
        self.blinktime = time

    def blink(self):
        if self.blinking == True:
            self.visible = not self.visible

    def startBlinking(self):
        self.blinking = True
        self.timer.start(self.blinktime)

    def stopBlinking(self):
        self.blinking = False
        if self.visible:
            self.hide()
        self.timer.stop()


class BlinkingWidgetConditional(BlinkingWidget, ConditionalWidget):
    def __init__(self):
        BlinkingWidget.__init__(self)
        ConditionalWidget.__init__(self)

    def activateCondition(self, condition):
        if (condition):
            if not self.blinking:  # not blinking yet, so start
                self.startBlinking()
        else:
            if self.blinking:  # currently blinking, so stop
                self.stopBlinking()
gpl-2.0
iaksit/AB2017
ticket/views.py
2
2202
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse

from collectivework import settings
from ticket.forms import CreateTicketForm
from ticket.models import Ticket


def list_ticket(request):
    tickets = []
    if request.user:
        if request.user.is_staff:
            tickets = Ticket.objects.all().order_by('-creation_date')
        elif request.user.is_authenticated():
            tickets = Ticket.objects.filter(accepted=True).order_by('-creation_date')
        else:
            tickets = Ticket.objects.filter(accepted=True, completed=False, assigned_user=None).order_by(
                '-creation_date')
    return render(request, 'list_ticket.html', {'tickets': tickets})


@login_required(login_url=settings.LOGIN_URL)
def create_ticket(request):
    if request.POST:
        form = CreateTicketForm(request.POST)
        if form.is_valid():
            ticket = form.save(commit=False)
            ticket.request_in_user = request.user
            ticket.save()
            return HttpResponseRedirect('/ticket/')
    else:
        form = CreateTicketForm()
    return render(request, 'detail_ticket.html',
                  {'form': form, 'title': "Yeni İstek Oluştur", 'creation': True})


@login_required(login_url=settings.LOGIN_URL)
def show_ticket(request, id):
    ticket = get_object_or_404(Ticket, pk=id)
    form = CreateTicketForm(instance=ticket)
    return render(request, 'detail_ticket.html',
                  {'form': form, 'title': "İstek Numarası: %s" % ticket.pk, 'creation': False})


@login_required(login_url=settings.LOGIN_URL)
def list_my_ticket(request):
    tickets = Ticket.objects.filter(assigned_user=request.user).order_by('-creation_date')
    return render(request, 'list_ticket.html', {'tickets': tickets})


@staff_member_required
def list_moderation_requests(request):
    tickets = Ticket.objects.filter(accepted=False, rejected=False, completed=False).order_by('-creation_date')
    return render(request, 'list_ticket.html', {'tickets': tickets})
gpl-3.0
chiviak/CouchPotatoServer
libs/suds/sax/date.py
160
10456
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Nathan Van Gheem ([email protected])

"""
The I{xdate} module provides classes for conversion between
XML dates and python objects.
"""

from logging import getLogger
from suds import *
from suds.xsd import *
import time
import datetime as dt
import re

log = getLogger(__name__)


class Date:
    """
    An XML date object.
    Supported formats:
        - YYYY-MM-DD
        - YYYY-MM-DD(z|Z)
        - YYYY-MM-DD+06:00
        - YYYY-MM-DD-06:00
    @ivar date: The object value.
    @type date: B{datetime}.I{date}
    """

    def __init__(self, date):
        """
        @param date: The value of the object.
        @type date: (date|str)
        @raise ValueError: When I{date} is invalid.
        """
        if isinstance(date, dt.date):
            self.date = date
            return
        if isinstance(date, basestring):
            self.date = self.__parse(date)
            return
        raise ValueError, type(date)

    def year(self):
        """
        Get the I{year} component.
        @return: The year.
        @rtype: int
        """
        return self.date.year

    def month(self):
        """
        Get the I{month} component.
        @return: The month.
        @rtype: int
        """
        return self.date.month

    def day(self):
        """
        Get the I{day} component.
        @return: The day.
        @rtype: int
        """
        return self.date.day

    def __parse(self, s):
        """
        Parse the string date.
        Supported formats:
            - YYYY-MM-DD
            - YYYY-MM-DD(z|Z)
            - YYYY-MM-DD+06:00
            - YYYY-MM-DD-06:00
        Although, the TZ is ignored because it's meaningless
        without the time, right?
        @param s: A date string.
        @type s: str
        @return: A date object.
        @rtype: I{date}
        """
        try:
            year, month, day = s[:10].split('-', 2)
            year = int(year)
            month = int(month)
            day = int(day)
            return dt.date(year, month, day)
        except:
            log.debug(s, exc_info=True)
            raise ValueError, 'Invalid format "%s"' % s

    def __str__(self):
        return unicode(self)

    def __unicode__(self):
        return self.date.isoformat()


class Time:
    """
    An XML time object.
    Supported formats:
        - HH:MI:SS
        - HH:MI:SS(z|Z)
        - HH:MI:SS.ms
        - HH:MI:SS.ms(z|Z)
        - HH:MI:SS(+|-)06:00
        - HH:MI:SS.ms(+|-)06:00
    @ivar tz: The timezone
    @type tz: L{Timezone}
    @ivar date: The object value.
    @type date: B{datetime}.I{time}
    """

    def __init__(self, time, adjusted=True):
        """
        @param time: The value of the object.
        @type time: (time|str)
        @param adjusted: Adjust for I{local} Timezone.
        @type adjusted: boolean
        @raise ValueError: When I{time} is invalid.
        """
        self.tz = Timezone()
        if isinstance(time, dt.time):
            self.time = time
            return
        if isinstance(time, basestring):
            self.time = self.__parse(time)
            if adjusted:
                self.__adjust()
            return
        raise ValueError, type(time)

    def hour(self):
        """
        Get the I{hour} component.
        @return: The hour.
        @rtype: int
        """
        return self.time.hour

    def minute(self):
        """
        Get the I{minute} component.
        @return: The minute.
        @rtype: int
        """
        return self.time.minute

    def second(self):
        """
        Get the I{seconds} component.
        @return: The seconds.
        @rtype: int
        """
        return self.time.second

    def microsecond(self):
        """
        Get the I{microsecond} component.
        @return: The microsecond.
        @rtype: int
        """
        return self.time.microsecond

    def __adjust(self):
        """
        Adjust for TZ offset.
        """
        if hasattr(self, 'offset'):
            today = dt.date.today()
            delta = self.tz.adjustment(self.offset)
            d = dt.datetime.combine(today, self.time)
            d = (d + delta)
            self.time = d.time()

    def __parse(self, s):
        """
        Parse the string date.
        Patterns:
            - HH:MI:SS
            - HH:MI:SS(z|Z)
            - HH:MI:SS.ms
            - HH:MI:SS.ms(z|Z)
            - HH:MI:SS(+|-)06:00
            - HH:MI:SS.ms(+|-)06:00
        @param s: A time string.
        @type s: str
        @return: A time object.
        @rtype: B{datetime}.I{time}
        """
        try:
            offset = None
            part = Timezone.split(s)
            hour, minute, second = part[0].split(':', 2)
            hour = int(hour)
            minute = int(minute)
            second, ms = self.__second(second)
            if len(part) == 2:
                self.offset = self.__offset(part[1])
            if ms is None:
                return dt.time(hour, minute, second)
            else:
                return dt.time(hour, minute, second, ms)
        except:
            log.debug(s, exc_info=True)
            raise ValueError, 'Invalid format "%s"' % s

    def __second(self, s):
        """
        Parse the seconds and microseconds.
        The microseconds are truncated to 999999 due to a restriction in
        the python datetime.datetime object.
        @param s: A string representation of the seconds.
        @type s: str
        @return: Tuple of (sec,ms)
        @rtype: tuple.
        """
        part = s.split('.')
        if len(part) > 1:
            return (int(part[0]), int(part[1][:6]))
        else:
            return (int(part[0]), None)

    def __offset(self, s):
        """
        Parse the TZ offset.
        @param s: A string representation of the TZ offset.
        @type s: str
        @return: The signed offset in hours.
        @rtype: str
        """
        if len(s) == len('-00:00'):
            return int(s[:3])
        if len(s) == 0:
            return self.tz.local
        if len(s) == 1:
            return 0
        raise Exception()

    def __str__(self):
        return unicode(self)

    def __unicode__(self):
        time = self.time.isoformat()
        if self.tz.local:
            return '%s%+.2d:00' % (time, self.tz.local)
        else:
            return '%sZ' % time


class DateTime(Date, Time):
    """
    An XML time object.
    Supported formats:
        - YYYY-MM-DDB{T}HH:MI:SS
        - YYYY-MM-DDB{T}HH:MI:SS(z|Z)
        - YYYY-MM-DDB{T}HH:MI:SS.ms
        - YYYY-MM-DDB{T}HH:MI:SS.ms(z|Z)
        - YYYY-MM-DDB{T}HH:MI:SS(+|-)06:00
        - YYYY-MM-DDB{T}HH:MI:SS.ms(+|-)06:00
    @ivar datetime: The object value.
    @type datetime: B{datetime}.I{datedate}
    """

    def __init__(self, date):
        """
        @param date: The value of the object.
        @type date: (datetime|str)
        @raise ValueError: When I{tm} is invalid.
        """
        if isinstance(date, dt.datetime):
            Date.__init__(self, date.date())
            Time.__init__(self, date.time())
            self.datetime = \
                dt.datetime.combine(self.date, self.time)
            return
        if isinstance(date, basestring):
            part = date.split('T')
            Date.__init__(self, part[0])
            Time.__init__(self, part[1], 0)
            self.datetime = \
                dt.datetime.combine(self.date, self.time)
            self.__adjust()
            return
        raise ValueError, type(date)

    def __adjust(self):
        """
        Adjust for TZ offset.
        """
        if not hasattr(self, 'offset'):
            return
        delta = self.tz.adjustment(self.offset)
        try:
            d = (self.datetime + delta)
            self.datetime = d
            self.date = d.date()
            self.time = d.time()
        except OverflowError:
            log.warn('"%s" caused overflow, not-adjusted', self.datetime)

    def __str__(self):
        return unicode(self)

    def __unicode__(self):
        s = []
        s.append(Date.__unicode__(self))
        s.append(Time.__unicode__(self))
        return 'T'.join(s)


class UTC(DateTime):
    """
    Represents current UTC time.
    """

    def __init__(self, date=None):
        if date is None:
            date = dt.datetime.utcnow()
        DateTime.__init__(self, date)
        self.tz.local = 0


class Timezone:
    """
    Timezone object used to do TZ conversions
    @cvar local: The (A) local TZ offset.
    @type local: int
    @cvar pattern: The regex pattern to match TZ.
    @type pattern: re.Pattern
    """

    pattern = re.compile('([zZ])|([\-\+][0-9]{2}:[0-9]{2})')

    LOCAL = ( 0-time.timezone/60/60 )

    def __init__(self, offset=None):
        if offset is None:
            offset = self.LOCAL
        self.local = offset

    @classmethod
    def split(cls, s):
        """
        Split the TZ from string.
        @param s: A string containing a timezone
        @type s: basestring
        @return: The split parts.
        @rtype: tuple
        """
        m = cls.pattern.search(s)
        if m is None:
            return (s,)
        x = m.start(0)
        return (s[:x], s[x:])

    def adjustment(self, offset):
        """
        Get the adjustment to the I{local} TZ.
        @return: The delta between I{offset} and local TZ.
        @rtype: B{datetime}.I{timedelta}
        """
        delta = (self.local - offset)
        return dt.timedelta(hours=delta)
gpl-3.0
ashwyn/eden-message_parser
modules/ClimateDataPortal/DSL/Units.py
53
11410
# -*- coding: utf-8 -*-

import re

counted_dimension_pattern = re.compile(r"(?:\w[^\^\/ ]*)(?:\^[0-9])?")


class MeaninglessUnitsException(Exception):
    pass


class DimensionError(Exception):
    pass


class Units(object):
    """Used for dimensional and other analysis."""

    __slots__ = ("_dimensions", "_positive")

    delta_strings = ("delta ", "Δ ")

    @staticmethod
    def parsed_from(unit_string, positive = None):
        "format example: m Kg^2 / s^2"
        if positive is None:
            positive = True
        for delta_string in Units.delta_strings:
            if unit_string.startswith(delta_string):
                unit_string = unit_string[len(delta_string):]
                positive = False
                break
        dimensions = {}
        for factor, dimension_counts in zip((1, -1), unit_string.split("/")):
            for match in counted_dimension_pattern.finditer(dimension_counts):
                dimension_spec = match.group()
                if "^" in dimension_spec:
                    dimension, count = dimension_spec.split("^")
                else:
                    dimension = dimension_spec
                    count = 1
                count = factor * int(count)
                try:
                    existing_count = dimensions[dimension]
                except KeyError:
                    dimensions[dimension] = count
                else:
                    dimensions[dimension] += count
        return Units(dimensions, positive)

    def __init__(units, dimensions, positive):
        """Example:

        >>> c = Counter({"a": 4, "b": 2})
        """
        for dimension, count in dimensions.iteritems():
            if not isinstance(count, int):
                raise DimensionError(
                    "%s dimension count must be a whole number" % dimension
                )
        units._dimensions = dimensions.copy()
        units._positive = bool(positive)

    def iteritems(units):
        return units._dimensions.iteritems()

    def __repr__(units):
        return "%s({%s}, %s)" % (
            units.__class__.__name__,
            ", ".join(
                map("%r: %r".__mod__, units._dimensions.iteritems())
            ),
            units._positive
        )

    def __str__(units):
        if not units._dimensions:
            return "(dimensionless)"
        else:
            negative_dimensions = []
            positive_dimensions = []
            for dimension, count in units._dimensions.iteritems():
                if count < 0:
                    negative_dimensions.append((dimension, count))
                else:
                    positive_dimensions.append((dimension, count))
            dimension_strings = []

            def dimension_group(group):
                for dimension, count in group:
                    if " " in dimension:
                        dimension_name = "(%s)" % dimension
                    else:
                        dimension_name = dimension
                    if count == 1:
                        dimension_strings.append(dimension_name)
                    else:
                        dimension_strings.append(
                            "%s^%s" % (
                                dimension_name,
                                #"²³⁴⁵⁶⁷⁸⁹"[count-2]
                                count
                            )
                        )

            dimension_group(positive_dimensions)
            if negative_dimensions:
                dimension_strings.append("/")
                dimension_group(negative_dimensions)
            return ["Δ ", ""][units._positive] + (" ".join(dimension_strings))

    def match_dimensions_of(units, other_units):
        return (
            isinstance(other_units, WhateverUnitsAreNeeded)
            or units._dimensions == other_units._dimensions
        )

    def __eq__(units, other_units):
        return (
            units._dimensions == other_units._dimensions
            and units._positive == other_units._positive
        )

    def __ne__(units, other_units):
        return not units.__eq__(other_units)

    def __add__(units, other_units):
        # signed + signed = signed
        # positive + signed = positive
        # signed + positive = positive
        # positive + positive = positive, but nonsense (used in average)
        return Units(
            units._dimensions,
            units._positive or other_units._positive
        )

    def __sub__(units, other_units):
        # signed - signed = signed
        # positive - signed = positive
        # signed - positive = signed, but nonsense
        # positive - positive = signed
        return Units(
            units._dimensions,
            units._positive and not other_units._positive
        )

    def _mul(units, other_units, multiplier):
        result = units._dimensions.copy()
        get = units._dimensions.get
        if not isinstance(other_units, WhateverUnitsAreNeeded):
            for dimension, count in other_units.iteritems():
                result[dimension] = get(dimension, 0) + multiplier * count
                if result[dimension] == 0:
                    del result[dimension]
        return Units(
            result,
            units._positive and other_units._positive
        )

    def __mul__(units, other_units):
        # positive * positive = positive
        # positive * signed = signed
        # signed * positive = signed
        # signed * signed = signed, but nonsense
        return units._mul(other_units, 1)

    def __div__(units, other_units):
        # positive / positive = positive
        # positive / signed = signed
        # signed / positive = signed
        # signed / signed = signed
        return units._mul(other_units, -1)

    def __pow__(units, factor):
        # even = positive
        # odd = signed
        # zero is not allowed
        result = {}
        for dimension, count in units._dimensions.iteritems():
            new_count = int(count * factor)
            if new_count != float(count) * float(factor):
                raise DimensionError(
                    "Non-integral %s dimension encountered." % dimension
                )
            result[dimension] = new_count
            if result[dimension] == 0:
                del result[dimension]
        return Units(
            result,
            units._positive
        )


class WhateverUnitsAreNeeded(object):
    def __init__(units, positive = None):
        if positive is None:
            positive = True
        units._positive = positive

    def __repr__(units):
        return "(Whatever units are needed)"

    def __str__(units):
        return ""

    def match_dimensions_of(units, other_units):
        return True

    __add__ = __sub__ = __mul__ = __div__ = __pow__ = \
        lambda units, other_units: other_units

    def __eq__(units, other_units):
        return True


Dimensionless = Units({}, positive = True)

from . import Method

units = Method("units")

from . import (
    Addition, Subtraction, Multiplication, Division, Pow,
    operations, aggregations, BinaryOperator,
    Average, Sum, Minimum, Maximum, Count, StandardDeviation, Count,
    Number
)


def binop_units(binop, use_units):
    left_units = units(binop.left)
    right_units = units(binop.right)
    if left_units is not None and right_units is not None:
        use_units(left_units, right_units)
    else:
        binop.units = None


@units.implementation(Addition)
def addition_units(operation):
    def determine_units(left_units, right_units):
        if not left_units.match_dimensions_of(right_units):
            operation.units_error = MismatchedUnits(
                (operation, left_units, right_units)
            )
            operation.units = None
        else:
            operation.units = left_units + right_units
    binop_units(operation, determine_units)
    return operation.units


@units.implementation(Subtraction)
def subtract_units(operation):
    def determine_units(left_units, right_units):
        if not left_units.match_dimensions_of(right_units):
            operation.units_error = (
                "Incompatible units: %(left_units)s and %(right_units)s" % locals()
            )
            operation.units = None
        else:
            operation.units = left_units - right_units
    binop_units(operation, determine_units)
    return operation.units


@units.implementation(Multiplication)
def multiply_units(operation):
    binop_units(operation,
        lambda left_units, right_units:
            setattr(operation, "units", left_units * right_units)
    )
    return operation.units


@units.implementation(Division)
def divide_units(operation):
    binop_units(operation,
        lambda left_units, right_units:
            setattr(operation, "units", left_units / right_units)
    )
    return operation.units


@units.implementation(Pow)
def raise_units_to_power(operation):
    def determine_units(left_units, right_units):
        if right_units is WhateverUnitsAreNeeded:
            operation.right_units = right_units = Dimensionless
        if right_units == Dimensionless:
            operation.units = left_units ** operation.right
        else:
            operation.units_error = "Exponents must be dimensionless, of the form n or 1/n"
            operation.units = None
    binop_units(operation, determine_units)
    return operation.units


@units.implementation(Average, Sum, Minimum, Maximum)
def aggregation_units(aggregation):
    aggregation.units = Units(
        {aggregation.sample_table.units_name: 1},
        True  # affine
    )
    return aggregation.units


@units.implementation(StandardDeviation)
def stddev_determine_units(aggregation):
    aggregation.units = Units(
        {aggregation.sample_table.units_name: 1},
        False  # displacement
    )
    return aggregation.units


@units.implementation(Count)
def count_units(count):
    count.units = Dimensionless
    return count.units

# CV would also be constant units


@units.implementation(Number)
def number_units(number):
    return number.units  # set in constructor


@units.implementation(int, float)
def primitive_number_units(number):
    return WhateverUnitsAreNeeded


analysis = Method("analysis")


@analysis.implementation(Number)
def Number_analysis(number, out):
    out(number.value, " ", number.units)


@analysis.implementation(*operations)
def Binop_analysis(binop, out):
    def indent(*strings):
        out(" ", *strings)
    out("(")
    analysis(binop.left, indent)
    indent(binop.op)
    analysis(binop.right, indent)
    out(") # ", binop.units or "???")
    if hasattr(binop, "units_error"):
        out("# ", binop.units_error)


@analysis.implementation(*aggregations)
def aggregation_analysis(aggregation, out):
    out(type(aggregation).__name__, "( # ", aggregation.units or "???")
    def indent(*strings):
        out(" ", *strings)
    indent(str(aggregation.sample_table), ",")
    for specification in aggregation.specification:
        indent(specification, ",")
    out(")")


@analysis.implementation(int, float)
def primitive_number_analysis(number, out):
    out(number)
mit
imprazaguy/bluetool
test/ts/TP_CON_MAS_BV-37-C.py
1
3830
# TP/CON/MAS/BV-37-C [Master Data Length Update - minimum Receive Data Channel
# PDU length and time]
#
# Verify that the IUT as Master correctly handles reception of an LL_LENGTH_REQ
# PDU
import bluetool
from bluetool.core import HCIDataTransCoordinator, HCIDataTransWorker, LEHelper
import bluetool.bluez as bluez
import bluetool.command as btcmd
import bluetool.event as btevt
import bluetool.error as bterr
from bluetool.utils import bytes2str, htole16

CONN_TIMEOUT_MS = 10000


class HCIVendorWriteLocalMaxRxOctets(btcmd.HCIVendorCommand,
                                     btcmd.CmdCompltEvtParamUnpacker):
    ocf = 0x85

    def __init__(self, local_max_rx_octets):
        super(HCIVendorWriteLocalMaxRxOctets, self).__init__()
        self.local_max_rx_octets = local_max_rx_octets

    def pack_param(self):
        return ''.join((htole16(self.local_max_rx_octets)))

btevt.register_cmd_complt_evt(HCIVendorWriteLocalMaxRxOctets)


class IUT(HCIDataTransWorker):
    def main(self):
        peer_addr = self.recv()
        helper = LEHelper(self.sock)
        helper.reset()
        cmd = btcmd.HCILEWriteSuggestedDefaultDataLength(100, (100 + 14) * 8)
        helper.send_hci_cmd_wait_cmd_complt_check_status(cmd)
        helper.create_connection_by_peer_addr(0, peer_addr, 60, 0, 200, 50)
        evt = helper.wait_connection_complete()
        if evt.status != 0:
            raise bterr.TestError(
                'connection fail: status: 0x{:02x}'.format(evt.status))
        self.log.info('connect to %s', bytes2str(evt.peer_addr))
        conn_handle = evt.conn_handle
        helper.wait_le_event(bluez.EVT_LE_DATA_LEN_CHANGE)
        self.send(conn_handle)
        self.wait()  # Wait lower tester to connect
        helper.wait_le_event(bluez.EVT_LE_DATA_LEN_CHANGE)
        self.wait()  # Wait lower tester to finish data length update
        self.test_acl_trans_recv(CONN_TIMEOUT_MS / 1000)
        helper.disconnect(conn_handle, 0x13)
        helper.wait_disconnection_complete(conn_handle)


class LowerTester(HCIDataTransWorker):
    def main(self):
        helper = LEHelper(self.sock)
        helper.reset()
        cmd = HCIVendorWriteLocalMaxRxOctets(100)
        helper.send_hci_cmd_wait_cmd_complt_check_status(cmd)
        helper.start_advertising(0xA0)
        evt = helper.wait_connection_complete()
        if evt.status != 0:
            raise bterr.TestError(
                'connection fail: status: 0x{:02x}'.format(evt.status))
        self.log.info('connect to %s', bytes2str(evt.peer_addr))
        conn_handle = evt.conn_handle
        self.send(conn_handle)
        helper.wait_le_event(bluez.EVT_LE_DATA_LEN_CHANGE)
        helper.set_data_len(conn_handle, 251)
        helper.wait_le_event(bluez.EVT_LE_DATA_LEN_CHANGE)
        self.signal()  # Trigger next step
        self.test_acl_trans_send(CONN_TIMEOUT_MS / 1000)
        helper.wait_disconnection_complete(conn_handle, CONN_TIMEOUT_MS)


class TestManager(HCIDataTransCoordinator):
    def main(self):
        self.iut.send(self.lt.bd_addr)
        recv_conn_handle = self.iut.recv()
        # Wait lower tester connection establishment
        send_conn_handle = self.lt.recv()
        self.iut.signal()
        # Wait lower tester data length update
        self.lt.wait()
        self.iut.signal()

        acl_list = self.create_test_acl_data(send_conn_handle, 1, 251)
        succeeded = self.test_acl_trans(self.lt, self.iut, recv_conn_handle,
                                        acl_list, CONN_TIMEOUT_MS / 1000)
        if succeeded:
            return 0
        return 1

bluetest = {
    'coordinator': TestManager,
    'worker': [
        ('iut', IUT),
        ('lt', LowerTester)
    ]
}

if __name__ == "__main__":
    bluetool.log_to_stream()
    bluetool.run_config(bluetest, [0, 1])
mit
harisibrahimkv/django
tests/postgres_tests/test_aggregates.py
36
13844
import json

from django.db.models.expressions import F, Value
from django.test.testcases import skipUnlessDBFeature
from django.test.utils import Approximate

from . import PostgreSQLTestCase
from .models import AggregateTestModel, StatTestModel

try:
    from django.contrib.postgres.aggregates import (
        ArrayAgg, BitAnd, BitOr, BoolAnd, BoolOr, Corr, CovarPop, JSONBAgg,
        RegrAvgX, RegrAvgY, RegrCount, RegrIntercept, RegrR2, RegrSlope,
        RegrSXX, RegrSXY, RegrSYY, StatAggregate, StringAgg,
    )
except ImportError:
    pass  # psycopg2 is not installed


class TestGeneralAggregate(PostgreSQLTestCase):
    @classmethod
    def setUpTestData(cls):
        AggregateTestModel.objects.create(boolean_field=True, char_field='Foo1', integer_field=0)
        AggregateTestModel.objects.create(boolean_field=False, char_field='Foo2', integer_field=1)
        AggregateTestModel.objects.create(boolean_field=False, char_field='Foo3', integer_field=2)
        AggregateTestModel.objects.create(boolean_field=True, char_field='Foo4', integer_field=0)

    def test_array_agg_charfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
        self.assertEqual(values, {'arrayagg': ['Foo1', 'Foo2', 'Foo3', 'Foo4']})

    def test_array_agg_integerfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
        self.assertEqual(values, {'arrayagg': [0, 1, 2, 0]})

    def test_array_agg_booleanfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
        self.assertEqual(values, {'arrayagg': [True, False, False, True]})

    def test_array_agg_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
        self.assertEqual(values, {'arrayagg': []})
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
        self.assertEqual(values, {'arrayagg': []})
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
        self.assertEqual(values, {'arrayagg': []})

    def test_bit_and_general(self):
        values = AggregateTestModel.objects.filter(
            integer_field__in=[0, 1]).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 0})

    def test_bit_and_on_only_true_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=1).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 1})

    def test_bit_and_on_only_false_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=0).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 0})

    def test_bit_and_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': None})

    def test_bit_or_general(self):
        values = AggregateTestModel.objects.filter(
            integer_field__in=[0, 1]).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 1})

    def test_bit_or_on_only_true_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=1).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 1})

    def test_bit_or_on_only_false_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=0).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 0})

    def test_bit_or_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': None})

    def test_bool_and_general(self):
        values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
        self.assertEqual(values, {'booland': False})

    def test_bool_and_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
        self.assertEqual(values, {'booland': None})

    def test_bool_or_general(self):
        values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
        self.assertEqual(values, {'boolor': True})

    def test_bool_or_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
        self.assertEqual(values, {'boolor': None})

    def test_string_agg_requires_delimiter(self):
        with self.assertRaises(TypeError):
            AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field'))

    def test_string_agg_charfield(self):
        values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
        self.assertEqual(values, {'stringagg': 'Foo1;Foo2;Foo3;Foo4'})

    def test_string_agg_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
        self.assertEqual(values, {'stringagg': ''})

    @skipUnlessDBFeature('has_jsonb_agg')
    def test_json_agg(self):
        values = AggregateTestModel.objects.aggregate(jsonagg=JSONBAgg('char_field'))
        self.assertEqual(values, {'jsonagg': ['Foo1', 'Foo2', 'Foo3', 'Foo4']})

    @skipUnlessDBFeature('has_jsonb_agg')
    def test_json_agg_empty(self):
        values = AggregateTestModel.objects.none().aggregate(jsonagg=JSONBAgg('integer_field'))
        self.assertEqual(values, json.loads('{"jsonagg": []}'))


class TestAggregateDistinct(PostgreSQLTestCase):
    @classmethod
    def setUpTestData(cls):
        AggregateTestModel.objects.create(char_field='Foo')
        AggregateTestModel.objects.create(char_field='Foo')
        AggregateTestModel.objects.create(char_field='Bar')

    def test_string_agg_distinct_false(self):
        values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=' ', distinct=False))
        self.assertEqual(values['stringagg'].count('Foo'), 2)
        self.assertEqual(values['stringagg'].count('Bar'), 1)

    def test_string_agg_distinct_true(self):
        values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=' ', distinct=True))
        self.assertEqual(values['stringagg'].count('Foo'), 1)
        self.assertEqual(values['stringagg'].count('Bar'), 1)

    def test_array_agg_distinct_false(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field', distinct=False))
        self.assertEqual(sorted(values['arrayagg']), ['Bar', 'Foo', 'Foo'])

    def test_array_agg_distinct_true(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field', distinct=True))
        self.assertEqual(sorted(values['arrayagg']), ['Bar', 'Foo'])


class TestStatisticsAggregate(PostgreSQLTestCase):
    @classmethod
    def setUpTestData(cls):
        StatTestModel.objects.create(
            int1=1, int2=3,
            related_field=AggregateTestModel.objects.create(integer_field=0),
        )
        StatTestModel.objects.create(
            int1=2, int2=2,
            related_field=AggregateTestModel.objects.create(integer_field=1),
        )
        StatTestModel.objects.create(
            int1=3, int2=1,
            related_field=AggregateTestModel.objects.create(integer_field=2),
        )

    # Tests for base class (StatAggregate)

    def test_missing_arguments_raises_exception(self):
        with self.assertRaisesMessage(ValueError, 'Both y and x must be provided.'):
            StatAggregate(x=None, y=None)

    def test_correct_source_expressions(self):
        func = StatAggregate(x='test', y=13)
        self.assertIsInstance(func.source_expressions[0], Value)
        self.assertIsInstance(func.source_expressions[1], F)

    def test_alias_is_required(self):
        class SomeFunc(StatAggregate):
            function = 'TEST'
        with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
            StatTestModel.objects.aggregate(SomeFunc(y='int2', x='int1'))

    # Test aggregates

    def test_corr_general(self):
        values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
        self.assertEqual(values, {'corr': -1.0})

    def test_corr_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
        self.assertEqual(values, {'corr': None})

    def test_covar_pop_general(self):
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
        self.assertEqual(values, {'covarpop': Approximate(-0.66, places=1)})

    def test_covar_pop_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
        self.assertEqual(values, {'covarpop': None})

    def test_covar_pop_sample(self):
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
        self.assertEqual(values, {'covarpop': -1.0})

    def test_covar_pop_sample_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
        self.assertEqual(values, {'covarpop': None})

    def test_regr_avgx_general(self):
        values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
        self.assertEqual(values, {'regravgx': 2.0})

    def test_regr_avgx_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
        self.assertEqual(values, {'regravgx': None})

    def test_regr_avgy_general(self):
        values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
        self.assertEqual(values, {'regravgy': 2.0})

    def test_regr_avgy_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
        self.assertEqual(values, {'regravgy': None})

    def test_regr_count_general(self):
        values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
        self.assertEqual(values, {'regrcount': 3})

    def test_regr_count_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
        self.assertEqual(values, {'regrcount': 0})

    def test_regr_intercept_general(self):
        values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
        self.assertEqual(values, {'regrintercept': 4})

    def test_regr_intercept_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
        self.assertEqual(values, {'regrintercept': None})

    def test_regr_r2_general(self):
        values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
        self.assertEqual(values, {'regrr2': 1})

    def test_regr_r2_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
        self.assertEqual(values, {'regrr2': None})

    def test_regr_slope_general(self):
        values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
        self.assertEqual(values, {'regrslope': -1})

    def test_regr_slope_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
        self.assertEqual(values, {'regrslope': None})

    def test_regr_sxx_general(self):
        values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxx': 2.0})

    def test_regr_sxx_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxx': None})

    def test_regr_sxy_general(self):
        values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxy': -2.0})

    def test_regr_sxy_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxy': None})

    def test_regr_syy_general(self):
        values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsyy': 2.0})

    def test_regr_syy_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsyy': None})

    def test_regr_avgx_with_related_obj_and_number_as_argument(self):
        """
        This is a more complex test to check if a JOIN on a field and a
        number as argument works as expected.
        """
        values = StatTestModel.objects.aggregate(complex_regravgx=RegrAvgX(y=5, x='related_field__integer_field'))
        self.assertEqual(values, {'complex_regravgx': 1.0})
bsd-3-clause
mayapurmedia/wagtail
wagtail/wagtailadmin/tests/test_userbar.py
4
5151
from django.test import TestCase
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser

from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Page, PAGE_TEMPLATE_VAR
from wagtail.tests.testapp.models import BusinessIndex, BusinessChild


class TestUserbarTag(TestCase):
    def setUp(self):
        self.user = get_user_model().objects.create_superuser(
            username='test', email='[email protected]', password='password')
        self.homepage = Page.objects.get(id=2)

    def dummy_request(self, user=None):
        request = RequestFactory().get('/')
        request.user = user or AnonymousUser()
        return request

    def test_userbar_tag(self):
        template = Template("{% load wagtailuserbar %}{% wagtailuserbar %}")
        content = template.render(Context({
            PAGE_TEMPLATE_VAR: self.homepage,
            'request': self.dummy_request(self.user),
        }))
        self.assertIn("<!-- Wagtail user bar embed code -->", content)

    def test_userbar_tag_self(self):
        """
        Ensure the userbar renders with `self` instead of `PAGE_TEMPLATE_VAR`
        """
        template = Template("{% load wagtailuserbar %}{% wagtailuserbar %}")
        content = template.render(Context({
            'self': self.homepage,
            'request': self.dummy_request(self.user),
        }))
        self.assertIn("<!-- Wagtail user bar embed code -->", content)

    def test_userbar_tag_anonymous_user(self):
        template = Template("{% load wagtailuserbar %}{% wagtailuserbar %}")
        content = template.render(Context({
            PAGE_TEMPLATE_VAR: self.homepage,
            'request': self.dummy_request(),
        }))
        # Make sure nothing was rendered
        self.assertEqual(content, '')


class TestUserbarFrontend(TestCase, WagtailTestUtils):
    def setUp(self):
        self.login()
        self.homepage = Page.objects.get(id=2)

    def test_userbar_frontend(self):
        response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.homepage.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/userbar/base.html')

    def test_userbar_frontend_anonymous_user_cannot_see(self):
        # Logout
        self.client.logout()
        response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.homepage.id, )))
        # Check that the user received a forbidden message
        self.assertEqual(response.status_code, 403)


class TestUserbarAddLink(TestCase, WagtailTestUtils):
    fixtures = ['test.json']

    def setUp(self):
        self.login()
        self.homepage = Page.objects.get(url_path='/home/')
        self.event_index = Page.objects.get(url_path='/home/events/')

        self.business_index = BusinessIndex(title='Business', slug='business', live=True)
        self.homepage.add_child(instance=self.business_index)

        self.business_child = BusinessChild(title='Business Child', slug='child', live=True)
        self.business_index.add_child(instance=self.business_child)

    def test_page_allowing_subpages(self):
        response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.event_index.id, )))

        # page allows subpages, so the 'add page' button should show
        expected_url = reverse('wagtailadmin_pages:add_subpage', args=(self.event_index.id, ))
        expected_link = '<a href="%s" target="_parent" class="action icon icon-plus" title="Add a child page">Add</a>' % expected_url
        self.assertContains(response, expected_link)

    def test_page_disallowing_subpages(self):
        response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.business_child.id, )))

        # page disallows subpages, so the 'add page' button shouldn't show
        expected_url = reverse('wagtailadmin_pages:add_subpage', args=(self.business_index.id, ))
        expected_link = '<a href="%s" target="_parent" class="action icon icon-plus" title="Add a child page">Add</a>' % expected_url
        self.assertNotContains(response, expected_link)


class TestUserbarModeration(TestCase, WagtailTestUtils):
    def setUp(self):
        self.login()
        self.homepage = Page.objects.get(id=2)
        self.homepage.save_revision()
        self.revision = self.homepage.get_latest_revision()

    def test_userbar_moderation(self):
        response = self.client.get(reverse('wagtailadmin_userbar_moderation', args=(self.revision.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/userbar/base.html')

    def test_userbar_moderation_anonymous_user_cannot_see(self):
        # Logout
        self.client.logout()
        response = self.client.get(reverse('wagtailadmin_userbar_moderation', args=(self.revision.id, )))
        # Check that the user received a forbidden message
        self.assertEqual(response.status_code, 403)
bsd-3-clause
arborh/tensorflow
tensorflow/python/data/kernel_tests/list_files_test.py
4
8898
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.list_files()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from os import path
import shutil
import tempfile

from absl.testing import parameterized

from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.util import compat


class ListFilesTest(test_base.DatasetTestBase, parameterized.TestCase):

  def setUp(self):
    self.tmp_dir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.tmp_dir, ignore_errors=True)

  def _touchTempFiles(self, filenames):
    for filename in filenames:
      open(path.join(self.tmp_dir, filename), 'a').close()

  @combinations.generate(test_base.default_test_combinations())
  def testEmptyDirectory(self):
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
                                             'No files matched'):
      dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
      # We need requires_initialization=True so that getNext uses
      # make_initializable_iterator instead of make_one_shot_iterator.
      # make_one_shot_iterator has an issue where it fails to capture control
      # dependencies when capturing the dataset, so it loses the assertion that
      # list_files matches at least one file.
      # TODO(b/140837601): Make this work with make_one_shot_iterator.
      self.getNext(dataset, requires_initialization=True)

  @combinations.generate(test_base.default_test_combinations())
  def testSimpleDirectory(self):
    filenames = ['a', 'b', 'c']
    self._touchTempFiles(filenames)

    dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
    self.assertDatasetProduces(
        dataset,
        expected_output=[
            compat.as_bytes(path.join(self.tmp_dir, filename))
            for filename in filenames
        ],
        assert_items_equal=True)

  @combinations.generate(test_base.default_test_combinations())
  def testSimpleDirectoryNotShuffled(self):
    filenames = ['b', 'c', 'a']
    self._touchTempFiles(filenames)

    dataset = dataset_ops.Dataset.list_files(
        path.join(self.tmp_dir, '*'), shuffle=False)
    self.assertDatasetProduces(
        dataset,
        expected_output=[
            compat.as_bytes(path.join(self.tmp_dir, filename))
            for filename in sorted(filenames)
        ])

  def testFixedSeedResultsInRepeatableOrder(self):
    filenames = ['a', 'b', 'c']
    self._touchTempFiles(filenames)

    def dataset_fn():
      return dataset_ops.Dataset.list_files(
          path.join(self.tmp_dir, '*'), shuffle=True, seed=37)

    expected_filenames = [
        compat.as_bytes(path.join(self.tmp_dir, filename))
        for filename in filenames
    ]

    all_actual_filenames = []
    for _ in range(3):
      actual_filenames = []
      next_element = self.getNext(dataset_fn(), requires_initialization=True)
      try:
        while True:
          actual_filenames.append(self.evaluate(next_element()))
      except errors.OutOfRangeError:
        pass
      all_actual_filenames.append(actual_filenames)

    # Each run should produce the same set of filenames, which may be
    # different from the order of `expected_filenames`.
    self.assertItemsEqual(expected_filenames, all_actual_filenames[0])
    # However, the different runs should produce filenames in the same order
    # as each other.
    self.assertEqual(all_actual_filenames[0], all_actual_filenames[1])
    self.assertEqual(all_actual_filenames[0], all_actual_filenames[2])

  @combinations.generate(test_base.default_test_combinations())
  def testEmptyDirectoryInitializer(self):

    def dataset_fn():
      return dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))

    self.assertDatasetProduces(
        dataset_fn(),
        expected_error=(errors.InvalidArgumentError,
                        'No files matched pattern'),
        requires_initialization=True)

  @combinations.generate(test_base.default_test_combinations())
  def testSimpleDirectoryInitializer(self):
    filenames = ['a', 'b', 'c']
    self._touchTempFiles(filenames)

    dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
    self.assertDatasetProduces(
        dataset,
        expected_output=[
            compat.as_bytes(path.join(self.tmp_dir, filename))
            for filename in filenames
        ],
        assert_items_equal=True)

  @combinations.generate(test_base.default_test_combinations())
  def testFileSuffixes(self):
    filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
    self._touchTempFiles(filenames)

    dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*.py'))
    self.assertDatasetProduces(
        dataset,
        expected_output=[
            compat.as_bytes(path.join(self.tmp_dir, filename))
            for filename in filenames[1:-1]
        ],
        assert_items_equal=True)

  @combinations.generate(test_base.default_test_combinations())
  def testFileMiddles(self):
    filenames = ['a.txt', 'b.py', 'c.pyc']
    self._touchTempFiles(filenames)

    dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*.py*'))
    self.assertDatasetProduces(
        dataset,
        expected_output=[
            compat.as_bytes(path.join(self.tmp_dir, filename))
            for filename in filenames[1:]
        ],
        assert_items_equal=True)

  @combinations.generate(test_base.default_test_combinations())
  def testNoShuffle(self):
    filenames = ['a', 'b', 'c']
    self._touchTempFiles(filenames)

    # Repeat the list twice and ensure that the order is the same each time.
    # NOTE(mrry): This depends on an implementation detail of `list_files()`,
    # which is that the list of files is captured when the iterator is
    # initialized. Otherwise, or if e.g. the iterator were initialized more
    # than once, it's possible that the non-determinism of `tf.matching_files()`
    # would cause this test to fail. However, it serves as a useful confirmation
    # that the `shuffle=False` argument is working as intended.
    # TODO(b/73959787): Provide some ordering guarantees so that this test is
    # more meaningful.
    dataset = dataset_ops.Dataset.list_files(
        path.join(self.tmp_dir, '*'), shuffle=False).repeat(2)
    next_element = self.getNext(dataset)

    expected_filenames = []
    actual_filenames = []
    for filename in filenames * 2:
      expected_filenames.append(
          compat.as_bytes(path.join(self.tmp_dir, filename)))
      actual_filenames.append(compat.as_bytes(self.evaluate(next_element())))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element())
    self.assertItemsEqual(expected_filenames, actual_filenames)
    self.assertEqual(actual_filenames[:len(filenames)],
                     actual_filenames[len(filenames):])

  @combinations.generate(test_base.default_test_combinations())
  def testMultiplePatternsAsList(self):
    filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
    self._touchTempFiles(filenames)

    patterns = [path.join(self.tmp_dir, pat) for pat in ['*.py', '*.txt']]
    dataset = dataset_ops.Dataset.list_files(patterns)
    self.assertDatasetProduces(
        dataset,
        expected_output=[
            compat.as_bytes(path.join(self.tmp_dir, filename))
            for filename in filenames[:-1]
        ],
        assert_items_equal=True)

  @combinations.generate(test_base.default_test_combinations())
  def testMultiplePatternsAsTensor(self):
    filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
    self._touchTempFiles(filenames)

    dataset = dataset_ops.Dataset.list_files(
        [path.join(self.tmp_dir, pat) for pat in ['*.py', '*.txt']])
    self.assertDatasetProduces(
        dataset,
        expected_output=[
            compat.as_bytes(path.join(self.tmp_dir, filename))
            for filename in filenames[:-1]
        ],
        assert_items_equal=True)


if __name__ == '__main__':
  test.main()
apache-2.0
buguelos/odoo
openerp/addons/base/ir/ir_rule.py
312
8048
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp import SUPERUSER_ID from openerp import tools from openerp.osv import fields, osv, expression from openerp.tools.safe_eval import safe_eval as eval from openerp.tools.misc import unquote as unquote class ir_rule(osv.osv): _name = 'ir.rule' _order = 'name' _MODES = ['read', 'write', 'create', 'unlink'] def _eval_context_for_combinations(self): """Returns a dictionary to use as evaluation context for ir.rule domains, when the goal is to obtain python lists that are easier to parse and combine, but not to actually execute them.""" return {'user': unquote('user'), 'time': unquote('time')} def _eval_context(self, cr, uid): """Returns a dictionary to use as evaluation context for ir.rule domains.""" return {'user': self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid), 'time':time} def _domain_force_get(self, cr, uid, ids, field_name, arg, context=None): res = {} eval_context = self._eval_context(cr, uid) for rule in self.browse(cr, uid, ids, context): if rule.domain_force: res[rule.id] = expression.normalize_domain(eval(rule.domain_force, eval_context)) else: res[rule.id] = [] return res def _get_value(self, cr, uid, ids, field_name, arg, context=None): res = {} for rule in self.browse(cr, uid, ids, context): if not rule.groups: res[rule.id] = True else: res[rule.id] = False return res def _check_model_obj(self, cr, uid, ids, context=None): return not any(self.pool[rule.model_id.model].is_transient() for rule in self.browse(cr, uid, ids, context)) def _check_model_name(self, cr, uid, ids, context=None): # Don't allow rules on rules records (this model). 
return not any(rule.model_id.model == self._name for rule in self.browse(cr, uid, ids, context)) _columns = { 'name': fields.char('Name', select=1), 'active': fields.boolean('Active', help="If you uncheck the active field, it will disable the record rule without deleting it (if you delete a native record rule, it may be re-created when you reload the module."), 'model_id': fields.many2one('ir.model', 'Object',select=1, required=True, ondelete="cascade"), 'global': fields.function(_get_value, string='Global', type='boolean', store=True, help="If no group is specified the rule is global and applied to everyone"), 'groups': fields.many2many('res.groups', 'rule_group_rel', 'rule_group_id', 'group_id', 'Groups'), 'domain_force': fields.text('Domain'), 'domain': fields.function(_domain_force_get, string='Domain', type='binary'), 'perm_read': fields.boolean('Apply for Read'), 'perm_write': fields.boolean('Apply for Write'), 'perm_create': fields.boolean('Apply for Create'), 'perm_unlink': fields.boolean('Apply for Delete') } _order = 'model_id DESC' _defaults = { 'active': True, 'perm_read': True, 'perm_write': True, 'perm_create': True, 'perm_unlink': True, 'global': True, } _sql_constraints = [ ('no_access_rights', 'CHECK (perm_read!=False or perm_write!=False or perm_create!=False or perm_unlink!=False)', 'Rule must have at least one checked access right !'), ] _constraints = [ (_check_model_obj, 'Rules can not be applied on Transient models.', ['model_id']), (_check_model_name, 'Rules can not be applied on the Record Rules model.', ['model_id']), ] @tools.ormcache() def _compute_domain(self, cr, uid, model_name, mode="read"): if mode not in self._MODES: raise ValueError('Invalid mode: %r' % (mode,)) if uid == SUPERUSER_ID: return None cr.execute("""SELECT r.id FROM ir_rule r JOIN ir_model m ON (r.model_id = m.id) WHERE m.model = %s AND r.active is True AND r.perm_""" + mode + """ AND (r.id IN (SELECT rule_group_id FROM rule_group_rel g_rel JOIN res_groups_users_rel u_rel ON (g_rel.group_id = u_rel.gid) WHERE u_rel.uid = %s) OR r.global)""", (model_name, uid)) rule_ids = [x[0] for x in cr.fetchall()] if rule_ids: # browse user as super-admin root to avoid access errors! user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid) global_domains = [] # list of domains group_domains = {} # map: group -> list of domains for rule in self.browse(cr, SUPERUSER_ID, rule_ids): # read 'domain' as UID to have the correct eval context for the rule. rule_domain = self.read(cr, uid, [rule.id], ['domain'])[0]['domain'] dom = expression.normalize_domain(rule_domain) for group in rule.groups: if group in user.groups_id: group_domains.setdefault(group, []).append(dom) if not rule.groups: global_domains.append(dom) # combine global domains and group domains if group_domains: group_domain = expression.OR(map(expression.OR, group_domains.values())) else: group_domain = [] domain = expression.AND(global_domains + [group_domain]) return domain return [] def clear_cache(self, cr, uid): self._compute_domain.clear_cache(self) def domain_get(self, cr, uid, model_name, mode='read', context=None): dom = self._compute_domain(cr, uid, model_name, mode) if dom: # _where_calc is called as superuser. This means that rules can # involve objects on which the real uid has no acces rights. # This means also there is no implicit restriction (e.g. an object # references another object the user can't see). 
            query = self.pool[model_name]._where_calc(cr, SUPERUSER_ID, dom, active_test=False)
            return query.where_clause, query.where_clause_params, query.tables
        return [], [], ['"' + self.pool[model_name]._table + '"']

    def unlink(self, cr, uid, ids, context=None):
        res = super(ir_rule, self).unlink(cr, uid, ids, context=context)
        self.clear_cache(cr, uid)
        return res

    def create(self, cr, uid, vals, context=None):
        res = super(ir_rule, self).create(cr, uid, vals, context=context)
        self.clear_cache(cr, uid)
        return res

    def write(self, cr, uid, ids, vals, context=None):
        res = super(ir_rule, self).write(cr, uid, ids, vals, context=context)
        self.clear_cache(cr, uid)
        return res

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
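# A self-contained sketch of the combination rule implemented by
# _compute_domain() above: domains of group rules are OR-ed together (within
# and across groups), then AND-ed with every global rule. The AND/OR helpers
# below are simplified stand-ins for openerp.osv.expression.AND/OR, which
# emit prefix-notation operators; this is an illustration, not the module's
# actual implementation.

def AND(domains):
    # join non-empty normalized domains with the '&' prefix operator
    domains = [d for d in domains if d]
    if len(domains) <= 1:
        return domains[0] if domains else []
    result = ['&'] * (len(domains) - 1)
    for d in domains:
        result.extend(d)
    return result

def OR(domains):
    # join non-empty normalized domains with the '|' prefix operator
    domains = [d for d in domains if d]
    if len(domains) <= 1:
        return domains[0] if domains else []
    result = ['|'] * (len(domains) - 1)
    for d in domains:
        result.extend(d)
    return result

# One global rule plus two group rules the user matches (made-up domains):
global_domains = [[('company_id', '=', 1)]]
group_domains = {'sales': [[('user_id', '=', 42)]],
                 'managers': [[('state', '!=', 'draft')]]}
group_domain = OR([OR(doms) for doms in group_domains.values()])
print(AND(global_domains + [group_domain]))
# ['&', ('company_id', '=', 1), '|', ('user_id', '=', 42), ('state', '!=', 'draft')]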
agpl-3.0
jbradberry/django-turn-generation
sample_project/sample_app/plugins.py
1
1455
from django.contrib.contenttypes.models import ContentType from . import models class TurnGeneration(object): realm_types = { 'testrealm': 'sample_app.testrealm', } agent_types = { 'testagent': 'sample_app.testagent', } permissions = { 'turngeneration.add_generator': '_is_host', 'turngeneration.change_generator': '_is_host', 'turngeneration.delete_generator': '_is_host', 'turngeneration.add_generationrule': '_is_host', 'turngeneration.change_generationrule': '_is_host', 'turngeneration.delete_generationrule': '_is_host', 'turngeneration.add_pause': '_is_player', 'turngeneration.change_pause': '_is_player', 'turngeneration.delete_pause': '_is_player', 'turngeneration.add_ready': '_is_player', 'turngeneration.change_ready': '_is_player', 'turngeneration.delete_ready': '_is_player', } def related_agents(self, realm, agent_type=None): ct = ContentType.objects.get_for_model(models.TestAgent) if agent_type is None: agent_type = ct if agent_type != ct: return return realm.agents.all() def _is_host(self, user, obj): return user.is_staff def _is_player(self, user, obj): return obj.user == user def auto_generate(self, realm): realm.generate() def force_generate(self, realm): realm.generate()
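# Hedged sketch of how a caller might dispatch a check through the
# `permissions` mapping above; `check_permission` is a hypothetical helper
# written for illustration, not part of django-turn-generation's documented
# API.
def check_permission(plugin, perm, user, obj):
    method_name = plugin.permissions.get(perm)
    if method_name is None:
        return False  # unknown permission: deny by default
    # resolve the string to the bound method and call it
    return getattr(plugin, method_name)(user, obj)

# e.g.: check_permission(TurnGeneration(), 'turngeneration.add_pause', user, pause)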
mit
madflow/weblate
weblate/trans/tests/test_widgets.py
11
3474
# -*- coding: utf-8 -*- # # Copyright © 2012 - 2015 Michal Čihař <[email protected]> # # This file is part of Weblate <http://weblate.org/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Tests for widgets. """ from weblate.trans.tests.test_views import ViewTestCase from weblate.trans.views.widgets import WIDGETS from django.core.urlresolvers import reverse class WidgetsTest(ViewTestCase): ''' Testing of widgets. ''' def test_view_widgets_root(self): response = self.client.get( reverse('widgets_root') ) self.assertContains(response, 'Test') def test_view_widgets(self): response = self.client.get( reverse('widgets', kwargs=self.kw_project) ) self.assertContains(response, 'Test') def test_view_widgets_lang(self): response = self.client.get( reverse('widgets', kwargs=self.kw_project), {'lang': 'cs'} ) self.assertContains(response, 'Test') def test_view_engage(self): response = self.client.get( reverse('engage', kwargs=self.kw_project) ) self.assertContains(response, 'Test') def test_view_engage_lang(self): response = self.client.get( reverse('engage-lang', kwargs=self.kw_lang_project) ) self.assertContains(response, 'Test') def assert_widget(self, widget, response): if hasattr(WIDGETS[widget], 'redirect'): self.assertEqual(response.status_code, 302) elif 'svg' in WIDGETS[widget].content_type: self.assertSVG(response) else: self.assertPNG(response) def test_view_widget_image(self): for widget in WIDGETS: for color in WIDGETS[widget].colors: response = self.client.get( reverse( 'widget-image', kwargs={ 'project': self.project.slug, 'widget': widget, 'color': color, 'extension': 'png', } ) ) self.assert_widget(widget, response) def test_view_widget_image_lang(self): for widget in WIDGETS: for color in WIDGETS[widget].colors: response = self.client.get( reverse( 'widget-image-lang', kwargs={ 'project': self.project.slug, 'widget': widget, 'color': color, 'lang': 'cs', 'extension': 'png', } ) ) self.assert_widget(widget, response)
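# From the assertions above one can infer the minimal shape a WIDGETS entry
# needs: a `colors` iterable, a `content_type` string, and optionally a
# `redirect` attribute. An illustrative stand-in (an assumption for clarity,
# not Weblate's actual widget classes):
class FakeWidget(object):
    colors = ('white', 'black')
    content_type = 'image/svg+xml'  # 'svg' here makes assert_widget expect SVG

# A registry like {'fake': FakeWidget} would be exercised by the loops above.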
gpl-3.0
pawaranand/phr-frappe
frappe/modules/import_file.py
28
3039
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

from __future__ import unicode_literals

import frappe, os, json

from frappe.modules import get_module_path, scrub_dt_dn
from frappe.utils import get_datetime_str

def import_files(module, dt=None, dn=None, force=False):
	if type(module) is list:
		out = []
		for m in module:
			out.append(import_file(m[0], m[1], m[2], force=force))
		return out
	else:
		return import_file(module, dt, dn, force=force)

def import_file(module, dt, dn, force=False):
	"""Sync a file from txt if modified, return False if not updated"""
	path = get_file_path(module, dt, dn)
	ret = import_file_by_path(path, force)
	return ret

def get_file_path(module, dt, dn):
	dt, dn = scrub_dt_dn(dt, dn)

	path = os.path.join(get_module_path(module), os.path.join(dt, dn, dn + ".json"))

	return path

def import_file_by_path(path, force=False):
	frappe.flags.in_import = True
	docs = read_doc_from_file(path)
	if docs:
		if not isinstance(docs, list):
			docs = [docs]

		for doc in docs:
			if not force:
				# check if timestamps match
				db_modified = frappe.db.get_value(doc['doctype'], doc['name'], 'modified')
				if db_modified and doc.get('modified')==get_datetime_str(db_modified):
					return False

			original_modified = doc.get("modified")

			import_doc(doc, force=force)

			if original_modified:
				# since there is a new timestamp on the file, update timestamp in the database
				if doc["doctype"] == doc["name"] and doc["name"]!="DocType":
					frappe.db.sql("""update tabSingles set value=%s where field="modified" and doctype=%s""",
						(original_modified, doc["name"]))
				else:
					frappe.db.sql("update `tab%s` set modified=%s where name=%s" % \
						(doc['doctype'], '%s', '%s'),
						(original_modified, doc['name']))

	frappe.flags.in_import = False
	return True

def read_doc_from_file(path):
	doc = None
	if os.path.exists(path):
		with open(path, 'r') as f:
			doc = json.loads(f.read())
	else:
		raise Exception, '%s missing' % path

	return doc

ignore_values = {
	"Report": ["disabled"],
	"Print Format": ["disabled"]
}

ignore_doctypes = ["Page Role", "DocPerm"]

def import_doc(docdict, force=False):
	docdict["__islocal"] = 1
	doc = frappe.get_doc(docdict)

	ignore = []

	if frappe.db.exists(doc.doctype, doc.name):
		old_doc = frappe.get_doc(doc.doctype, doc.name)

		if doc.doctype in ignore_values and not force:
			# update ignore values
			for key in ignore_values.get(doc.doctype) or []:
				doc.set(key, old_doc.get(key))

		# keep existing child records of ignored doctypes from being overwritten
		for df in doc.meta.get_table_fields():
			if df.options in ignore_doctypes and not force:
				doc.set(df.fieldname, [])
				ignore.append(df.options)

		# delete old
		frappe.delete_doc(doc.doctype, doc.name, force=1, ignore_doctypes=ignore, for_reload=True)

	doc.ignore_children_type = ignore
	doc.ignore_links = True
	doc.ignore_validate = True
	doc.ignore_permissions = True
	doc.ignore_mandatory = True
	doc.ignore_user_permissions = True

	doc.insert()
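# Hedged usage sketch for the helpers above, assuming an initialized frappe
# site and database connection; the module/doctype/name values are
# illustrative only.
#
#   # sync one document from its JSON file (skipped when timestamps match):
#   import_file('core', 'DocType', 'User')
#
#   # or sync several at once by passing (module, dt, dn) triples;
#   # force=True bypasses the modified-timestamp check:
#   import_files([('core', 'DocType', 'User'),
#                 ('core', 'Report', 'Permitted Documents For User')], force=True)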
mit
liddiard/skry
attachments/migrations/0001_initial.py
1
3621
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.core.validators class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0002_remove_content_type_name'), ('authors', '0001_initial'), ] operations = [ migrations.CreateModel( name='Audio', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('caption', models.TextField(blank=True)), ('title', models.CharField(max_length=128)), ('file', models.FileField(upload_to=b'attachments/audio/%Y/%m/%d')), ('credit', models.ManyToManyField(related_name='news_audio', to='authors.Author', blank=True)), ], options={ 'verbose_name_plural': 'Audio', }, ), migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('caption', models.TextField(blank=True)), ('file', models.ImageField(upload_to=b'attachments/image/%Y/%m/%d')), ('request_id', models.PositiveIntegerField(null=True, blank=True)), ('credit', models.ManyToManyField(related_name='news_image', to='authors.Author', blank=True)), ('request_type', models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Poll', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('question', models.CharField(max_length=128)), ('is_open', models.BooleanField(default=True)), ], ), migrations.CreateModel( name='PollChoice', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('text', models.CharField(max_length=128)), ('votes', models.PositiveIntegerField(default=0)), ('question', models.ForeignKey(to='attachments.Poll')), ], ), migrations.CreateModel( name='Review', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('item', models.CharField(max_length=64)), ('line_1', models.CharField(max_length=128, blank=True)), ('line_2', models.CharField(max_length=128, blank=True)), ('rating', models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(10)])), ], ), migrations.CreateModel( name='Video', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('caption', models.TextField(blank=True)), ('title', models.CharField(max_length=128)), ('youtube_id', models.CharField(max_length=16)), ('credit', models.ManyToManyField(related_name='news_video', to='authors.Author', blank=True)), ], options={ 'abstract': False, }, ), ]
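# Note: the request_type/request_id pair on Image is the standard
# django.contrib.contenttypes generic-foreign-key layout. On the model class
# (not in this migration) it would typically be tied together roughly like
# this sketch, assuming Django >= 1.7:
#
#   from django.contrib.contenttypes.fields import GenericForeignKey
#
#   class Image(models.Model):
#       request_type = models.ForeignKey('contenttypes.ContentType',
#                                        null=True, blank=True)
#       request_id = models.PositiveIntegerField(null=True, blank=True)
#       request = GenericForeignKey('request_type', 'request_id')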
mit
lsaffre/lino_book
lino_book/projects/team/settings/fixtures/demo.py
1
14548
# -*- coding: UTF-8 -*- # Copyright 2015-2020 Rumma & Ko Ltd # License: BSD (see file COPYING for details) import datetime from lino.api import rt, dd, _ from lino.utils import Cycler, i2d from lino.core.roles import SiteAdmin from lino_xl.lib.cal.choicelists import DurationUnits from lino_xl.lib.working.roles import Worker from lino.utils.quantities import Duration from lino.utils.mldbc import babel_named as named from lino.modlib.users.utils import create_user from lino_xl.lib.working.choicelists import ReportingTypes from lino_xl.lib.tickets.choicelists import SiteStates from lino_xl.lib.tickets.roles import Reporter, TicketsStaff UserTypes = rt.models.users.UserTypes def vote(user, ticket, state, **kw): u = rt.models.users.User.objects.get(username=user) t = rt.models.tickets.Ticket.objects.get(pk=ticket) s = rt.models.votes.VoteStates.get_by_name(state) vote = t.get_favourite(u) if vote is None: vote = rt.models.votes.Vote(user=u, votable=t, state=s, **kw) else: vote.state = s return vote def tickets_objects(): # was previously in tickets User = rt.models.users.User Company = rt.models.contacts.Company Person = rt.models.contacts.Person # Topic = rt.models.topics.Topic TT = rt.models.tickets.TicketType Ticket = rt.models.tickets.Ticket Group = rt.models.groups.Group Membership = rt.models.groups.Membership # Competence = rt.models.tickets.Competence # Interest = rt.models.topics.Interest Milestone = dd.plugins.tickets.milestone_model # Milestone = rt.models.deploy.Milestone # Project = rt.models.tickets.Project # Site = rt.models.tickets.Site Site = dd.plugins.tickets.site_model Link = rt.models.tickets.Link LinkTypes = rt.models.tickets.LinkTypes # Subscription = rt.models.tickets.Subscription #EntryType = rt.models.blogs.EntryType #Entry = rt.models.blogs.Entry # Star = rt.models.stars.Star # Tagging = rt.models.blogs.Tagging # Line = rt.models.courses.Line List = rt.models.lists.List customer = UserTypes.customer contributor = UserTypes.contributor dev = UserTypes.developer yield create_user("marc", customer) yield create_user("mathieu", contributor) yield create_user("luc", dev) yield create_user("jean", dev) # USERS = Cycler(User.objects.all()) WORKERS = Cycler(User.objects.filter( username__in='mathieu luc jean'.split())) # END_USERS = Cycler(User.objects.filter( # user_type=rt.models.users.UserTypes.user)) reporter_types = [t for t in UserTypes.get_list_items() if t.has_required_roles([Reporter])] REPORTERS = Cycler(User.objects.filter(user_type__in=reporter_types)) yield named(Group, _("Developers")) yield named(Group, _("Managers")) yield named(Group, _("Front-end team")) yield named(TT, _("Bugfix")) yield named(TT, _("Enhancement")) yield named(TT, _("Upgrade")) # sprint = named(Line, _("Sprint")) # yield sprint TYPES = Cycler(TT.objects.all()) # yield Topic(name="Lino Core", ref="linõ") # yield Topic(name="Lino Welfare", ref="welfäre") # yield Topic(name="Lino Cosi", ref="così") # yield Topic(name="Lino Voga", ref="faggio") # ref differs from name # TOPICS = Cycler(Topic.objects.all()) RTYPES = Cycler(ReportingTypes.objects()) GROUPS = Cycler(Group.objects.all()) PERSONS = Cycler(Person.objects.order_by("id")) COMPANIES = Cycler(Company.objects.order_by("id")) end_users = [] for ref in "welket welsch pypi docs bugs".split(): kw = dict(ref=ref, reporting_type=RTYPES.pop(), group=GROUPS.pop(), state=SiteStates.active) if ref in ("pypi", "docs", "bugs"): kw.update(name=ref) else: obj = COMPANIES.pop() eu = PERSONS.pop() end_users.append(eu) yield 
rt.models.contacts.Role(person=eu, company=obj) kw.update(company=obj) kw.update(name=str(obj)) kw.update(contact_person=eu) kw.update(private=True) yield Site(**kw) END_USERS = Cycler(end_users) yield Company(name="Saffre-Rumma") # for u in Company.objects.exclude(name="pypi"): # for i in range(3): # yield Interest(owner=u, topic=TOPICS.pop()) # prj1 = Project( # name="Framewörk", ref="linö", private=False, # company=COMPANIES.pop(), # reporting_type=RTYPES.pop(), # start_date=i2d(20090101)) # yield prj1 # yield Project( # name="Téam", ref="téam", start_date=i2d(20100101), # reporting_type=RTYPES.pop(), # company=COMPANIES.pop(), # parent=prj1, private=True) # prj2 = Project( # name="Documentatión", ref="docs", private=False, # reporting_type=RTYPES.pop(), # company=COMPANIES.pop(), # start_date=i2d(20090101), parent=prj1) # yield prj2 # yield Project( # name="Research", ref="research", private=False, # company=COMPANIES.pop(), # start_date=i2d(19980101), parent=prj2) # yield Project( # name="Shop", ref="shop", private=False, # reporting_type=RTYPES.pop(), # company=COMPANIES.pop(), # start_date=i2d(20120201), end_date=i2d(20120630)) # PROJECTS = Cycler(Project.objects.all()) # for u in User.objects.all(): # yield Competence(user=u, project=PROJECTS.pop()) # yield Competence(user=u, project=PROJECTS.pop()) if dd.is_installed('meetings'): SITES = Cycler(Site.objects.exclude(name="pypi")) # LISTS = Cycler(List.objects.all()) for i in range(7): site = SITES.pop() d = dd.today(i*2-20) kw = dict( user=WORKERS.pop(), start_date=d, # line=sprint, # project=PROJECTS.pop(), # expected=d, reached=d, # expected=d, reached=d, name="{}@{}".format(d.strftime("%Y%m%d"), site), # list=LISTS.pop() ) kw[Milestone.site_field_name] = site yield Milestone(**kw) # yield Milestone(site=SITES.pop(), expected=dd.today()) # yield Milestone(project=PROJECTS.pop(), expected=dd.today()) SITES = Cycler(Site.objects.all()) TicketStates = rt.models.tickets.TicketStates TSTATES = Cycler(TicketStates.objects()) # Vote = rt.models.votes.Vote # VoteStates = rt.models.votes.VoteStates # VSTATES = Cycler(VoteStates.objects()) num = [0] def ticket(summary, **kwargs): num[0] += 1 u = REPORTERS.pop() kwargs.update( ticket_type=TYPES.pop(), summary=summary, user=u, state=TSTATES.pop()) if num[0] % 2: kwargs.update(site=SITES.pop()) if num[0] % 4 == 0: kwargs.update(private=True) # else: # kwargs.update(private=False) if u.user_type.has_required_roles([Worker]): if num[0] % 5: kwargs.update(end_user=END_USERS.pop()) # if False: # kwargs.update(project=PROJECTS.pop()) obj = Ticket(**kwargs) yield obj # if obj.state.active: # yield Vote( # votable=obj, user=WORKERS.pop(), state=VSTATES.pop()) yield ticket("Föö fails to bar when baz") yield ticket("Bar is not always baz") yield ticket("Baz sucks") yield ticket("Foo and bar don't baz") yield ticket("Cannot create Foo", description="""<p>When I try to create a <b>Foo</b>, then I get a <b>Bar</b> instead of a Foo.</p>""") yield ticket("Sell bar in baz") yield ticket("No Foo after deleting Bar") yield ticket("Is there any Bar in Foo?") yield ticket("Foo never matches Bar") yield ticket("Where can I find a Foo when bazing Bazes?") yield ticket("Class-based Foos and Bars?") yield ticket("Foo cannot bar") # Example of memo markup: yield ticket("Bar cannot foo", description="""<p>Linking to [ticket 1] and to [url http://luc.lino-framework.org/blog/2015/0923.html blog].</p> """) yield ticket("Bar cannot baz") yield ticket("Bars have no foo") yield ticket("How to get bar from foo") TEXTS = 
Cycler([ln.strip() for ln in """ Foo never bars No more foo when bar is gone Cannot delete foo Why is foo so bar Irritating message when bar How can I see where bar? Misc optimizations in Baz Default account in invoices per partner 'NoneType' object has no attribute 'isocode' """.splitlines() if ln.strip()]) # n = Ticket.objects.count() for i in range(100): # yield ticket("Ticket {}".format(i+n+1)) yield ticket(TEXTS.pop()) if dd.is_installed('meetings'): Deployment = rt.models.deploy.Deployment WishTypes = rt.models.deploy.WishTypes WTYPES = Cycler(WishTypes.objects()) MILESTONES = Cycler(Milestone.objects.all()) for t in Ticket.objects.all(): # t.set_author_votes() if t.id % 4: yield Deployment( milestone=MILESTONES.pop(), ticket=t, wish_type=WTYPES.pop()) yield Link( type=LinkTypes.requires, parent=Ticket.objects.get(pk=1), child=Ticket.objects.get(pk=2)) # yield EntryType(**dd.str2kw('name', _('Release note'))) # yield EntryType(**dd.str2kw('name', _('Feature'))) # yield EntryType(**dd.str2kw('name', _('Upgrade instruction'))) # ETYPES = Cycler(EntryType.objects.all()) # TIMES = Cycler('12:34', '8:30', '3:45', '6:02') #blogger = USERS.pop() # def entry(offset, title, body, **kwargs): # kwargs['user'] = blogger # kwargs['entry_type'] = ETYPES.pop() # kwargs['pub_date'] = dd.today(offset) # kwargs['pub_time'] = TIMES.pop() # return Entry(title=title, body=body, **kwargs) # yield entry(-3, "Hello, world!", "This is our first blog entry.") # e = entry(-2, "Hello again", "Our second blog entry is about [ticket 1]") # yield e # yield Interest(owner=e, topic=TOPICS.pop()) # e = entry(-1, "Our third entry", """\ # Yet another blog entry about [ticket 1] and [ticket 2]. # This entry has two taggings""") # yield e # yield Interest(owner=e, topic=TOPICS.pop()) # yield Interest(owner=e, topic=TOPICS.pop()) for u in User.objects.all(): if u.user_type.has_required_roles([Reporter]): yield Membership(group=GROUPS.pop(), user=u) def working_objects(): # was previously in working Company = rt.models.contacts.Company # Vote = rt.models.votes.Vote SessionType = rt.models.working.SessionType Session = rt.models.working.Session Ticket = rt.models.tickets.Ticket User = rt.models.users.User UserTypes = rt.models.users.UserTypes # devs = (UserTypes.developer, UserTypes.senior) devs = [p for p in UserTypes.items() if p.has_required_roles([Worker]) and not p.has_required_roles([SiteAdmin])] workers = User.objects.filter(user_type__in=devs) WORKERS = Cycler(workers) TYPES = Cycler(SessionType.objects.all()) # TICKETS = Cycler(Ticket.objects.all()) DURATIONS = Cycler([12, 138, 90, 10, 122, 209, 37, 62, 179, 233, 5]) # every fourth ticket is unassigned and thus listed in # PublicTickets # for i, t in enumerate(Ticket.objects.exclude(private=True)): for i, t in enumerate(Ticket.objects.all()): if i % 4: t.assigned_to = WORKERS.pop() yield t for u in workers: # VOTES = Cycler(Vote.objects.filter(user=u)) # TICKETS = Cycler(Ticket.objects.filter(assigned_to=u)) TICKETS = Cycler(Ticket.objects.filter()) # if len(VOTES) == 0: # continue for offset in (0, -1, -3, -4): date = dd.demo_date(offset) worked = Duration() ts = datetime.datetime.combine(date, datetime.time(9, 0, 0)) for i in range(7): obj = Session( ticket=TICKETS.pop(), session_type=TYPES.pop(), user=u) obj.set_datetime('start', ts) d = DURATIONS.pop() worked += d if offset < 0: ts = DurationUnits.minutes.add_duration(ts, d) obj.set_datetime('end', ts) yield obj if offset == 0 or worked > 8: break ServiceReport = rt.models.working.ServiceReport # welket = 
Company.objects.get(name="welket") welket = rt.models.tickets.Site.objects.get(ref="welket").company yield ServiceReport( start_date=dd.today(-90), interesting_for=welket) def skills_objects(): "was previously in skills.fixtures.demo2" Skill = rt.models.skills.Skill Competence = rt.models.skills.Competence Demand = rt.models.skills.Demand # Ticket = rt.models.tickets.Ticket User = rt.models.users.User yield named(Skill, _('Analysis')) yield named(Skill, _('Code changes')) yield named(Skill, _('Documentation')) yield named(Skill, _('Testing')) yield named(Skill, _('Configuration')) yield named(Skill, _('Enhancement')) yield named(Skill, _('Optimization')) yield named(Skill, _('Offer')) SKILLS = Cycler(Skill.objects.all()) END_USERS = Cycler(dd.plugins.skills.end_user_model.objects.all()) i = 0 for j in range(2): for u in User.objects.all(): i += 1 yield Competence(user=u, faculty=SKILLS.pop()) if i % 2: yield Competence(user=u, faculty=SKILLS.pop()) if i % 3: yield Competence( user=u, faculty=SKILLS.pop(), end_user=END_USERS.pop()) for i, t in enumerate( dd.plugins.skills.demander_model.objects.all()): yield Demand(demander=t, skill=SKILLS.pop()) if i % 3: yield Demand(demander=t, skill=SKILLS.pop()) def votes_objects(): yield vote('mathieu', 1, 'candidate') yield vote('luc', 1, 'candidate') yield vote('jean', 2, 'assigned') def objects(): yield tickets_objects() yield working_objects() # yield skills_objects() # yield votes_objects()
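# The fixtures above lean heavily on lino.utils.Cycler. A minimal stand-in
# with the same observable behaviour (an endless pop() over a fixed
# sequence), for readers without a Lino checkout -- an illustration, not
# Lino's actual implementation:
class MiniCycler(object):

    def __init__(self, items):
        self.items = list(items)
        self.index = 0

    def __len__(self):
        return len(self.items)

    def pop(self):
        # return the next item, wrapping around at the end
        item = self.items[self.index]
        self.index = (self.index + 1) % len(self.items)
        return item

c = MiniCycler(['a', 'b'])
print(c.pop(), c.pop(), c.pop())  # a b a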
agpl-3.0
Conan-Kudo/bodhi
bodhi/tests/server/consumers/test_automatic_updates.py
2
18330
# Copyright © 2019 Red Hat, Inc. # # This file is part of Bodhi. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """These are tests for the bodhi.server.consumers.automatic_updates module.""" from copy import deepcopy from unittest import mock import logging from fedora_messaging.api import Message from fedora_messaging.testing import mock_sends import pytest from bodhi.server.config import config from bodhi.server.consumers.automatic_updates import AutomaticUpdateHandler from bodhi.server.models import ( Build, Release, TestGatingStatus, Update, UpdateRequest, UpdateStatus, UpdateType, User ) from bodhi.tests.server import base @mock.patch('bodhi.server.consumers.automatic_updates.work_on_bugs_task', mock.Mock()) class TestAutomaticUpdateHandler(base.BasePyTestCase): """Test the automatic update handler.""" def setup_method(self, method): """Set up environment for each test.""" super().setup_method(method) self.release = self.db.query(Release).filter_by(name='F17').first() if self.release: self.release.create_automatic_updates = True self.db.flush() else: self.release = self.create_release('17', create_automatic_updates=True) body = { 'build_id': 442562, 'name': 'colord', 'tag_id': 214, 'instance': 's390', 'tag': 'f17-updates-candidate', 'user': 'sharkcz', 'version': '1.3.4', 'owner': 'sharkcz', 'release': '1.fc26', } self.sample_message = Message(topic='', body=body) self.sample_nvr = f"{body['name']}-{body['version']}-{body['release']}" self.db_factory = base.TransactionalSessionMaker(self.Session) self.handler = AutomaticUpdateHandler(self.db_factory) # Test the main code paths. def test_consume(self, caplog): """Assert that messages about tagged builds create an update.""" caplog.set_level(logging.DEBUG) # process the message self.handler(self.sample_message) # check if the update exists... 
        update = self.db.query(Update).filter(
            Update.builds.any(Build.nvr == self.sample_nvr)
        ).first()

        # ...and some of its properties
        assert update is not None
        assert update.type == UpdateType.unspecified
        assert update.status == UpdateStatus.pending
        assert update.autokarma == False
        assert update.test_gating_status is None
        assert update.builds[0].release == self.release

        expected_username = base.buildsys.DevBuildsys._build_data['owner_name']
        assert update.user and update.user.name == expected_username

        assert not any(r.levelno >= logging.WARNING for r in caplog.records)

    @pytest.mark.parametrize('changelog', (True, None, ""))
    @mock.patch('bodhi.server.models.RpmBuild.get_changelog')
    def test_changelog(self, mock_generate_changelog, changelog):
        """Assert that update notes contain the changelog if it exists."""
        if changelog:
            # fill the changelog here rather than in the decorator
            changelog = ('* Sat Aug 3 2013 Fedora Releng <[email protected]> - 2\n'
                         '- Added a free money feature.\n* Tue Jun 11 2013 Randy <bowlofeggs@fpo>'
                         ' - 2.0.1-2\n- Make users ☺\n')
        mock_generate_changelog.return_value = changelog

        # process the message
        self.handler(self.sample_message)

        # check if the update exists...
        update = self.db.query(Update).filter(
            Update.builds.any(Build.nvr == self.sample_nvr)
        ).first()

        if changelog:
            assert update.notes == f"""Automatic update for colord-1.3.4-1.fc26.

##### **Changelog**

```
{changelog}
```"""
        else:
            # no changelog
            assert update.notes == "Automatic update for colord-1.3.4-1.fc26."

    @mock.patch('bodhi.server.models.RpmBuild.get_changelog')
    def test_bug_added(self, mock_generate_changelog):
        """Assert that a bug is added to the update if proper string is in changelog."""
        changelog = ('* Sat Aug 3 2013 Fedora Releng <[email protected]> - 2\n'
                     '- Added a free money feature.\n- Fix rhbz#112233.')
        mock_generate_changelog.return_value = changelog

        # process the message
        self.handler(self.sample_message)

        # check if the update exists...
        update = self.db.query(Update).filter(
            Update.builds.any(Build.nvr == self.sample_nvr)
        ).first()

        assert update.notes == f"""Automatic update for colord-1.3.4-1.fc26.

##### **Changelog**

```
{changelog}
```"""
        assert len(update.bugs) > 0
        assert update.bugs[0].bug_id == 112233

    @mock.patch.dict(config, [('bz_exclude_rels', ['F17'])])
    @mock.patch('bodhi.server.models.RpmBuild.get_changelog')
    def test_bug_not_added_excluded_release(self, mock_generate_changelog):
        """Assert that a bug is not added for excluded release."""
        changelog = ('* Sat Aug 3 2013 Fedora Releng <[email protected]> - 2\n'
                     '- Added a free money feature.\n- Fix rhbz#112233.')
        mock_generate_changelog.return_value = changelog

        # process the message
        self.handler(self.sample_message)

        # check if the update exists...
        update = self.db.query(Update).filter(
            Update.builds.any(Build.nvr == self.sample_nvr)
        ).first()

        assert update.notes == f"""Automatic update for colord-1.3.4-1.fc26.

##### **Changelog**

```
{changelog}
```"""
        assert len(update.bugs) == 0

    @mock.patch('bodhi.server.models.RpmBuild.get_changelog')
    def test_changelog_handled_exception(self, mock_generate_changelog):
        """Assert that update creation is successful if get_changelog() raises ValueError."""
        mock_generate_changelog.side_effect = ValueError('Handled exception')

        # process the message
        self.handler(self.sample_message)

        # check if the update exists...
        update = self.db.query(Update).filter(
            Update.builds.any(Build.nvr == self.sample_nvr)
        ).first()

        assert update.notes == "Automatic update for colord-1.3.4-1.fc26."
    @mock.patch('bodhi.server.models.RpmBuild.get_changelog')
    def test_changelog_unhandled_exception(self, mock_generate_changelog):
        """Assert that update creation is not successful if get_changelog() raises Exception."""
        mock_generate_changelog.side_effect = Exception('Unhandled exception')

        with pytest.raises(Exception) as exc:
            self.handler(self.sample_message)

        assert str(exc.value) == 'Unhandled exception'

    def test_consume_with_orphan_build(self, caplog):
        """
        Assert existing builds without an update can be handled.

        Such builds can exist e.g. if they're used in a buildroot override.
        """
        caplog.set_level(logging.DEBUG)

        # Run the handler to create the build & update, then remove the update.
        self.handler(self.sample_message)
        build = self.db.query(Build).filter_by(nvr=self.sample_nvr).one()
        update = build.update
        build.update = None  # satisfy foreign key constraint
        self.db.delete(update)

        # Now test with the same message again which should encounter the
        # build already existing in the database.
        self.handler(self.sample_message)

        # check if the update exists...
        update = self.db.query(Update).filter(
            Update.builds.any(Build.nvr == self.sample_nvr)
        ).first()

        # ...and some of its properties
        assert update is not None
        assert update.type == UpdateType.unspecified
        assert update.status == UpdateStatus.pending
        assert update.test_gating_status is None
        expected_username = base.buildsys.DevBuildsys._build_data['owner_name']
        assert update.user and update.user.name == expected_username

        assert not any(r.levelno >= logging.WARNING for r in caplog.records)

    def test_existing_pending_update(self, caplog):
        """
        Ensure an update is not created if a matching pending one exists.
        """
        caplog.set_level(logging.DEBUG)
        self.handler(self.sample_message)
        update = self.db.query(Update).filter(
            Update.builds.any(Build.nvr == self.sample_nvr)
        ).first()

        # Move it back to Pending as if the user has manually created it
        update.status = UpdateStatus.pending
        update.request = UpdateRequest.testing
        self.db.add(update)
        self.db.flush()

        # Clear pending messages
        self.db.info['messages'] = []

        caplog.clear()

        self.handler(self.sample_message)

        assert (f"Build, active update for {self.sample_nvr} exists already, skipping."
in caplog.messages) def test_obsolete_testing_update(self, caplog): """Assert that older builds stuck in Testing get obsoleted.""" caplog.set_level(logging.DEBUG) self.handler(self.sample_message) update = self.db.query(Update).filter( Update.builds.any(Build.nvr == self.sample_nvr) ).first() assert update is not None # Simulate update status after failed gating update.status = UpdateStatus.testing update.test_gating_status = TestGatingStatus.failed self.db.flush() # Clear pending messages self.db.info['messages'] = [] caplog.clear() # Create an update with a newer build msg = deepcopy(self.sample_message) msg.body['version'] = '1.3.5' msg.body['build_id'] = 442563 self.handler(msg) nvr = self.sample_nvr.replace('1.3.4', '1.3.5') old_update = self.db.query(Update).filter( Update.builds.any(Build.nvr == self.sample_nvr) ).first() new_update = self.db.query(Update).filter( Update.builds.any(Build.nvr == nvr) ).first() assert new_update is not None assert old_update is not None assert new_update.status == UpdateStatus.pending assert old_update.status == UpdateStatus.obsolete def test_problem_obsoleting_older_update(self, caplog): """Assert that an error while obsoleting doesn't block a new update being created.""" caplog.set_level(logging.DEBUG) self.handler(self.sample_message) update = self.db.query(Update).filter( Update.builds.any(Build.nvr == self.sample_nvr) ).first() assert update is not None # Simulate update status after failed gating update.status = UpdateStatus.testing update.test_gating_status = TestGatingStatus.failed self.db.flush() # Clear pending messages self.db.info['messages'] = [] caplog.clear() # Create an update with a newer build msg = deepcopy(self.sample_message) msg.body['version'] = '1.3.5' msg.body['build_id'] = 442563 with mock.patch('bodhi.server.models.Update.obsolete_older_updates', side_effect=Exception('Something gone wrong')): self.handler(msg) assert 'Problem obsoleting older updates: Something gone wrong' in caplog.messages # The new update should have been created and the old one should be stuck in testing nvr = self.sample_nvr.replace('1.3.4', '1.3.5') old_update = self.db.query(Update).filter( Update.builds.any(Build.nvr == self.sample_nvr) ).first() new_update = self.db.query(Update).filter( Update.builds.any(Build.nvr == nvr) ).first() assert new_update is not None assert old_update is not None assert new_update.status == UpdateStatus.pending assert old_update.status == UpdateStatus.testing # The following tests cover lesser-travelled code paths. @mock.patch('bodhi.server.consumers.automatic_updates.transactional_session_maker') def test___init___without_db_factory(self, transactional_session_maker): """__init__() should create db_factory if missing.""" handler = AutomaticUpdateHandler() assert handler.db_factory is transactional_session_maker.return_value transactional_session_maker.assert_called_once_with() # Test robustness: malformed messages, unknown koji builds, incomplete # buildinfo, release missing from the DB @pytest.mark.parametrize('missing_elem', ('tag', 'build_id', 'name', 'version', 'release')) def test_missing_mandatory_elems(self, missing_elem, caplog): """Test tag message without mandatory elements.""" caplog.set_level(logging.DEBUG) msg = deepcopy(self.sample_message) del msg.body[missing_elem] self.handler(msg) assert any(r.levelno == logging.DEBUG and r.getMessage() == f"Received incomplete tag message. 
Missing: {missing_elem}" for r in caplog.records) def test_unknown_koji_build(self, caplog): """Test tag message about unknown koji build.""" caplog.set_level(logging.DEBUG) msg = deepcopy(self.sample_message) msg.body['release'] += '.youdontknowme' self.handler(msg) assert any(r.levelno == logging.DEBUG and r.getMessage().startswith("Can't find Koji build for ") for r in caplog.records) def test_incomplete_koji_buildinfo_nvr(self, caplog): """Test koji returning incomplete buildinfo: no nvr.""" caplog.set_level(logging.DEBUG) msg = deepcopy(self.sample_message) msg.body['release'] += '.testmissingnvr' self.handler(msg) assert any(r.levelno == logging.DEBUG and r.getMessage().startswith("Koji build info for ") and r.getMessage().endswith(" doesn't contain 'nvr'.") for r in caplog.records) def test_incomplete_koji_buildinfo_owner(self, caplog): """Test koji returning incomplete buildinfo: no owner.""" caplog.set_level(logging.DEBUG) msg = deepcopy(self.sample_message) msg.body['release'] += '.noowner' self.handler(msg) assert any(r.levelno == logging.DEBUG and r.getMessage().startswith("Koji build info for ") and r.getMessage().endswith(" doesn't contain 'owner_name'.") for r in caplog.records) def test_missing_user(self, caplog): """Test Koji build user missing from DB.""" caplog.set_level(logging.DEBUG) expected_username = base.buildsys.DevBuildsys._build_data['owner_name'] # ensure user with expected name doesn't exist self.db.query(User).filter_by(name=expected_username).delete() self.db.flush() self.handler(self.sample_message) assert(f"Creating bodhi user for '{expected_username}'." in caplog.messages) def test_existing_user(self, caplog): """Test Koji build user existing in DB.""" caplog.set_level(logging.DEBUG) expected_username = base.buildsys.DevBuildsys._build_data['owner_name'] # ensure user with expected name exists user = self.db.query(User).filter_by(name=expected_username).first() if not user: user = User(name=expected_username) self.db.add(user) self.db.flush() assert(f"Creating bodhi user for '{expected_username}'." not in caplog.messages) # Test messages that should be ignored. def test_ignored_tag(self, caplog): """Test messages re: tags not configured for automatic updates.""" caplog.set_level(logging.DEBUG) msg = deepcopy(self.sample_message) bogus_tag = 'thisisntthetagyourelookingfor' msg.body['tag'] = bogus_tag with mock_sends(): self.handler(msg) assert any(x.startswith(f"Ignoring build being tagged into '{bogus_tag}'") for x in caplog.messages) def test_duplicate_message(self, caplog): """Assert that duplicate messages ignore existing build/update.""" caplog.set_level(logging.DEBUG) self.handler(self.sample_message) caplog.clear() with mock_sends(): self.handler(self.sample_message) assert (f"Build, active update for {self.sample_nvr} exists already, skipping." 
in caplog.messages) @mock.patch.dict(config, [('automatic_updates_blacklist', ['lmacken'])]) def test_user_in_blacklist(self, caplog): """Test that update not created if the koji build owner is in the blacklist""" caplog.set_level(logging.DEBUG) body = { 'build_id': 4425622, 'name': 'python-pants', 'tag_id': 214, 'instance': 's390', 'tag': 'f17-updates-testing-pending', 'user': 'lmacken', 'version': '1.3.4', 'owner': 'lmacken', 'release': '1.fc26', } self.sample_message = Message(topic='', body=body) self.sample_nvr = f"{body['name']}-{body['version']}-{body['release']}" with mock_sends(): self.handler(self.sample_message) assert (f"{self.sample_nvr} owned by lmacken who is listed in " "automatic_updates_blacklist, skipping." in caplog.messages)
gpl-2.0
zvolsky/edga
languages/pl.py
160
6719
# coding: utf8 { '!langcode!': 'pl', '!langname!': 'Polska', '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Uaktualnij" jest dodatkowym wyrażeniem postaci "pole1=\'nowawartość\'". Nie możesz uaktualnić lub usunąć wyników z JOIN:', '%s %%{row} deleted': 'Wierszy usuniętych: %s', '%s %%{row} updated': 'Wierszy uaktualnionych: %s', '%s selected': '%s wybranych', '%Y-%m-%d': '%Y-%m-%d', '%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', 'About': 'About', 'Access Control': 'Access Control', 'Administrative Interface': 'Administrative Interface', 'Administrative interface': 'Kliknij aby przejść do panelu administracyjnego', 'Ajax Recipes': 'Ajax Recipes', 'appadmin is disabled because insecure channel': 'administracja aplikacji wyłączona z powodu braku bezpiecznego połączenia', 'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?', 'Authentication': 'Uwierzytelnienie', 'Available Databases and Tables': 'Dostępne bazy danych i tabele', 'Buy this book': 'Buy this book', 'cache': 'cache', 'Cache': 'Cache', 'Cache Keys': 'Cache Keys', 'Cannot be empty': 'Nie może być puste', 'Change Password': 'Zmień hasło', 'change password': 'change password', 'Check to delete': 'Zaznacz aby usunąć', 'Check to delete:': 'Zaznacz aby usunąć:', 'Clear CACHE?': 'Clear CACHE?', 'Clear DISK': 'Clear DISK', 'Clear RAM': 'Clear RAM', 'Client IP': 'IP klienta', 'Community': 'Community', 'Components and Plugins': 'Components and Plugins', 'Controller': 'Kontroler', 'Copyright': 'Copyright', 'Current request': 'Aktualne żądanie', 'Current response': 'Aktualna odpowiedź', 'Current session': 'Aktualna sesja', 'customize me!': 'dostosuj mnie!', 'data uploaded': 'dane wysłane', 'Database': 'baza danych', 'Database %s select': 'wybór z bazy danych %s', 'db': 'baza danych', 'DB Model': 'Model bazy danych', 'Delete:': 'Usuń:', 'Demo': 'Demo', 'Deployment Recipes': 'Deployment Recipes', 'Description': 'Opis', 'design': 'projektuj', 'DISK': 'DISK', 'Disk Cache Keys': 'Disk Cache Keys', 'Disk Cleared': 'Disk Cleared', 'Documentation': 'Documentation', "Don't know what to do?": "Don't know what to do?", 'done!': 'zrobione!', 'Download': 'Download', 'E-mail': 'Adres e-mail', 'Edit': 'Edycja', 'Edit current record': 'Edytuj obecny rekord', 'edit profile': 'edit profile', 'Edit Profile': 'Edytuj profil', 'Edit This App': 'Edytuj tę aplikację', 'Email and SMS': 'Email and SMS', 'Errors': 'Errors', 'export as csv file': 'eksportuj jako plik csv', 'FAQ': 'FAQ', 'First name': 'Imię', 'Forms and Validators': 'Forms and Validators', 'Free Applications': 'Free Applications', 'Function disabled': 'Funkcja wyłączona', 'Group ID': 'ID grupy', 'Groups': 'Groups', 'Hello World': 'Witaj Świecie', 'Home': 'Home', 'How did you get here?': 'How did you get here?', 'import': 'import', 'Import/Export': 'Importuj/eksportuj', 'Index': 'Indeks', 'insert new': 'wstaw nowy rekord tabeli', 'insert new %s': 'wstaw nowy rekord do tabeli %s', 'Internal State': 'Stan wewnętrzny', 'Introduction': 'Introduction', 'Invalid email': 'Błędny adres email', 'Invalid Query': 'Błędne zapytanie', 'invalid request': 'Błędne żądanie', 'Key': 'Key', 'Last name': 'Nazwisko', 'Layout': 'Układ', 'Layout Plugins': 'Layout Plugins', 'Layouts': 'Layouts', 'Live Chat': 'Live Chat', 'login': 'login', 'Login': 'Zaloguj', 'logout': 'logout', 'Logout': 'Wyloguj', 'Lost Password': 'Przypomnij hasło', 'Main Menu': 'Menu główne', 'Manage Cache': 'Manage Cache', 'Menu Model': 'Model menu', 
'My Sites': 'My Sites', 'Name': 'Nazwa', 'New Record': 'Nowy rekord', 'new record inserted': 'nowy rekord został wstawiony', 'next 100 rows': 'następne 100 wierszy', 'No databases in this application': 'Brak baz danych w tej aplikacji', 'Online examples': 'Kliknij aby przejść do interaktywnych przykładów', 'or import from csv file': 'lub zaimportuj z pliku csv', 'Origin': 'Źródło', 'Other Plugins': 'Other Plugins', 'Other Recipes': 'Other Recipes', 'Overview': 'Overview', 'Password': 'Hasło', "Password fields don't match": 'Pola hasła nie są zgodne ze sobą', 'Plugins': 'Plugins', 'Powered by': 'Zasilane przez', 'Preface': 'Preface', 'previous 100 rows': 'poprzednie 100 wierszy', 'Python': 'Python', 'Query:': 'Zapytanie:', 'Quick Examples': 'Quick Examples', 'RAM': 'RAM', 'RAM Cache Keys': 'RAM Cache Keys', 'Ram Cleared': 'Ram Cleared', 'Recipes': 'Recipes', 'Record': 'rekord', 'record does not exist': 'rekord nie istnieje', 'Record ID': 'ID rekordu', 'Record id': 'id rekordu', 'Register': 'Zarejestruj', 'register': 'register', 'Registration key': 'Klucz rejestracji', 'Role': 'Rola', 'Rows in Table': 'Wiersze w tabeli', 'Rows selected': 'Wybrane wiersze', 'Semantic': 'Semantic', 'Services': 'Services', 'Size of cache:': 'Size of cache:', 'state': 'stan', 'Statistics': 'Statistics', 'Stylesheet': 'Arkusz stylów', 'submit': 'submit', 'Submit': 'Wyślij', 'Support': 'Support', 'Sure you want to delete this object?': 'Czy na pewno chcesz usunąć ten obiekt?', 'Table': 'tabela', 'Table name': 'Nazwa tabeli', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'wartość\'". Takie coś jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.', 'The Core': 'The Core', 'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s', 'The Views': 'The Views', 'This App': 'This App', 'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)', 'Timestamp': 'Znacznik czasu', 'Twitter': 'Twitter', 'unable to parse csv file': 'nie można sparsować pliku csv', 'Update:': 'Uaktualnij:', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Użyj (...)&(...) jako AND, (...)|(...) jako OR oraz ~(...) jako NOT do tworzenia bardziej skomplikowanych zapytań.', 'User %(id)s Registered': 'Użytkownik %(id)s został zarejestrowany', 'User ID': 'ID użytkownika', 'Verify Password': 'Potwierdź hasło', 'Videos': 'Videos', 'View': 'Widok', 'Welcome %s': 'Welcome %s', 'Welcome to web2py': 'Witaj w web2py', 'Welcome to web2py!': 'Welcome to web2py!', 'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s', 'You are successfully running web2py': 'You are successfully running web2py', 'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs', 'You visited the url %s': 'You visited the url %s', }
agpl-3.0
af1rst/bite-project
deps/mrtaskman/server/mapreduce/main.py
27
3036
#!/usr/bin/env python # # Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Main module for map-reduce implementation. This module should be specified as a handler for mapreduce URLs in app.yaml: handlers: - url: /mapreduce(/.*)? login: admin script: mapreduce/main.py """ import wsgiref.handlers from google.appengine.ext import webapp from mapreduce import handlers from mapreduce import status from google.appengine.ext.webapp import util try: from mapreduce.lib import pipeline except ImportError: pipeline = None STATIC_RE = r".*/([^/]*\.(?:css|js)|status|detail)$" class RedirectHandler(webapp.RequestHandler): """Redirects the user back to the status page.""" def get(self): new_path = self.request.path if not new_path.endswith("/"): new_path += "/" new_path += "status" self.redirect(new_path) def create_handlers_map(): """Create new handlers map. Returns: list of (regexp, handler) pairs for WSGIApplication constructor. """ pipeline_handlers_map = [] if pipeline: pipeline_handlers_map = pipeline.create_handlers_map(prefix=".*/pipeline") return pipeline_handlers_map + [ # Task queue handlers. (r".*/worker_callback", handlers.MapperWorkerCallbackHandler), (r".*/controller_callback", handlers.ControllerCallbackHandler), (r".*/kickoffjob_callback", handlers.KickOffJobHandler), (r".*/finalizejob_callback", handlers.FinalizeJobHandler), # RPC requests with JSON responses # All JSON handlers should have /command/ prefix. (r".*/command/start_job", handlers.StartJobHandler), (r".*/command/cleanup_job", handlers.CleanUpJobHandler), (r".*/command/abort_job", handlers.AbortJobHandler), (r".*/command/list_configs", status.ListConfigsHandler), (r".*/command/list_jobs", status.ListJobsHandler), (r".*/command/get_job_detail", status.GetJobDetailHandler), # UI static files (STATIC_RE, status.ResourceHandler), # Redirect non-file URLs that do not end in status/detail to status page. (r".*", RedirectHandler), ] def create_application(): """Create new WSGIApplication and register all handlers. Returns: an instance of webapp.WSGIApplication with all mapreduce handlers registered. """ return webapp.WSGIApplication(create_handlers_map(), debug=True) APP = create_application() def main(): util.run_wsgi_app(APP) if __name__ == "__main__": main()
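# Hedged sketch: because create_handlers_map() just returns (regexp, handler)
# pairs, a deployment could prepend routes of its own before the catch-all
# redirect. `MyStatusHandler` is hypothetical and only illustrates the idea:
#
#   def create_custom_application():
#       handlers = [(r".*/my_status", MyStatusHandler)] + create_handlers_map()
#       return webapp.WSGIApplication(handlers, debug=True)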
apache-2.0
40223201/w16b_test
static/Brython3.1.3-20150514-095342/Lib/pydoc_data/topics.py
694
385454
# -*- coding: utf-8 -*- # Autogenerated by Sphinx on Sat Mar 23 15:42:31 2013 topics = {'assert': '\nThe ``assert`` statement\n************************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, ``assert expression``, is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, ``assert expression1, expression2``, is equivalent\nto\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that ``__debug__`` and ``AssertionError``\nrefer to the built-in variables with those names. In the current\nimplementation, the built-in variable ``__debug__`` is ``True`` under\nnormal circumstances, ``False`` when optimization is requested\n(command line option -O). The current code generator emits no code\nfor an assert statement when optimization is requested at compile\ntime. Note that it is unnecessary to include the source code for the\nexpression that failed in the error message; it will be displayed as\npart of the stack trace.\n\nAssignments to ``__debug__`` are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': '\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an asterisk,\n called a "starred" target: The object must be a sequence with at\n least as many items as there are targets in the target list, minus\n one. The first items of the sequence are assigned, from left to\n right, to the targets before the starred target. The final items\n of the sequence are assigned to the targets after the starred\n target. 
A list of the remaining items in the sequence is then\n assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of items\n as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a ``global`` or ``nonlocal``\n statement in the current code block: the name is bound to the\n object in the current local namespace.\n\n * Otherwise: the name is bound to the object in the global namespace\n or the outer namespace determined by ``nonlocal``, respectively.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, ``TypeError`` is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily ``AttributeError``).\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n ``a.x`` can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target ``a.x`` is\n always set as an instance attribute, creating it if necessary.\n Thus, the two occurrences of ``a.x`` do not necessarily refer to the\n same attribute: if the RHS expression refers to a class attribute,\n the LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with ``property()``.\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, ``IndexError`` is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. 
This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the ``__setitem__()`` method is called\n with appropriate arguments.\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to integers. If either bound is\n negative, the sequence\'s length is added to it. The resulting\n bounds are clipped to lie between zero and the sequence\'s length,\n inclusive. Finally, the sequence object is asked to replace the\n slice with the items of the assigned sequence. The length of the\n slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample ``a, b = b, a`` swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! For instance, the\nfollowing program prints ``[0, 2]``:\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print(x)\n\nSee also:\n\n **PEP 3132** - Extended Iterable Unpacking\n The specification for the ``*target`` feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. 
Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'atom-identifiers': '\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a ``NameError`` exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name in front of the name, with leading underscores removed, and\na single underscore inserted in front of the class name. For example,\nthe identifier ``__spam`` occurring in a class named ``Ham`` will be\ntransformed to ``_Ham__spam``. This transformation is independent of\nthe syntactical context in which the identifier is used. If the\ntransformed name is extremely long (longer than 255 characters),\nimplementation defined truncation may happen. If the class name\nconsists only of underscores, no transformation is done.\n', 'atom-literals': "\nLiterals\n********\n\nPython supports string and bytes literals and various numeric\nliterals:\n\n literal ::= stringliteral | bytesliteral\n | integer | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\nbytes, integer, floating point number, complex number) with the given\nvalue. The value may be approximated in the case of floating point\nand imaginary (complex) literals. See section *Literals* for details.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", 'attribute-access': '\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). 
See the\n ``__getattribute__()`` method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when ``dir()`` is called on the object. A sequence must be\n returned. ``dir()`` converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. 
For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to an object instance, ``a.x`` is transformed into the\n call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a class, ``A.x`` is transformed into the call:\n ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
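For example, a\n minimal sketch (the class and attribute names are arbitrary):\n\n class Point:\n __slots__ = (\'x\', \'y\')\n\n p = Point()\n p.x = 1 # \'x\' is a declared slot\n p.z = 3 # raises AttributeError: no \'z\' slot and no \'__dict__\'\n\n 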
If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n--------------------------\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raise ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to their instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``int``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n', 'attribute-references': '\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, which most objects do. This object is then\nasked to produce the attribute whose name is the identifier (which can\nbe customized by overriding the ``__getattr__()`` method). If this\nattribute is not available, the exception ``AttributeError`` is\nraised. Otherwise, the type and value of the object produced is\ndetermined by the object. 
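For example,\na class can compute attributes on the fly (an illustrative sketch;\nthe names are arbitrary):\n\n class Loud:\n def __getattr__(self, name):\n return name.upper()\n\n Loud().spam # yields \'SPAM\'\n\n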
Multiple evaluations of the same attribute\nreference may yield different objects.\n', 'augassign': '\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'binary': '\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe ``*`` (multiplication) operator yields the product of its\narguments. The arguments must either both be numbers, or one argument\nmust be an integer and the other must be a sequence. In the former\ncase, the numbers are converted to a common type and then multiplied\ntogether. In the latter case, sequence repetition is performed; a\nnegative repetition factor yields an empty sequence.\n\nThe ``/`` (division) and ``//`` (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Integer division yields a float, while\nfloor division of integers results in an integer; the result is that\nof mathematical division with the \'floor\' function applied to the\nresult. Division by zero raises the ``ZeroDivisionError`` exception.\n\nThe ``%`` (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n``ZeroDivisionError`` exception. The arguments may be floating point\nnumbers, e.g., ``3.14%0.7`` equals ``0.34`` (since ``3.14`` equals\n``4*0.7 + 0.34``.) 
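For example (an\ninteractive sketch):\n\n >>> 7 % 3\n 1\n >>> -7 % 3\n 2\n >>> 7 % -3\n -2\n\n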
The modulo operator always yields a result with\nthe same sign as its second operand (or zero); the absolute value of\nthe result is strictly smaller than the absolute value of the second\noperand [1].\n\nThe floor division and modulo operators are connected by the following\nidentity: ``x == (x//y)*y + (x%y)``. Floor division and modulo are\nalso connected with the built-in function ``divmod()``: ``divmod(x, y)\n== (x//y, x%y)``. [2].\n\nIn addition to performing the modulo operation on numbers, the ``%``\noperator is also overloaded by string objects to perform old-style\nstring formatting (also known as interpolation). The syntax for\nstring formatting is described in the Python Library Reference,\nsection *printf-style String Formatting*.\n\nThe floor division operator, the modulo operator, and the ``divmod()``\nfunction are not defined for complex numbers. Instead, convert to a\nfloating point number using the ``abs()`` function if appropriate.\n\nThe ``+`` (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe ``-`` (subtraction) operator yields the difference of its\narguments. The numeric arguments are first converted to a common\ntype.\n', 'bitwise': '\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe ``&`` operator yields the bitwise AND of its arguments, which must\nbe integers.\n\nThe ``^`` operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be integers.\n\nThe ``|`` operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be integers.\n', 'bltin-code-objects': '\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin ``compile()`` function and can be extracted from function objects\nthrough their ``__code__`` attribute. See also the ``code`` module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the ``exec()`` or ``eval()`` built-in functions.\n\nSee *The standard type hierarchy* for more information.\n', 'bltin-ellipsis-object': '\nThe Ellipsis Object\n*******************\n\nThis object is commonly used by slicing (see *Slicings*). It supports\nno special operations. There is exactly one ellipsis object, named\n``Ellipsis`` (a built-in name). ``type(Ellipsis)()`` produces the\n``Ellipsis`` singleton.\n\nIt is written as ``Ellipsis`` or ``...``.\n', 'bltin-null-object': "\nThe Null Object\n***************\n\nThis object is returned by functions that don't explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named ``None`` (a built-in name). ``type(None)()`` produces\nthe same singleton.\n\nIt is written as ``None``.\n", 'bltin-type-objects': "\nType Objects\n************\n\nType objects represent the various object types. An object's type is\naccessed by the built-in function ``type()``. There are no special\noperations on types. 
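For example (an\ninteractive sketch):\n\n >>> type(1)\n <class 'int'>\n >>> type(type(1))\n <class 'type'>\n\n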
The standard module ``types`` defines names for\nall standard built-in types.\n\nTypes are written like this: ``<class 'int'>``.\n", 'booleans': '\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: ``False``, ``None``, numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. User-defined objects can customize their truth value by\nproviding a ``__bool__()`` method.\n\nThe operator ``not`` yields ``True`` if its argument is false,\n``False`` otherwise.\n\nThe expression ``x and y`` first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression ``x or y`` first evaluates *x*; if *x* is true, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\n(Note that neither ``and`` nor ``or`` restrict the value and type they\nreturn to ``False`` and ``True``, but rather return the last evaluated\nargument. This is sometimes useful, e.g., if ``s`` is a string that\nshould be replaced by a default value if it is empty, the expression\n``s or \'foo\'`` yields the desired value. Because ``not`` has to\ninvent a value anyway, it does not bother to return a value of the\nsame type as its argument, so e.g., ``not \'foo\'`` yields ``False``,\nnot ``\'\'``.)\n', 'break': '\nThe ``break`` statement\n***********************\n\n break_stmt ::= "break"\n\n``break`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition\nwithin that loop.\n\nIt terminates the nearest enclosing loop, skipping the optional\n``else`` clause if the loop has one.\n\nIf a ``for`` loop is terminated by ``break``, the loop control target\nkeeps its current value.\n\nWhen ``break`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the loop.\n', 'callable-types': '\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n', 'calls': '\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n``__call__()`` 
method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a ``TypeError`` exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is ``None``, it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a\n``TypeError`` exception is raised. Otherwise, the list of filled\nslots is used as the argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use ``PyArg_ParseTuple()`` to\nparse their arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``*identifier`` is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``**identifier`` is present; in this case, that\nformal parameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax ``*expression`` appears in the function call,\n``expression`` must evaluate to an iterable. Elements from this\niterable are treated as if they were additional positional arguments;\nif there are positional arguments *x1*, ..., *xN*, and ``expression``\nevaluates to a sequence *y1*, ..., *yM*, this is equivalent to a call\nwith M+N positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the ``*expression`` syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the ``**expression`` argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... 
print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the ``*expression``\nsyntax to be used in the same call, so in practice this confusion does\nnot arise.\n\nIf the syntax ``**expression`` appears in the function call,\n``expression`` must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both ``expression`` and as an explicit keyword argument,\na ``TypeError`` exception is raised.\n\nFormal parameters using the syntax ``*identifier`` or ``**identifier``\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly ``None``, unless it raises\nan exception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. When the code block executes a ``return``\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a ``__call__()`` method; the effect is then\n the same as if that method was called.\n', 'class': '\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class ``object``; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. 
The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with ``self.name = value``. Both class and\ninstance attributes are accessible through the notation\n"``self.name``", and an instance attribute hides a class attribute\nwith the same name when accessed in this way. Class attributes can be\nused as defaults for instance attributes, but using mutable values\nthere can lead to unexpected results. *Descriptors* can be used to\ncreate instance variables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'comparisons': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nthe ``==`` and ``!=`` operators *always* consider objects of different\ntypes to be unequal, while the ``<``, ``>``, ``>=`` and ``<=``\noperators raise a ``TypeError`` when comparing objects of different\ntypes that do not implement these operators for the given pair of\ntypes. You can control comparison behavior of objects of non-built-in\ntypes by defining rich comparison methods like ``__gt__()``, described\nin section *Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values ``float(\'NaN\')`` and ``Decimal(\'NaN\')`` are special. 
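For example (an\n interactive sketch):\n\n >>> n = float(\'NaN\')\n >>> n == n\n False\n >>> n is n\n True\n\n 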
They\n are identical to themselves, ``x is x``, but are not equal to\n themselves, ``x != x``. Additionally, comparing any value to a\n not-a-number value will return ``False``. For example, both ``3 <\n float(\'NaN\')`` and ``float(\'NaN\') < 3`` will return ``False``.\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n [3] String and bytes objects can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``[1,2,x] <= [1,2,y]`` has the\n same value as ``x <= y``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same ``(key, value)`` pairs. Order comparisons ``(\'<\', \'<=\', \'>=\',\n \'>\')`` raise ``TypeError``.\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets ``{1,2}`` and ``{2,3}`` are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, ``min()``, ``max()``, and ``sorted()`` produce\n undefined results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nComparison of objects of differing types depends on whether either\nof the types provides explicit support for the comparison. Most\nnumeric types can be compared with one another. When cross-type\ncomparison is not supported, the comparison method returns\n``NotImplemented``.\n\nThe operators ``in`` and ``not in`` test for membership. ``x in s``\nevaluates to true if *x* is a member of *s*, and false otherwise. ``x\nnot in s`` returns the negation of ``x in s``. All built-in sequences\nand set types support this, as do dictionaries, for which ``in``\ntests whether the dictionary has a given key. For container types\nsuch as list, tuple, set, frozenset, dict, or collections.deque, the\nexpression ``x in y`` is equivalent to ``any(x is e or x == e for e in\ny)``.\n\nFor the string and bytes types, ``x in y`` is true if and only if *x*\nis a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nEmpty strings are always considered to be a substring of any other\nstring, so ``"" in "abc"`` will return ``True``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. 
If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [4]\n', 'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements, while the ``with`` statement\nallows the execution of initialization and finalization code around a\nblock of code. Function and class definitions are also syntactically\ncompound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print()`` calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. 
Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a ``StopIteration``\nexception), the suite in the ``else`` clause, if present, is executed,\nand the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. Hint: the built-in function ``range()`` returns an\niterator of integers suitable to emulate the effect of Pascal\'s ``for\ni := a to b do``; e.g., ``list(range(3))`` returns the list ``[0, 1,\n2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). 
An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the ``as`` keyword in that except clause,\nif present, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using ``as target``, it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the ``sys`` module and can be accessed via\n``sys.exc_info()``. 
``sys.exc_info()`` returns a 3-tuple consisting of\nthe exception class, the exception instance and a traceback object\n(see section *The standard type hierarchy*) identifying the point in\nthe program where the exception occurred. ``sys.exc_info()`` values\nare restored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception it is re-raised at the end of\nthe ``finally`` clause. If the ``finally`` clause raises another\nexception, the saved exception is set as the context of the new\nexception. If the ``finally`` clause executes a ``return`` or\n``break`` statement, the saved exception is discarded:\n\n def f():\n try:\n 1/0\n finally:\n return 42\n\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the "``*``" must also have a default value ---\nthis is a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from positional arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after "``*``" or "``*identifier``" are\nkeyword-only parameters and may only be passed using keyword arguments.\n\nParameters may have annotations of the form "``: expression``"\nfollowing the parameter name. Any parameter may have an annotation,\neven those of the form ``*identifier`` or ``**identifier``. Functions\nmay have a "return" annotation of the form "``-> expression``" after the\nparameter list. These annotations can be any valid Python expression\nand are evaluated when the function definition is executed.\nAnnotations may be evaluated in a different order than they appear in\nthe source code. The presence of annotations does not change the\nsemantics of a function. The annotation values are available as\nvalues of a dictionary keyed by the parameters\' names in the\n``__annotations__`` attribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also:\n\n **PEP 3107** - Function Annotations\n The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class ``object``; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. 
(Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with ``self.name = value``. Both class and\ninstance attributes are accessible through the notation\n"``self.name``", and an instance attribute hides a class attribute\nwith the same name when accessed in this way. Class attributes can be\nused as defaults for instance attributes, but using mutable values\nthere can lead to unexpected results. *Descriptors* can be used to\ncreate instance variables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3\n\n **PEP 3129** - Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'context-managers': '\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. 
If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'continue': '\nThe ``continue`` statement\n**************************\n\n continue_stmt ::= "continue"\n\n``continue`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition or\n``finally`` clause within that loop. It continues with the next cycle\nof the nearest enclosing loop.\n\nWhen ``continue`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nstarting the next loop cycle.\n', 'conversions': '\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works that way:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the other\n is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions must define their own\nconversion behavior.\n', 'customization': '\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. 
If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_info()[2]`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.last_traceback``. Circular references which are garbage are\n detected when the option cycle detector is enabled (it\'s on by\n default), but can only be cleaned up if there are no Python-\n level ``__del__()`` methods involved. Refer to the documentation\n for the ``gc`` module for more information about how\n ``__del__()`` methods are handled by the cycle detector,\n particularly the description of the ``garbage`` value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function to compute the\n "official" string representation of an object. 
If at all possible,\n this should look like a valid Python expression that could be used\n to recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n ``<...some useful description...>`` should be returned. The return\n value must be a string object. If a class defines ``__repr__()``\n but not ``__str__()``, then ``__repr__()`` is also used when an\n "informal" string representation of instances of that class is\n required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by ``str(object)`` and the built-in functions ``format()``\n and ``print()`` to compute the "informal" or nicely printable\n string representation of an object. The return value must be a\n *string* object.\n\n This method differs from ``object.__repr__()`` in that there is no\n expectation that ``__str__()`` return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type ``object``\n calls ``object.__repr__()``.\n\nobject.__bytes__(self)\n\n Called by ``bytes()`` to compute a byte-string representation of an\n object. This should return a ``bytes`` object.\n\nobject.__format__(self, format_spec)\n\n Called by the ``format()`` built-in function (and by extension, the\n ``str.format()`` method of class ``str``) to produce a "formatted"\n string representation of an object. The ``format_spec`` argument is\n a string that contains a description of the formatting options\n desired. The interpretation of the ``format_spec`` argument is up\n to the type implementing ``__format__()``, however most classes\n will either delegate formatting to one of the built-in types, or\n use a similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` calls\n ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and ``x>=y`` calls\n ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. 
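As a hedged sketch of that convention (the class ``Point`` is hypothetical, not something defined by the language reference):

   class Point:
       def __init__(self, x, y):
           self.x, self.y = x, y

       def __eq__(self, other):
           if not isinstance(other, Point):
               return NotImplemented            # let the other operand handle it
           return (self.x, self.y) == (other.x, other.y)

       def __ne__(self, other):
           result = self.__eq__(other)
           return result if result is NotImplemented else not result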
See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define an ``__eq__()`` method it should not\n define a ``__hash__()`` operation either; if it defines\n ``__eq__()`` but not ``__hash__()``, its instances will not be\n usable as items in hashable collections. If a class defines\n mutable objects and implements an ``__eq__()`` method, it should\n not implement ``__hash__()``, since the implementation of hashable\n collections requires that a key\'s hash value is immutable (if the\n object\'s hash value changes, it will be in the wrong hash bucket).\n\n User-defined classes have ``__eq__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns an appropriate value such\n that ``x == y`` implies both that ``x is y`` and ``hash(x) ==\n hash(y)``.\n\n A class that overrides ``__eq__()`` and does not define\n ``__hash__()`` will have its ``__hash__()`` implicitly set to\n ``None``. When the ``__hash__()`` method of a class is ``None``,\n instances of the class will raise an appropriate ``TypeError`` when\n a program attempts to retrieve their hash value, and will also be\n correctly identified as unhashable when checking ``isinstance(obj,\n collections.Hashable)``.\n\n If a class that overrides ``__eq__()`` needs to retain the\n implementation of ``__hash__()`` from a parent class, the\n interpreter must be told this explicitly by setting ``__hash__ =\n <ParentClass>.__hash__``.\n\n If a class that does not override ``__eq__()`` wishes to suppress\n hash support, it should include ``__hash__ = None`` in the class\n definition. 
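Putting these rules together, a minimal illustrative sketch (the class names here are hypothetical):

   class Account:
       def __init__(self, number):
           self.number = number

       def __eq__(self, other):
           if not isinstance(other, Account):
               return NotImplemented
           return self.number == other.number

       def __hash__(self):
           return hash(self.number)   # equal objects must hash equal

   class UnhashableAccount(Account):
       __hash__ = None                # hash() on instances now raises TypeError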
A class which defines its own ``__hash__()`` that\n explicitly raises a ``TypeError`` would be incorrectly identified\n as hashable by an ``isinstance(obj, collections.Hashable)`` call.\n\n Note: By default, the ``__hash__()`` values of str, bytes and datetime\n objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python. This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details. Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds). See also ``PYTHONHASHSEED``.\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``. When this method\n is not defined, ``__len__()`` is called, if it is defined, and the\n object is considered true if its result is nonzero. If a class\n defines neither ``__len__()`` nor ``__bool__()``, all its instances\n are considered true.\n', 'debugger': '\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n``Pdb``. This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\nand ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the ``readline`` module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the ``print`` command.\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: ``pdb.py`` now accepts a ``-c`` option that\nexecutes commands as if given in a ``.pdbrc`` file, see *Debugger\nCommands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. 
You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the ``continue`` command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type ``continue``, or you can\n step through the statement using ``step`` or ``next`` (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module ``__main__`` is used. (See\n the explanation of the built-in ``exec()`` or ``eval()``\n functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. When ``runeval()`` returns, it returns the\n value of the expression. Otherwise this function is similar to\n ``run()``.\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When ``runcall()`` returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n ``sys.last_traceback``.\n\nThe ``run*`` functions and ``set_trace()`` are aliases for\ninstantiating the ``Pdb`` class and calling the method of the same\nname. If you want to access further features, you have to do this\nyourself:\n\nclass pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n ``Pdb`` is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying ``cmd.Cmd`` class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses Ctrl-C on the console) when you give a\n ``continue`` command. This allows you to break into the debugger\n again by pressing Ctrl-C. If you want Pdb not to touch the SIGINT\n handler, set *nosigint* to true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. 
Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n``h(elp)`` means that either ``h`` or ``help`` can be used to enter\nthe help command (but not ``he`` or ``hel``, nor ``H`` or ``Help`` or\n``HELP``). Arguments to commands must be separated by whitespace\n(spaces or tabs). Optional arguments are enclosed in square brackets\n(``[]``) in the command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n(``|``).\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a ``list`` command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint (``!``). This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by\n``;;``. (A single ``;`` is not used as it is the separator for\nmultiple commands in a line that is passed to the Python parser.) No\nintelligence is applied to separating the commands; the input is split\nat the first ``;;`` pair, even if it is in the middle of a quoted\nstring.\n\nIf a file ``.pdbrc`` exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ``.pdbrc`` can now contain commands that\ncontinue debugging, such as ``continue`` or ``next``. Previously,\nthese commands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. ``help pdb``\n displays the full documentation (the docstring of the ``pdb``\n module). Since the *command* argument must be an identifier,\n ``help exec`` must be entered to get help on the ``!`` command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. 
The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on ``sys.path``. Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for ``break``.\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just ``end`` to terminate the commands. An example:\n\n (Pdb) commands 1\n (com) print some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with ``end``; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. 
If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between ``next`` and\n ``step`` is that ``step`` stops inside a called function, while\n ``next`` executes called functions at (nearly) full speed, only\n stopping at the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a ``for`` loop or out\n of a ``finally`` clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With ``.`` as argument, list 11 lines around the current line.\n With one argument, list 11 lines around that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by ``->``. If\n an exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ``>>``, if it\n differs from the current line.\n\n New in version 3.2: The ``>>`` marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for ``list``.\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np(rint) expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\npp expression\n\n Like the ``print`` command, except the value of the expression is\n pretty-printed using the ``pprint`` module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interactive interpreter (using the ``code`` module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. 
Replaceable parameters can be\n indicated by ``%1``, ``%2``, and so on, while ``%*`` is replaced by\n all the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ``.pdbrc`` file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. To set a\n global variable, you can prefix the assignment command with a\n ``global`` statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with ``shlex`` and the result is used as the new\n ``sys.argv``. History, breakpoints, actions and debugger options\n are preserved. ``restart`` is an alias for ``run``.\n\nq(uit)\n\n Quit from the debugger. The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module is\n determined by the ``__name__`` in the frame globals.\n', 'del': '\nThe ``del`` statement\n*********************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a ``global``\nstatement in the same code block. If the name is unbound, a\n``NameError`` exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2: Previously it was illegal to delete a name\nfrom the local namespace if it occurs as a free variable in a nested\nblock.\n', 'dict': '\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. 
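A small illustration of this left-to-right rule (the key and values are chosen arbitrarily):

   {'k': 1, 'k': 2}   # both pairs are evaluated; the result is {'k': 2}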
This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', 'dynamic-features': '\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nThe ``eval()`` and ``exec()`` functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe ``exec()`` and ``eval()`` functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', 'else': '\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'exceptions': '\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. 
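For example, a minimal sketch (the names and values are illustrative only):

   divisor = 0
   try:
       result = 1 / divisor
   except ZeroDivisionError:
       result = float('inf')         # handle the error
   finally:
       print('division attempted')   # runs whether or not an exception occurred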
The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nNote: Exception messages are not part of the Python API. Their contents\n may change from one version of Python to the next without warning\n and should not be relied on by code which will run under multiple\n versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'execmodel': '\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line as the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions ``eval()`` and ``exec()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. 
The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as ``nonlocal``. If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, or\nafter ``as`` in a ``with`` statement or ``except`` clause. The\n``import`` statement of the form ``from ... import *`` binds all names\ndefined in the imported module, except those beginning with an\nunderscore. This form may only be used at the module level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the ``global`` statement occurs within a block, all uses of the\nname specified in the statement refer to the binding of that name in\nthe top-level namespace. Names are resolved in the top-level\nnamespace by searching the global namespace, i.e. the namespace of the\nmodule containing the code block, and the builtins namespace, the\nnamespace of the module ``builtins``. The global namespace is\nsearched first. If the name is not found there, the builtins\nnamespace is searched. The global statement must precede all uses of\nthe name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module\'s dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``builtins``; when in any other module, ``__builtins__`` is an alias\nfor the dictionary of the ``builtins`` module itself.\n``__builtins__`` can be set to a user-created dictionary to create a\nweak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``builtins`` module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. 
The main module for a script is always called\n``__main__``.\n\nThe ``global`` statement has the same scope as a name binding\noperation in the same block. If the nearest enclosing scope for a\nfree variable contains a global statement, the free variable is\ntreated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nThe ``eval()`` and ``exec()`` functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe ``exec()`` and ``eval()`` functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nNote: Exception messages are not part of the Python API. 
Their contents\n may change from one version of Python to the next without warning\n and should not be relied on by code which will run under multiple\n versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'exprlists': '\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: ``()``.)\n', 'floating': '\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, ``077e010`` is legal, and denotes the same\nnumber as ``77e10``. The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator ``-`` and the\nliteral ``1``.\n', 'for': '\nThe ``for`` statement\n*********************\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a ``StopIteration``\nexception), the suite in the ``else`` clause, if present, is executed,\nand the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. 
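A short sketch of the ``break``/``else`` interaction described above (the values are illustrative):

   for n in [3, 5, 9]:
       if n % 2 == 0:
           print(n, 'is even')
           break                          # skips the else clause
   else:
       print('no even number found')      # runs because the loop was never broken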
Hint: the built-in function ``range()`` returns an\niterator of integers suitable to emulate the effect of Pascal\'s ``for\ni := a to b do``; e.g., ``list(range(3))`` returns the list ``[0, 1,\n2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', 'formatstrings': '\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s" | "a"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point ``\'!\'``, and a *format_spec*, which\nis preceded by a colon ``\':\'``. These specify a non-default format\nfor the replacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings ``\'10\'`` or\n``\':-]\'``) within a format string. The *arg_name* can be followed by\nany number of index or attribute expressions. 
An expression of the\nform ``\'.name\'`` selects the named attribute using ``getattr()``,\nwhile an expression of the form ``\'[index]\'`` does an index lookup\nusing ``__getitem__()``.\n\nChanged in version 3.1: The positional argument specifiers can be\nomitted, so ``\'{} {}\'`` is equivalent to ``\'{0} {1}\'``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nThree conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, ``\'!r\'`` which calls ``repr()`` and ``\'!a\'``\nwhich calls ``ascii()``.\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n "More {!a}" # Calls ascii() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in ``format()`` function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value. 
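For\nexample (an illustrative sketch):\n\n   >>> format(3.25, \'\') == str(3.25)\n   True\n\n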
A non-\nempty format string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <a character other than \'{\' or \'}\'>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nThe *fill* character can be any character other than \'{\' or \'}\'. The\npresence of a fill character is signaled by the character following\nit, which must be one of the alignment options. If the second\ncharacter of *format_spec* is not a valid alignment option, then it is\nassumed that both the fill character and the alignment option are\nabsent.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option causes the "alternate form" to be used for the\nconversion. The alternate form is defined differently for different\ntypes. This option is only valid for integer, float, complex and\nDecimal types. For integers, when binary, octal, or hexadecimal output\nis used, this option adds the prefix respective ``\'0b\'``, ``\'0o\'``, or\n``\'0x\'`` to the output value. For floats, complex and Decimal the\nalternate form causes the result of the conversion to always contain a\ndecimal-point character, even if no digits follow it. 
Normally, a\ndecimal-point character appears in the result of these conversions\nonly if a digit follows it. In addition, for ``\'g\'`` and ``\'G\'``\nconversions, trailing zeros are not removed from the result.\n\nThe ``\',\'`` option signals the use of a comma for a thousands\nseparator. For a locale aware separator, use the ``\'n\'`` integer\npresentation type instead.\n\nChanged in version 3.1: Added the ``\',\'`` option (see also **PEP\n378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero (``\'0\'``) character enables\nsign-aware zero-padding for numeric types. This is equivalent to a\n*fill* character of ``\'0\'`` with an *alignment* type of ``\'=\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'s\'`` | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'s\'``. |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. 
|\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except\n``\'n\'`` and None). When doing so, ``float()`` is used to convert the\ninteger to a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``, but converts ``nan`` to |\n | | ``NAN`` and ``inf`` to ``INF``. |\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. For a given precision ``p >= 1``, this |\n | | rounds the number to ``p`` significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1`` would have exponent ``exp``. Then if ``-4 <= exp |\n | | < p``, the number is formatted with presentation type |\n | | ``\'f\'`` and precision ``p-1-exp``. Otherwise, the number |\n | | is formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1``. In both cases insignificant trailing zeros are |\n | | removed from the significand, and the decimal point is |\n | | also removed if there are no remaining digits following |\n | | it. Positive and negative infinity, positive and negative |\n | | zero, and nans, are formatted as ``inf``, ``-inf``, ``0``, |\n | | ``-0`` and ``nan`` respectively, regardless of the |\n | | precision. A precision of ``0`` is treated as equivalent |\n | | to a precision of ``1``. |\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | Similar to ``\'g\'``, except with at least one digit past |\n | | the decimal point and a default precision of 12. This is |\n | | intended to match ``str()``, except you can add the other |\n | | format modifiers. 
|\n   +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old ``%``-formatting.\n\nIn most cases the syntax is similar to the old\n``%``-formatting, with the addition of the ``{}`` and with ``:`` used\ninstead of ``%``. For example, ``\'%03.2f\'`` can be translated to\n``\'{:03.2f}\'``.\n\nThe new format syntax also supports new and different options, shown\nin the following examples.\n\nAccessing arguments by position:\n\n   >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n   \'a, b, c\'\n   >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\')  # 3.1+ only\n   \'a, b, c\'\n   >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n   \'c, b, a\'\n   >>> \'{2}, {1}, {0}\'.format(*\'abc\')      # unpacking argument sequence\n   \'c, b, a\'\n   >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\')   # arguments\' indices can be repeated\n   \'abracadabra\'\n\nAccessing arguments by name:\n\n   >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n   \'Coordinates: 37.24N, -115.81W\'\n   >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n   >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n   \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n   >>> c = 3-5j\n   >>> (\'The complex number {0} is formed from the real part {0.real} \'\n   ...  \'and the imaginary part {0.imag}.\').format(c)\n   \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n   >>> class Point:\n   ...     def __init__(self, x, y):\n   ...         self.x, self.y = x, y\n   ...     def __str__(self):\n   ...         return \'Point({self.x}, {self.y})\'.format(self=self)\n   ...\n   >>> str(Point(4, 2))\n   \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n   >>> coord = (3, 5)\n   >>> \'X: {0[0]};  Y: {0[1]}\'.format(coord)\n   \'X: 3;  Y: 5\'\n\nReplacing ``%s`` and ``%r``:\n\n   >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n   "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n   >>> \'{:<30}\'.format(\'left aligned\')\n   \'left aligned                  \'\n   >>> \'{:>30}\'.format(\'right aligned\')\n   \'                 right aligned\'\n   >>> \'{:^30}\'.format(\'centered\')\n   \'           centered           \'\n   >>> \'{:*^30}\'.format(\'centered\')  # use \'*\' as a fill char\n   \'***********centered***********\'\n\nReplacing ``%+f``, ``%-f``, and ``% f`` and specifying a sign:\n\n   >>> \'{:+f}; {:+f}\'.format(3.14, -3.14)  # show it always\n   \'+3.140000; -3.140000\'\n   >>> \'{: f}; {: f}\'.format(3.14, -3.14)  # show a space for positive numbers\n   \' 3.140000; -3.140000\'\n   >>> \'{:-f}; {:-f}\'.format(3.14, -3.14)  # show only the minus -- same as \'{:f}; {:f}\'\n   \'3.140000; -3.140000\'\n\nReplacing ``%x`` and ``%o`` and converting the value to different\nbases:\n\n   >>> # format also supports binary numbers\n   >>> "int: {0:d};  hex: {0:x};  oct: {0:o};  bin: {0:b}".format(42)\n   \'int: 42;  hex: 2a;  oct: 52;  bin: 101010\'\n   >>> # with 0x, 0o, or 0b as prefix:\n   >>> "int: {0:d};  hex: {0:#x};  oct: {0:#o};  bin: {0:#b}".format(42)\n   \'int: 42;  hex: 0x2a;  oct: 0o52;  bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n   >>> \'{:,}\'.format(1234567890)\n   \'1,234,567,890\'\n\nExpressing a percentage:\n\n   >>> points = 19\n   >>> total = 22\n   >>> \'Correct answers: {:.2%}\'.format(points/total)\n   \'Correct answers: 86.36%\'\n\nUsing type-specific formatting:\n\n   >>> import datetime\n   >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n   >>> 
\'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12): #doctest: +NORMALIZE_WHITESPACE\n ... for base in \'dXob\':\n ... print(\'{0:{width}{base}}\'.format(num, base=base, width=width), end=\' \')\n ... print()\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', 'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the "``*``" must also have a default value ---\nthis is a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
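For example (an\nillustrative sketch; ``append_to`` is a made-up name), every call that\nrelies on the default shares one list object:\n\n   def append_to(item, seq=[]):\n       seq.append(item)\n       return seq\n\n   append_to(1)   # returns [1]\n   append_to(2)   # returns [1, 2], not [2]\n\n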
A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n   def whats_on_the_telly(penguin=None):\n       if penguin is None:\n           penguin = []\n       penguin.append("property of the zoo")\n       return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from positional arguments,\nfrom keyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after "``*``" or "``*identifier``" are\nkeyword-only parameters and may only be passed using keyword\narguments.\n\nParameters may have annotations of the form "``: expression``"\nfollowing the parameter name. Any parameter may have an annotation,\neven those of the form ``*identifier`` or ``**identifier``. Functions\nmay have a "return" annotation of the form "``-> expression``" after\nthe parameter list. These annotations can be any valid Python\nexpression and are evaluated when the function definition is executed.\nAnnotations may be evaluated in a different order than they appear in\nthe source code. The presence of annotations does not change the\nsemantics of a function. The annotation values are available as\nvalues of a dictionary keyed by the parameters\' names in the\n``__annotations__`` attribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also:\n\n   **PEP 3107** - Function Annotations\n      The original specification for function annotations.\n', 'global': '\nThe ``global`` statement\n************************\n\n   global_stmt ::= "global" identifier ("," identifier)*\n\nThe ``global`` statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. 
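For example (a minimal sketch; the names are\nillustrative):\n\n   counter = 0\n\n   def increment():\n       global counter\n       counter += 1   # rebinds the module-level name\n\n   increment()\n   print(counter)   # prints 1\n\n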
It would be impossible to assign to a global\nvariable without ``global``, although free variables may refer to\nglobals without being declared global.\n\nNames listed in a ``global`` statement must not be used in the same\ncode block textually preceding that ``global`` statement.\n\nNames listed in a ``global`` statement must not be defined as formal\nparameters or in a ``for`` loop control target, ``class`` definition,\nfunction definition, or ``import`` statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the ``global`` statement is a directive to the\nparser. It applies only to code parsed at the same time as the\n``global`` statement. In particular, a ``global`` statement contained\nin a string or code object supplied to the built-in ``exec()``\nfunction does not affect the code block *containing* the function\ncall, and code contained in such a string is unaffected by ``global``\nstatements in the code containing the function call. The same applies\nto the ``eval()`` and ``compile()`` functions.\n', 'id-classes': '\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n   Not imported by ``from module import *``. The special identifier\n   ``_`` is used in the interactive interpreter to store the result of\n   the last evaluation; it is stored in the ``builtins`` module. When\n   not in interactive mode, ``_`` has no special meaning and is not\n   defined. See section *The import statement*.\n\n   Note: The name ``_`` is often used in conjunction with\n     internationalization; refer to the documentation for the\n     ``gettext`` module for more information on this convention.\n\n``__*__``\n   System-defined names. These names are defined by the interpreter\n   and its implementation (including the standard library). Current\n   system names are discussed in the *Special method names* section\n   and elsewhere. More will likely be defined in future versions of\n   Python. *Any* use of ``__*__`` names, in any context, that does\n   not follow explicitly documented use, is subject to breakage\n   without warning.\n\n``__*``\n   Class-private names. Names in this category, when used within the\n   context of a class definition, are re-written to use a mangled form\n   to help avoid name clashes between "private" attributes of base and\n   derived classes. See section *Identifiers (Names)*.\n', 'identifiers': '\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions.\n\nThe syntax of identifiers in Python is based on the Unicode standard\nannex UAX-31, with elaboration and changes as defined below; see also\n**PEP 3131** for further details.\n\nWithin the ASCII range (U+0001..U+007F), the valid characters for\nidentifiers are the same as in Python 2.x: the uppercase and lowercase\nletters ``A`` through ``Z``, the underscore ``_`` and, except for the\nfirst character, the digits ``0`` through ``9``.\n\nPython 3.0 introduces additional characters from outside the ASCII\nrange (see **PEP 3131**). 
For these characters, the classification\nuses the version of the Unicode Character Database as included in the\n``unicodedata`` module.\n\nIdentifiers are unlimited in length. Case is significant.\n\n identifier ::= xid_start xid_continue*\n id_start ::= <all characters in general categories Lu, Ll, Lt, Lm, Lo, Nl, the underscore, and characters with the Other_ID_Start property>\n id_continue ::= <all characters in id_start, plus characters in the categories Mn, Mc, Nd, Pc and others with the Other_ID_Continue property>\n xid_start ::= <all characters in id_start whose NFKC normalization is in "id_start xid_continue*">\n xid_continue ::= <all characters in id_continue whose NFKC normalization is in "id_continue*">\n\nThe Unicode category codes mentioned above stand for:\n\n* *Lu* - uppercase letters\n\n* *Ll* - lowercase letters\n\n* *Lt* - titlecase letters\n\n* *Lm* - modifier letters\n\n* *Lo* - other letters\n\n* *Nl* - letter numbers\n\n* *Mn* - nonspacing marks\n\n* *Mc* - spacing combining marks\n\n* *Nd* - decimal numbers\n\n* *Pc* - connector punctuations\n\n* *Other_ID_Start* - explicit list of characters in PropList.txt to\n support backwards compatibility\n\n* *Other_ID_Continue* - likewise\n\nAll identifiers are converted into the normal form NFKC while parsing;\ncomparison of identifiers is based on NFKC.\n\nA non-normative HTML file listing all valid identifier characters for\nUnicode 4.1 can be found at http://www.dcl.hpi.uni-\npotsdam.de/home/loewis/table-3131.html.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n False class finally is return\n None continue for lambda try\n True def from nonlocal while\n and del global not with\n as elif if or yield\n assert else import pass\n break except in raise\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``builtins`` module. When\n not in interactive mode, ``_`` has no special meaning and is not\n defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. 
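For example (an illustrative\n   sketch; ``Ham`` and ``__spam`` are made-up names), ``__spam``\n   assigned in the body of ``class Ham`` is stored as ``_Ham__spam``:\n\n      class Ham:\n          __spam = 1\n\n      Ham._Ham__spam   # 1 -- the mangled name\n      Ham.__spam       # raises AttributeError outside the class body\n\n   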
See section *Identifiers (Names)*.\n', 'if': '\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n   if_stmt ::= "if" expression ":" suite\n               ( "elif" expression ":" suite )*\n               ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'imaginary': '\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n   imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., ``(3+4j)``. Some examples of imaginary literals:\n\n   3.14j   10.j    10j     .001j   1e100j  3.14e-10j\n', 'import': '\nThe ``import`` statement\n************************\n\n   import_stmt     ::= "import" module ["as" name] ( "," module ["as" name] )*\n                   | "from" relative_module "import" identifier ["as" name]\n                   ( "," identifier ["as" name] )*\n                   | "from" relative_module "import" "(" identifier ["as" name]\n                   ( "," identifier ["as" name] )* [","] ")"\n                   | "from" module "import" "*"\n   module          ::= (identifier ".")* identifier\n   relative_module ::= "."* module | "."+\n   name            ::= identifier\n\nThe basic import statement (no ``from`` clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope where\n   the ``import`` statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individual import statements.\n\nThe details of the first step, finding and loading modules, are\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by ``as``, then the name following\n  ``as`` is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n  top level module, the module\'s name is bound in the local namespace\n  as a reference to the imported module.\n\n* If the module being imported is *not* a top level module, then the\n  name of the top level package that contains the module is bound in\n  the local namespace as a reference to the top level package. The\n  imported module must be accessed using its fully qualified name\n  rather than directly.\n\nThe ``from`` form uses a slightly more complex process:\n\n1. find the module specified in the ``from`` clause, loading and\n   initializing it if necessary;\n\n2. 
for each of the identifiers specified in the ``import`` clauses:\n\n   1. check if the imported module has an attribute by that name\n\n   2. if not, attempt to import a submodule with that name and then\n      check the imported module again for that attribute\n\n   3. if the attribute is not found, ``ImportError`` is raised.\n\n   4. otherwise, a reference to that value is bound in the local\n      namespace, using the name in the ``as`` clause if it is present,\n      otherwise using the attribute name\n\nExamples:\n\n   import foo                 # foo imported and bound locally\n   import foo.bar.baz         # foo.bar.baz imported, foo bound locally\n   import foo.bar.baz as fbb  # foo.bar.baz imported and bound as fbb\n   from foo.bar import baz    # foo.bar.baz imported and bound as baz\n   from foo import attr       # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star (``\'*\'``), all public\nnames defined in the module are bound in the local namespace for the\nscope where the ``import`` statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope.\nAttempting to use it in class or function definitions will raise a\n``SyntaxError``.\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after ``from``\nyou can specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n``from . import mod`` from a module in the ``pkg`` package then you\nwill end up importing ``pkg.mod``. 
If you execute ``from ..subpkg2\nimport mod`` from within ``pkg.subpkg1`` you will import\n``pkg.subpkg2.mod``. The specification for relative imports is\ncontained within **PEP 328**.\n\n``importlib.import_module()`` is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are ``absolute_import``,\n``division``, ``generators``, ``unicode_literals``,\n``print_function``, ``nested_scopes`` and ``with_statement``. They\nare all redundant because they are always enabled, and only kept for\nbackwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module ``__future__``, described later, and it\nwill be imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions ``exec()`` and\n``compile()`` that occur in a module ``M`` containing a future\nstatement will, by default, use the new syntax or semantics associated\nwith the future statement. This can be controlled by optional\narguments to ``compile()`` --- see the documentation of that function\nfor details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. 
If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n   **PEP 236** - Back to the __future__\n      The original proposal for the __future__ mechanism.\n', 'in': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n   comparison    ::= or_expr ( comp_operator or_expr )*\n   comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n                     | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nthe ``==`` and ``!=`` operators *always* consider objects of different\ntypes to be unequal, while the ``<``, ``>``, ``>=`` and ``<=``\noperators raise a ``TypeError`` when comparing objects of different\ntypes that do not implement these operators for the given pair of\ntypes. You can control comparison behavior of objects of non-built-in\ntypes by defining rich comparison methods like ``__gt__()``, described\nin section *Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values ``float(\'NaN\')`` and ``Decimal(\'NaN\')`` are special. They\n  are identical to themselves, ``x is x``, but are not equal to\n  themselves, ``x != x``. Additionally, comparing any value to a\n  not-a-number value will return ``False``. For example, both ``3 <\n  float(\'NaN\')`` and ``float(\'NaN\') < 3`` will return ``False``.\n\n* Bytes objects are compared lexicographically using the numeric\n  values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n  (the result of the built-in function ``ord()``) of their characters.\n  [3] String and bytes objects can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n  corresponding elements. This means that to compare equal, each\n  element must compare equal and the two sequences must be of the same\n  type and have the same length.\n\n  If not equal, the sequences are ordered the same as their first\n  differing elements. For example, ``[1,2,x] <= [1,2,y]`` has the\n  same value as ``x <= y``. If the corresponding element does not\n  exist, the shorter sequence is ordered first (for example, ``[1,2] <\n  [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if they have the\n  same ``(key, value)`` pairs. 
Order comparisons ``(\'<\', \'<=\', \'>=\',\n  \'>\')`` raise ``TypeError``.\n\n* Sets and frozensets define comparison operators to mean subset and\n  superset tests. Those relations do not define total orderings (the\n  two sets ``{1,2}`` and ``{2,3}`` are not equal, nor subsets of one\n  another, nor supersets of one another). Accordingly, sets are not\n  appropriate arguments for functions which depend on total ordering.\n  For example, ``min()``, ``max()``, and ``sorted()`` produce\n  undefined results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n  the same object; the choice whether one object is considered smaller\n  or larger than another one is made arbitrarily but consistently\n  within one execution of a program.\n\nComparison of objects of differing types depends on whether either of\nthe types provide explicit support for the comparison. Most numeric\ntypes can be compared with one another. When cross-type comparison is\nnot supported, the comparison method returns ``NotImplemented``.\n\nThe operators ``in`` and ``not in`` test for membership. ``x in s``\nevaluates to true if *x* is a member of *s*, and false otherwise. ``x\nnot in s`` returns the negation of ``x in s``. All built-in sequences\nand set types support this as well as dictionaries, for which ``in``\ntests whether the dictionary has a given key. For container types\nsuch as list, tuple, set, frozenset, dict, or collections.deque, the\nexpression ``x in y`` is equivalent to ``any(x is e or x == e for e in\ny)``.\n\nFor the string and bytes types, ``x in y`` is true if and only if *x*\nis a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nEmpty strings are always considered to be a substring of any other\nstring, so ``"" in "abc"`` will return ``True``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise an ``IndexError`` exception. (If any\nother exception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse truth value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. 
[4]\n', 'integers': '\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n   integer        ::= decimalinteger | octinteger | hexinteger | bininteger\n   decimalinteger ::= nonzerodigit digit* | "0"+\n   nonzerodigit   ::= "1"..."9"\n   digit          ::= "0"..."9"\n   octinteger     ::= "0" ("o" | "O") octdigit+\n   hexinteger     ::= "0" ("x" | "X") hexdigit+\n   bininteger     ::= "0" ("b" | "B") bindigit+\n   octdigit       ::= "0"..."7"\n   hexdigit       ::= digit | "a"..."f" | "A"..."F"\n   bindigit       ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n   7     2147483647                        0o177    0b100110111\n   3     79228162514264337593543950336     0o377    0x100000000\n         79228162514264337593543950336              0xdeadbeef\n', 'lambda': '\nLambdas\n*******\n\n   lambda_form        ::= "lambda" [parameter_list]: expression\n   lambda_form_nocond ::= "lambda" [parameter_list]: expression_nocond\n\nLambda forms (lambda expressions) have the same syntactic position as\nexpressions. They are a shorthand to create anonymous functions; the\nexpression ``lambda arguments: expression`` yields a function object.\nThe unnamed object behaves like a function object defined with\n\n   def <lambda>(arguments):\n       return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda forms cannot contain\nstatements or annotations.\n', 'lists': '\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n   list_display ::= "[" [expression_list | comprehension] "]"\n\nA list display yields a new list object, the contents being specified\nby either a list of expressions or a comprehension. When a comma-\nseparated list of expressions is supplied, its elements are evaluated\nfrom left to right and placed into the list object in that order.\nWhen a comprehension is supplied, the list is constructed from the\nelements resulting from the comprehension.\n', 'naming': "\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line as the first argument) is a code\nblock. A script command (a command specified on the interpreter\ncommand line with the '**-c**' option) is a code block. The string\nargument passed to the built-in functions ``eval()`` and ``exec()`` is\na code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block's execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. 
If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block's *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as ``nonlocal``. If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, or\nafter ``as`` in a ``with`` statement or ``except`` clause. The\n``import`` statement of the form ``from ... import *`` binds all names\ndefined in the imported module, except those beginning with an\nunderscore. This form may only be used at the module level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the ``global`` statement occurs within a block, all uses of the\nname specified in the statement refer to the binding of that name in\nthe top-level namespace. Names are resolved in the top-level\nnamespace by searching the global namespace, i.e. the namespace of the\nmodule containing the code block, and the builtins namespace, the\nnamespace of the module ``builtins``. The global namespace is\nsearched first. If the name is not found there, the builtins\nnamespace is searched. The global statement must precede all uses of\nthe name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module's dictionary is used). 
By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``builtins``; when in any other module, ``__builtins__`` is an alias\nfor the dictionary of the ``builtins`` module itself.\n``__builtins__`` can be set to a user-created dictionary to create a\nweak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``builtins`` module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe ``global`` statement has the same scope as a name binding\noperation in the same block. If the nearest enclosing scope for a\nfree variable contains a global statement, the free variable is\ntreated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nThe ``eval()`` and ``exec()`` functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe ``exec()`` and ``eval()`` functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n", 'nonlocal': '\nThe ``nonlocal`` statement\n**************************\n\n   nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe ``nonlocal`` statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope. This is\nimportant because the default behavior for binding is to search the\nlocal namespace first. The statement allows encapsulated code to\nrebind variables outside of the local scope besides the global\n(module) scope.\n\nNames listed in a ``nonlocal`` statement, unlike those listed in a\n``global`` statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a ``nonlocal`` statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also:\n\n   **PEP 3104** - Access to Names in Outer Scopes\n      The specification for the ``nonlocal`` statement.\n', 'numbers': "\nNumeric literals\n****************\n\nThere are three types of numeric literals: integers, floating point\nnumbers, and imaginary numbers. 
There are no complex literals\n(complex numbers can be formed by adding a real number and an\nimaginary number).\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator '``-``' and\nthe literal ``1``.\n", 'numeric-types': "\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``). For instance, to evaluate the expression ``x + y``, where\n *x* is an instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()``. Note that\n ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``) with reflected (swapped) operands. These functions are only\n called if the left operand does not support the corresponding\n operation and the operands are of different types. [2] For\n instance, to evaluate the expression ``x - y``, where *y* is an\n instance of a class that has an ``__rsub__()`` method,\n ``y.__rsub__(x)`` is called if ``x.__sub__(y)`` returns\n *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand's type is a subclass of the left operand's\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand's\n non-reflected method. 
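A minimal\n  illustration (hypothetical classes):\n\n     class Base:\n         def __add__(self, other):\n             return 'Base.__add__'\n\n     class Derived(Base):\n         def __radd__(self, other):\n             return 'Derived.__radd__'\n\n     print(Base() + Derived())   # Derived.__radd__\n\n  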
This behavior allows subclasses to\n override their ancestors' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``float()`` and ``round()``. Should return a value of\n the appropriate type.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing, or in the\n built-in ``bin()``, ``hex()`` and ``oct()`` functions). Must return\n an integer.\n", 'objects': '\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'``is``\' operator compares the\nidentity of two objects; the ``id()`` function returns an integer\nrepresenting its identity.\n\n**CPython implementation detail:** For CPython, ``id(x)`` is the\nmemory address where ``x`` is stored.\n\nAn object\'s type determines the operations that the object supports\n(e.g., "does it have a length?") and also defines the possible values\nfor objects of that type. The ``type()`` function returns an object\'s\ntype (which is an object itself). Like its identity, an object\'s\n*type* is also unchangeable. [1]\n\nThe *value* of some objects can change. Objects whose value can\nchange are said to be *mutable*; objects whose value is unchangeable\nonce they are created are called *immutable*. (The value of an\nimmutable container object that contains a reference to a mutable\nobject can change when the latter\'s value is changed; however the\ncontainer is still considered immutable, because the collection of\nobjects it contains cannot be changed. 
So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the ``gc`` module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'``try``...``except``\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a ``close()`` method. Programs\nare strongly recommended to explicitly close such objects. The\n\'``try``...``finally``\' statement and the \'``with``\' statement provide\nconvenient ways to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after ``a = 1; b =\n1``, ``a`` and ``b`` may or may not refer to the same object with the\nvalue one, depending on the implementation, but after ``c = []; d =\n[]``, ``c`` and ``d`` are guaranteed to refer to two different,\nunique, newly created empty lists. (Note that ``c = d = []`` assigns\nthe same object to both ``c`` and ``d``.)\n', 'operator-summary': '\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. 
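For example:\n\n   1 + 2 * 3    # 7: ``*`` binds more tightly than ``+``\n   not 1 == 2   # True: parsed as not (1 == 2)\n\n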
Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| ``lambda`` | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| ``if`` -- ``else`` | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| ``or`` | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| ``and`` | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| ``not`` ``x`` | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``in``, ``not in``, ``is``, ``is not``, ``<``, | Comparisons, including membership |\n| ``<=``, ``>``, ``>=``, ``!=``, ``==`` | tests and identity tests, |\n+-------------------------------------------------+---------------------------------------+\n| ``|`` | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| ``^`` | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| ``&`` | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| ``<<``, ``>>`` | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| ``+``, ``-`` | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| ``*``, ``/``, ``//``, ``%`` | Multiplication, division, remainder |\n| | [5] |\n+-------------------------------------------------+---------------------------------------+\n| ``+x``, ``-x``, ``~x`` | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``**`` | Exponentiation [6] |\n+-------------------------------------------------+---------------------------------------+\n| ``x[index]``, ``x[index:index]``, | Subscription, slicing, call, |\n| ``x(arguments...)``, ``x.attribute`` | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| ``(expressions...)``, ``[expressions...]``, | Binding or tuple display, list |\n| ``{key: value...}``, ``{expressions...}`` | display, dictionary display, set |\n| | display |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it\n may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that ``-1e-100 % 1e100`` have the same\n sign as ``1e100``, the computed result is ``-1e-100 + 1e100``,\n which is numerically exactly equal to ``1e100``. The function\n ``math.fmod()`` returns a result whose sign matches the sign of\n the first argument instead, and so returns ``-1e-100`` in this\n case. 
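For example:\n\n       >>> import math\n       >>> -1e-100 % 1e100\n       1e+100\n       >>> math.fmod(-1e-100, 1e100)\n       -1e-100\n\n    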
Which approach is more appropriate depends on the\n    application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n    possible for ``x//y`` to be one larger than ``(x-x%y)//y`` due to\n    rounding. In such cases, Python returns the latter result, in\n    order to preserve that ``divmod(x,y)[0] * y + x % y`` be very\n    close to ``x``.\n\n[3] While comparisons between strings make sense at the byte level,\n    they may be counter-intuitive to users. For example, the strings\n    ``"\\u00C7"`` and ``"\\u0043\\u0327"`` compare differently, even\n    though they both represent the same unicode character (LATIN\n    CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n    recognizable way, compare using ``unicodedata.normalize()``.\n\n[4] Due to automatic garbage-collection, free lists, and the dynamic\n    nature of descriptors, you may notice seemingly unusual behaviour\n    in certain uses of the ``is`` operator, like those involving\n    comparisons between instance methods, or constants. Check their\n    documentation for more info.\n\n[5] The ``%`` operator is also used for string formatting; the same\n    precedence applies.\n\n[6] The power operator ``**`` binds less tightly than an arithmetic or\n    bitwise unary operator on its right, that is, ``2**-1`` is\n    ``0.5``.\n', 'pass': '\nThe ``pass`` statement\n**********************\n\n   pass_stmt ::= "pass"\n\n``pass`` is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n   def f(arg): pass    # a function that does nothing (yet)\n\n   class C: pass       # a class with no methods (yet)\n', 'power': '\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n   power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): ``-1**2`` results in ``-1``.\n\nThe power operator has the same semantics as the built-in ``pow()``\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type, and the result is of that type.\n\nFor int operands, the result has the same type as the operands unless\nthe second argument is negative; in that case, all arguments are\nconverted to float and a float result is delivered. For example,\n``10**2`` returns ``100``, but ``10**-2`` returns ``0.01``.\n\nRaising ``0.0`` to a negative power results in a\n``ZeroDivisionError``. Raising a negative number to a fractional power\nresults in a ``complex`` number. (In earlier versions it raised a\n``ValueError``.)\n', 'raise': '\nThe ``raise`` statement\n***********************\n\n   raise_stmt ::= "raise" [expression ["from" expression]]\n\nIf no expressions are present, ``raise`` re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a ``RuntimeError`` exception is raised indicating\nthat this is an error.\n\nOtherwise, ``raise`` evaluates the first expression as the exception\nobject. It must be either a subclass or an instance of\n``BaseException``. 
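For example:\n\n   raise ValueError                # a class\n   raise ValueError("bad value")   # an instance\n\n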
If it is a class, the exception instance will be\nobtained when needed by instantiating the class with no arguments.\n\nThe *type* of the exception is the exception instance\'s class, the\n*value* is the instance itself.\n\nA traceback object is normally created automatically when an exception\nis raised and attached to it as the ``__traceback__`` attribute, which\nis writable. You can create an exception and set your own traceback in\none step using the ``with_traceback()`` exception method (which\nreturns the same exception instance, with its traceback set to its\nargument), like so:\n\n raise Exception("foo occurred").with_traceback(tracebackobj)\n\nThe ``from`` clause is used for exception chaining: if given, the\nsecond *expression* must be another exception class or instance, which\nwill then be attached to the raised exception as the ``__cause__``\nattribute (which is writable). If the raised exception is not\nhandled, both exceptions will be printed:\n\n >>> try:\n ... print(1 / 0)\n ... except Exception as exc:\n ... raise RuntimeError("Something bad happened") from exc\n ...\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n ZeroDivisionError: int division or modulo by zero\n\n The above exception was the direct cause of the following exception:\n\n Traceback (most recent call last):\n File "<stdin>", line 4, in <module>\n RuntimeError: Something bad happened\n\nA similar mechanism works implicitly if an exception is raised inside\nan exception handler: the previous exception is then attached as the\nnew exception\'s ``__context__`` attribute:\n\n >>> try:\n ... print(1 / 0)\n ... except:\n ... raise RuntimeError("Something bad happened")\n ...\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n ZeroDivisionError: int division or modulo by zero\n\n During handling of the above exception, another exception occurred:\n\n Traceback (most recent call last):\n File "<stdin>", line 4, in <module>\n RuntimeError: Something bad happened\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n', 'return': '\nThe ``return`` statement\n************************\n\n return_stmt ::= "return" [expression_list]\n\n``return`` may only occur syntactically nested in a function\ndefinition, not within a nested class definition.\n\nIf an expression list is present, it is evaluated, else ``None`` is\nsubstituted.\n\n``return`` leaves the current function call with the expression list\n(or ``None``) as return value.\n\nWhen ``return`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the function.\n\nIn a generator function, the ``return`` statement indicates that the\ngenerator is done and will cause ``StopIteration`` to be raised. The\nreturned value (if any) is used as an argument to construct\n``StopIteration`` and becomes the ``StopIteration.value`` attribute.\n', 'sequence-types': "\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. 
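As a minimal sketch, here is a\nhypothetical read-only sequence (integer keys only; slice support is\nomitted):\n\n   class Squares:\n       def __init__(self, n):\n           self._n = n\n       def __len__(self):\n           return self._n\n       def __getitem__(self, i):\n           if not isinstance(i, int):\n               raise TypeError('indices must be integers')\n           if i < 0:\n               i += self._n                 # negative indexing by hand\n           if not 0 <= i < self._n:\n               raise IndexError('sequence index out of range')\n           return i * i\n\n   print(list(Squares(5)))   # [0, 1, 4, 9, 16]\n\n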
The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``get()``,\n``clear()``, ``setdefault()``, ``pop()``, ``popitem()``, ``copy()``,\nand ``update()`` behaving similar to those for Python's standard\ndictionary objects. The ``collections`` module provides a\n``MutableMapping`` abstract base class to help create those methods\nfrom a base set of ``__getitem__()``, ``__setitem__()``,\n``__delitem__()``, and ``keys()``. Mutable sequences should provide\nmethods ``append()``, ``count()``, ``index()``, ``extend()``,\n``insert()``, ``pop()``, ``remove()``, ``reverse()`` and ``sort()``,\nlike Python standard list objects. Finally, sequence types should\nimplement addition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods ``__add__()``, ``__radd__()``,\n``__iadd__()``, ``__mul__()``, ``__rmul__()`` and ``__imul__()``\ndescribed below; they should not define other numerical operators. It\nis recommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should search the mapping's keys; for\nsequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``keys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn't define a ``__bool__()`` method and whose ``__len__()``\n method returns zero is considered to be false in a Boolean context.\n\nNote: Slicing is done exclusively with the following three methods. A\n call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with\n ``None``.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. 
The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``keys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don't define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n", 'shifting': '\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by ``pow(2,n)``. A\nleft shift by *n* bits is defined as multiplication with ``pow(2,n)``.\n\nNote: In the current implementation, the right-hand operand is required to\n be at most ``sys.maxsize``. If the right-hand operand is larger\n than ``sys.maxsize`` an ``OverflowError`` exception is raised.\n', 'slicings': '\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or ``del`` statements. 
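For example:\n\n   s = list(range(10))\n   s[2:5]       # [2, 3, 4] -- slicing as an expression\n   s[7:] = []   # slicing as an assignment target\n   del s[:2]    # slicing as a ``del`` target\n\n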
The syntax for a\nslicing:\n\n   slicing      ::= primary "[" slice_list "]"\n   slice_list   ::= slice_item ("," slice_item)* [","]\n   slice_item   ::= expression | proper_slice\n   proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]\n   lower_bound  ::= expression\n   upper_bound  ::= expression\n   stride       ::= expression\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice).\n\nThe semantics for a slicing are as follows. The primary is indexed\n(using the same ``__getitem__()`` method as normal subscription) with\na key that is constructed from the slice list, as follows. If the\nslice list contains at least one comma, the key is a tuple containing\nthe conversion of the slice items; otherwise, the conversion of the\nlone slice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of a proper slice is a\nslice object (see section *The standard type hierarchy*) whose\n``start``, ``stop`` and ``step`` attributes are the values of the\nexpressions given as lower bound, upper bound and stride,\nrespectively, substituting ``None`` for missing expressions.\n', 'specialattrs': '\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the ``dir()`` built-in function.\n\nobject.__dict__\n\n   A dictionary or other mapping object used to store an object\'s\n   (writable) attributes.\n\ninstance.__class__\n\n   The class to which a class instance belongs.\n\nclass.__bases__\n\n   The tuple of base classes of a class object.\n\nclass.__name__\n\n   The name of the class or type.\n\nclass.__qualname__\n\n   The *qualified name* of the class or type.\n\n   New in version 3.3.\n\nclass.__mro__\n\n   This attribute is a tuple of classes that are considered when\n   looking for base classes during method resolution.\n\nclass.mro()\n\n   This method can be overridden by a metaclass to customize the\n   method resolution order for its instances. It is called at class\n   instantiation, and its result is stored in ``__mro__``.\n\nclass.__subclasses__()\n\n   Each class keeps a list of weak references to its immediate\n   subclasses. This method returns a list of all those references\n   still alive. 
Example:\n\n >>> int.__subclasses__()\n [<class \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found in\n the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list ``[1, 2]`` is considered equal to\n ``[1.0, 2.0]``, and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property being\n one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase), or "Lt"\n (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a singleton\n tuple whose only element is the tuple to be formatted.\n', 'specialnames': '\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named ``__getitem__()``, and ``x`` is an instance of this\nclass, then ``x[i]`` is roughly equivalent to ``type(x).__getitem__(x,\ni)``. Except where mentioned, attempts to execute an operation raise\nan exception when no appropriate method is defined (typically\n``AttributeError`` or ``TypeError``).\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n``NodeList`` interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. 
If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_info()[2]`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.last_traceback``. Circular references which are garbage are\n detected when the option cycle detector is enabled (it\'s on by\n default), but can only be cleaned up if there are no Python-\n level ``__del__()`` methods involved. Refer to the documentation\n for the ``gc`` module for more information about how\n ``__del__()`` methods are handled by the cycle detector,\n particularly the description of the ``garbage`` value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function to compute the\n "official" string representation of an object. 
If at all possible,\n this should look like a valid Python expression that could be used\n to recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n ``<...some useful description...>`` should be returned. The return\n value must be a string object. If a class defines ``__repr__()``\n but not ``__str__()``, then ``__repr__()`` is also used when an\n "informal" string representation of instances of that class is\n required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by ``str(object)`` and the built-in functions ``format()``\n and ``print()`` to compute the "informal" or nicely printable\n string representation of an object. The return value must be a\n *string* object.\n\n This method differs from ``object.__repr__()`` in that there is no\n expectation that ``__str__()`` return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type ``object``\n calls ``object.__repr__()``.\n\nobject.__bytes__(self)\n\n Called by ``bytes()`` to compute a byte-string representation of an\n object. This should return a ``bytes`` object.\n\nobject.__format__(self, format_spec)\n\n Called by the ``format()`` built-in function (and by extension, the\n ``str.format()`` method of class ``str``) to produce a "formatted"\n string representation of an object. The ``format_spec`` argument is\n a string that contains a description of the formatting options\n desired. The interpretation of the ``format_spec`` argument is up\n to the type implementing ``__format__()``, however most classes\n will either delegate formatting to one of the built-in types, or\n use a similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` calls\n ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and ``x>=y`` calls\n ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. 
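A minimal sketch\n   (a hypothetical class):\n\n      class Point:\n          def __init__(self, x, y):\n              self.x, self.y = x, y\n          def __eq__(self, other):\n              if not isinstance(other, Point):\n                  return NotImplemented\n              return (self.x, self.y) == (other.x, other.y)\n          def __ne__(self, other):\n              result = self.__eq__(other)\n              return result if result is NotImplemented else not result\n\n   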
See\n   the paragraph on ``__hash__()`` for some important notes on\n   creating *hashable* objects which support custom comparison\n   operations and are usable as dictionary keys.\n\n   There are no swapped-argument versions of these methods (to be used\n   when the left argument does not support the operation but the right\n   argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n   other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n   reflection, and ``__eq__()`` and ``__ne__()`` are their own\n   reflection.\n\n   Arguments to rich comparison methods are never coerced.\n\n   To automatically generate ordering operations from a single root\n   operation, see ``functools.total_ordering()``.\n\nobject.__hash__(self)\n\n   Called by built-in function ``hash()`` and for operations on\n   members of hashed collections including ``set``, ``frozenset``, and\n   ``dict``. ``__hash__()`` should return an integer. The only\n   required property is that objects which compare equal have the same\n   hash value; it is advised to somehow mix together (e.g. using\n   exclusive or) the hash values for the components of the object that\n   also play a part in comparison of objects.\n\n   If a class does not define an ``__eq__()`` method it should not\n   define a ``__hash__()`` operation either; if it defines\n   ``__eq__()`` but not ``__hash__()``, its instances will not be\n   usable as items in hashable collections. If a class defines\n   mutable objects and implements an ``__eq__()`` method, it should\n   not implement ``__hash__()``, since the implementation of hashable\n   collections requires that a key\'s hash value is immutable (if the\n   object\'s hash value changes, it will be in the wrong hash bucket).\n\n   User-defined classes have ``__eq__()`` and ``__hash__()`` methods\n   by default; with them, all objects compare unequal (except with\n   themselves) and ``x.__hash__()`` returns an appropriate value such\n   that ``x == y`` implies both that ``x is y`` and ``hash(x) ==\n   hash(y)``.\n\n   A class that overrides ``__eq__()`` and does not define\n   ``__hash__()`` will have its ``__hash__()`` implicitly set to\n   ``None``. When the ``__hash__()`` method of a class is ``None``,\n   instances of the class will raise an appropriate ``TypeError`` when\n   a program attempts to retrieve their hash value, and will also be\n   correctly identified as unhashable when checking ``isinstance(obj,\n   collections.Hashable)``.\n\n   If a class that overrides ``__eq__()`` needs to retain the\n   implementation of ``__hash__()`` from a parent class, the\n   interpreter must be told this explicitly by setting ``__hash__ =\n   <ParentClass>.__hash__``.\n\n   If a class that does not override ``__eq__()`` wishes to suppress\n   hash support, it should include ``__hash__ = None`` in the class\n   definition. 
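For example:\n\n      import collections\n\n      class Unhashable:\n          __hash__ = None\n\n      isinstance(Unhashable(), collections.Hashable)   # False\n      hash(Unhashable())                               # raises TypeError\n\n   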
A class which defines its own ``__hash__()`` that\n   explicitly raises a ``TypeError`` would be incorrectly identified\n   as hashable by an ``isinstance(obj, collections.Hashable)`` call.\n\n   Note: By default, the ``__hash__()`` values of str, bytes and datetime\n     objects are "salted" with an unpredictable random value.\n     Although they remain constant within an individual Python\n     process, they are not predictable between repeated invocations of\n     Python. This is intended to provide protection against a denial-\n     of-service caused by carefully-chosen inputs that exploit the\n     worst case performance of a dict insertion, O(n^2) complexity.\n     See http://www.ocert.org/advisories/ocert-2011-003.html for\n     details. Changing hash values affects the iteration order of\n     dicts, sets and other mappings. Python has never made guarantees\n     about this ordering (and it typically varies between 32-bit and\n     64-bit builds). See also ``PYTHONHASHSEED``.\n\n   Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n   Called to implement truth value testing and the built-in operation\n   ``bool()``; should return ``False`` or ``True``. When this method\n   is not defined, ``__len__()`` is called, if it is defined, and the\n   object is considered true if its result is nonzero. If a class\n   defines neither ``__len__()`` nor ``__bool__()``, all its instances\n   are considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n   Called when an attribute lookup has not found the attribute in the\n   usual places (i.e. it is not an instance attribute nor is it found\n   in the class tree for ``self``). ``name`` is the attribute name.\n   This method should return the (computed) attribute value or raise\n   an ``AttributeError`` exception.\n\n   Note that if the attribute is found through the normal mechanism,\n   ``__getattr__()`` is not called. (This is an intentional asymmetry\n   between ``__getattr__()`` and ``__setattr__()``.) This is done both\n   for efficiency reasons and because otherwise ``__getattr__()``\n   would have no way to access other attributes of the instance. Note\n   that at least for instance variables, you can fake total control by\n   not inserting any values in the instance attribute dictionary (but\n   instead inserting them in another object). See the\n   ``__getattribute__()`` method below for a way to actually get total\n   control over attribute access.\n\nobject.__getattribute__(self, name)\n\n   Called unconditionally to implement attribute accesses for\n   instances of the class. If the class also defines\n   ``__getattr__()``, the latter will not be called unless\n   ``__getattribute__()`` either calls it explicitly or raises an\n   ``AttributeError``. This method should return the (computed)\n   attribute value or raise an ``AttributeError`` exception. In order\n   to avoid infinite recursion in this method, its implementation\n   should always call the base class method with the same name to\n   access any attributes it needs, for example,\n   ``object.__getattribute__(self, name)``.\n\n   Note: This method may still be bypassed when looking up special methods\n     as the result of implicit invocation via language syntax or\n     built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n   Called when an attribute assignment is attempted. 
This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when ``dir()`` is called on the object. A sequence must be\n returned. ``dir()`` converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. 
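For example, a minimal\nnon-data descriptor (a hypothetical class defining only ``__get__()``):\n\n   class Answer:\n       def __get__(self, instance, owner):\n           return 42\n\n   class C:\n       x = Answer()\n\n   C().x   # 42 -- the access is routed through Answer.__get__()\n\n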
Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n   The simplest and least common call is when user code directly\n   invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n   If binding to an object instance, ``a.x`` is transformed into the\n   call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n   If binding to a class, ``A.x`` is transformed into the call:\n   ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n   If ``a`` is an instance of ``super``, then the binding ``super(B,\n   obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n   ``A`` immediately preceding ``B`` and then invokes the descriptor\n   with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n   This class variable can be assigned a string, iterable, or sequence\n   of strings with variable names used by instances. If defined in a\n   class, *__slots__* reserves space for the declared variables and\n   prevents the automatic creation of *__dict__* and *__weakref__* for\n   each instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n  attribute of that class will always be accessible, so a *__slots__*\n  definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n  variables not listed in the *__slots__* definition. Attempts to\n  assign to an unlisted variable name raise ``AttributeError``. 
If\n  dynamic assignment of new variables is desired, then add\n  ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n  declaration.\n\n* Without a *__weakref__* variable for each instance, classes defining\n  *__slots__* do not support weak references to their instances. If weak\n  reference support is needed, then add ``\'__weakref__\'`` to the\n  sequence of strings in the *__slots__* declaration.\n\n* *__slots__* are implemented at the class level by creating\n  descriptors (*Implementing Descriptors*) for each variable name. As\n  a result, class attributes cannot be used to set default values for\n  instance variables defined by *__slots__*; otherwise, the class\n  attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n  where it is defined. As a result, subclasses will have a *__dict__*\n  unless they also define *__slots__* (which must only contain names\n  of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n  variable defined by the base class slot is inaccessible (except by\n  retrieving its descriptor directly from the base class). This\n  renders the meaning of the program undefined. In the future, a\n  check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n  "variable-length" built-in types such as ``int``, ``str`` and\n  ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n  also be used; however, in the future, special meaning may be\n  assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n  *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using ``type()``. The class body\nis executed in a new namespace and the class name is bound locally to\nthe result of ``type(name, bases, namespace)``.\n\nThe class creation process can be customised by passing the\n``metaclass`` keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. In\nthe following example, both ``MyClass`` and ``MySubclass`` are\ninstances of ``Meta``:\n\n   class Meta(type):\n      pass\n\n   class MyClass(metaclass=Meta):\n      pass\n\n   class MySubclass(MyClass):\n      pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then ``type()`` is\n  used\n\n* if an explicit metaclass is given and it is *not* an instance of\n  ``type()``, then it is used directly as the metaclass\n\n* if an instance of ``type()`` is given as the explicit metaclass, or\n  bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. ``type(cls)``) of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. 
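For example\n(hypothetical metaclasses):\n\n   class M1(type): pass\n   class M2(M1): pass              # M2 is a subtype of M1\n\n   class A(metaclass=M1): pass\n   class B(A, metaclass=M2): pass\n\n   type(B) is M2   # True: M2 is the most derived metaclass\n\n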
If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with ``TypeError``.\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. If the metaclass has a ``__prepare__``\nattribute, it is called as ``namespace = metaclass.__prepare__(name,\nbases, **kwds)`` (where the additional keyword arguments, if any, come\nfrom the class definition).\n\nIf the metaclass has no ``__prepare__`` attribute, then the class\nnamespace is initialised as an empty ``dict()`` instance.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3000\n Introduced the ``__prepare__`` namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as ``exec(body, globals(),\nnamespace)``. The key difference from a normal call to ``exec()`` is\nthat lexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling ``metaclass(name, bases,\nnamespace, **kwds)`` (the additional keywords passed here are the same\nas those passed to ``__prepare__``).\n\nThis class object is the one that will be referenced by the zero-\nargument form of ``super()``. ``__class__`` is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either ``__class__`` or ``super``. This allows the zero argument\nform of ``super()`` to correctly identify the class being defined\nbased on lexical scoping, while the class or instance that was used to\nmake the current call is identified based on the first argument passed\nto the method.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also:\n\n **PEP 3135** - New super\n Describes the implicit ``__class__`` closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. 
Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses a\n``collections.OrderedDict`` to remember the order that class members\nwere defined:\n\n   class OrderedClass(type):\n\n       @classmethod\n       def __prepare__(metacls, name, bases, **kwds):\n           return collections.OrderedDict()\n\n       def __new__(cls, name, bases, namespace, **kwds):\n           result = type.__new__(cls, name, bases, dict(namespace))\n           result.members = tuple(namespace)\n           return result\n\n   class A(metaclass=OrderedClass):\n       def one(self): pass\n       def two(self): pass\n       def three(self): pass\n       def four(self): pass\n\n   >>> A.members\n   (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s ``__prepare__()`` method which returns an\nempty ``collections.OrderedDict``. That mapping records the methods\nand attributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s ``__new__()`` method gets\ninvoked. That method builds the new type and saves the ordered\ndictionary keys in an attribute called ``members``.\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n``isinstance()`` and ``issubclass()`` built-in functions.\n\nIn particular, the metaclass ``abc.ABCMeta`` implements these methods\nin order to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n ``isinstance(instance, class)``.\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n ``issubclass(subclass, class)``.\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing ``isinstance()`` and\n ``issubclass()`` behavior through ``__instancecheck__()`` and\n ``__subclasscheck__()``, with motivation for this functionality\n in the context of adding Abstract Base Classes (see the ``abc``\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. 
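\n\nAs a quick preview of the methods described below, here is a minimal\nsketch of a small read-only mapping (the class name ``LowerDict`` is\ninvented for illustration; it normalizes keys to lowercase):\n\n   class LowerDict:\n\n       def __init__(self, data):\n           self._data = {k.lower(): v for k, v in data.items()}\n\n       def __len__(self):\n           return len(self._data)\n\n       def __getitem__(self, key):\n           return self._data[key.lower()]    # raises KeyError if missing\n\n       def __iter__(self):\n           return iter(self._data)\n\n       def __contains__(self, key):\n           return key.lower() in self._data\n\n   >>> d = LowerDict({"Spam": 1})\n   >>> d["SPAM"], len(d), "spam" in d\n   (1, 1, True)\n\n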
The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``get()``,\n``clear()``, ``setdefault()``, ``pop()``, ``popitem()``, ``copy()``,\nand ``update()`` behaving similar to those for Python\'s standard\ndictionary objects. The ``collections`` module provides a\n``MutableMapping`` abstract base class to help create those methods\nfrom a base set of ``__getitem__()``, ``__setitem__()``,\n``__delitem__()``, and ``keys()``. Mutable sequences should provide\nmethods ``append()``, ``count()``, ``index()``, ``extend()``,\n``insert()``, ``pop()``, ``remove()``, ``reverse()`` and ``sort()``,\nlike Python standard list objects. Finally, sequence types should\nimplement addition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods ``__add__()``, ``__radd__()``,\n``__iadd__()``, ``__mul__()``, ``__rmul__()`` and ``__imul__()``\ndescribed below; they should not define other numerical operators. It\nis recommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should search the mapping\'s keys; for\nsequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``keys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn\'t define a ``__bool__()`` method and whose ``__len__()``\n method returns zero is considered to be false in a Boolean context.\n\nNote: Slicing is done exclusively with the following three methods. A\n call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with\n ``None``.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. 
The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``keys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``). For instance, to evaluate the expression ``x + y``, where\n *x* is an instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. 
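\n\nAs a minimal sketch (the ``Length`` class is invented for\nillustration), an ``__add__()`` method that handles its own type and\nreturns ``NotImplemented`` for anything else looks like this:\n\n   class Length:\n\n       def __init__(self, metres):\n           self.metres = metres\n\n       def __add__(self, other):\n           if isinstance(other, Length):\n               return Length(self.metres + other.metres)\n           return NotImplemented    # let Python try the reflected method\n\n       __radd__ = __add__    # addition of lengths is symmetric\n\n   >>> (Length(2) + Length(3)).metres\n   5\n   >>> Length(2) + "x"\n   Traceback (most recent call last):\n     File "<stdin>", line 1, in <module>\n   TypeError: unsupported operand type(s) for +: \'Length\' and \'str\'\n\n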
The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()``. Note that\n ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``) with reflected (swapped) operands. These functions are only\n called if the left operand does not support the corresponding\n operation and the operands are of different types. [2] For\n instance, to evaluate the expression ``x - y``, where *y* is an\n instance of a class that has an ``__rsub__()`` method,\n ``y.__rsub__(x)`` is called if ``x.__sub__(y)`` returns\n *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``float()`` and ``round()``. Should return a value of\n the appropriate type.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. 
Also called whenever\n Python needs an integer object (such as in slicing, or in the\n built-in ``bin()``, ``hex()`` and ``oct()`` functions). Must return\n an integer.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception:\n\n >>> class C:\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as ``__hash__()`` and ``__repr__()`` that are implemented\nby all objects, including type objects. If the implicit lookup of\nthese methods used the conventional lookup process, they would fail\nwhen invoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe ``__getattribute__()`` method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... 
print("Metaclass getattribute invoked")\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object, metaclass=Meta):\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print("Class getattribute invoked")\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the ``__getattribute__()`` machinery in this fashion\nprovides significant scope for speed optimisations within the\ninterpreter, at the cost of some flexibility in the handling of\nspecial methods (the special method *must* be set on the class object\nitself in order to be consistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as ``__add__()``) fails the operation is\n not supported, which is why the reflected method is not called.\n', 'string-methods': '\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see ``str.format()``,\n*Format String Syntax* and *String Formatting*) and the other based on\nC ``printf`` style formatting that handles a narrower range of types\nand is slightly harder to use correctly, but is often faster for the\ncases it can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the ``re`` module).\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter ``\'\xc3\x9f\'`` is equivalent to\n ``"ss"``. Since it is already lowercase, ``lower()`` would do\n nothing to ``\'\xc3\x9f\'``; ``casefold()`` converts it to ``"ss"``.\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is ``\'utf-8\'``. *errors* may be given to set a different\n error handling scheme. The default for *errors* is ``\'strict\'``,\n meaning that encoding errors raise a ``UnicodeError``. 
Other\n possible values are ``\'ignore\'``, ``\'replace\'``,\n ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and any other name\n registered via ``codecs.register_error()``, see section *Codec Base\n Classes*. For a list of possible encodings, see section *Standard\n Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by zero or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to ``str.format(**mapping)``, except that ``mapping`` is\n used directly and not copied to a ``dict`` . This is useful if for\n example ``mapping`` is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character\n ``c`` is alphanumeric if one of the following returns ``True``:\n ``c.isalpha()``, ``c.isdecimal()``, ``c.isdigit()``, or\n ``c.isnumeric()``.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". 
Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when ``repr()`` is\n invoked on a string. It has no bearing on the handling of strings\n written to ``sys.stdout`` or ``sys.stderr``.)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. A ``TypeError`` will be raised if there are\n any non-string values in *iterable*, including ``bytes`` objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). 
The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n ``str.translate()``.\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified or ``-1``, then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, ``\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()`` returns\n ``[\'ab c\', \'\', \'de fg\', \'kl\']``, while the same call with\n ``splitlines(True)`` returns ``[\'ab c\\n\', \'\\n\', \'de fg\\r\',\n \'kl\\r\\n\']``.\n\n Unlike ``split()`` when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. Note that it is not necessarily true that\n ``s.swapcase().swapcase() == s``.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n      >>> "they\'re bill\'s friends from the UK".title()\n      "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n      >>> import re\n      >>> def titlecase(s):\n      ...     return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n      ...                   lambda mo: mo.group(0)[0].upper() +\n      ...                              mo.group(0)[1:].lower(),\n      ...                   s)\n      ...\n      >>> titlecase("they\'re bill\'s friends.")\n      "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n Return a copy of the string where all characters have been mapped\n through *map*, which must be a dictionary of Unicode ordinals\n (integers) to Unicode ordinals, strings or ``None``. Unmapped\n characters are left untouched. Characters mapped to ``None`` are\n deleted.\n\n You can use ``str.maketrans()`` to create a translation map from\n character-to-character mappings in different formats.\n\n Note: An even more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see\n ``encodings.cp1251`` for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that ``s.upper().isupper()`` might\n be ``False`` if ``s`` contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to ``len(s)``.\n', 'strings': '\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n   stringliteral ::= [stringprefix](shortstring | longstring)\n   stringprefix ::= "r" | "u" | "R" | "U"\n   shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n   longstring ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n   shortstringitem ::= shortstringchar | stringescapeseq\n   longstringitem ::= longstringchar | stringescapeseq\n   shortstringchar ::= <any source character except "\\" or newline or the quote>\n   longstringchar ::= <any source character except "\\">\n   stringescapeseq ::= "\\" <any source character>\n\n   bytesliteral ::= bytesprefix(shortbytes | longbytes)\n   bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n   shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n   longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n   shortbytesitem ::= shortbyteschar | bytesescapeseq\n   longbytesitem ::= longbyteschar | bytesescapeseq\n   shortbyteschar ::= <any ASCII character except "\\" or newline or the quote>\n   longbyteschar ::= <any ASCII character except "\\">\n   bytesescapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the ``stringprefix`` or\n``bytesprefix`` and the rest of the literal. 
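\n\nFor instance, the following spellings are all permitted by these\nproductions and denote equal values (a quick sketch):\n\n   >>> \'spam\' == "spam" == """spam""" == r"spam"\n   True\n   >>> b"spam" == rb"spam"\n   True\n\n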
The source character set\nis defined by the encoding declaration; it is UTF-8 if no encoding\ndeclaration is given in the source file; see section *Encoding\ndeclarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes (``\'``) or double quotes (``"``). They can also be\nenclosed in matching groups of three single or double quotes (these\nare generally referred to as *triple-quoted strings*). The backslash\n(``\\``) character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with ``\'b\'`` or ``\'B\'``; they\nproduce an instance of the ``bytes`` type instead of the ``str`` type.\nThey may only contain ASCII characters; bytes with a numeric value of\n128 or greater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix unicode strings with a\n``u`` prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter ``\'r\'`` or ``\'R\'``; such strings are called *raw strings* and\ntreat backslashes as literal characters. As a result, in string\nliterals, ``\'\\U\'`` and ``\'\\u\'`` escapes in raw strings are not treated\nspecially. Given that Python 2.x\'s raw unicode literals behave\ndifferently than Python 3.x\'s the ``\'ur\'`` syntax is not supported.\n\n New in version 3.3: The ``\'rb\'`` prefix of raw bytes literals has\n been added as a synonym of ``\'br\'``.\n\n New in version 3.3: Support for the unicode legacy literal\n (``u\'value\'``) was reintroduced to simplify the maintenance of dual\n Python 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either ``\'`` or ``"``.)\n\nUnless an ``\'r\'`` or ``\'R\'`` prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\newline`` | Backslash and newline ignored | |\n+-------------------+-----------------------------------+---------+\n| ``\\\\`` | Backslash (``\\``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\\'`` | Single quote (``\'``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\"`` | Double quote (``"``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\a`` | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| ``\\b`` | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| ``\\f`` | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\n`` | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\r`` | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| ``\\t`` | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| ``\\v`` | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| ``\\ooo`` | Character with octal value *ooo* | (1,3) |\n+-------------------+-----------------------------------+---------+\n| ``\\xhh`` | Character with hex value *hh* | (2,3) |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\N{name}`` | Character named *name* in the | (4) |\n| | Unicode database | |\n+-------------------+-----------------------------------+---------+\n| ``\\uxxxx`` | Character with 16-bit hex value | (5) |\n| | *xxxx* | |\n+-------------------+-----------------------------------+---------+\n| ``\\Uxxxxxxxx`` | Character with 32-bit hex value | (6) |\n| | *xxxxxxxx* | |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the byte\n with the given value. In a string literal, these escapes denote a\n Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n added.\n\n5. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence. Exactly four hex digits are\n required.\n\n6. Any Unicode character can be encoded this way. Exactly eight hex\n digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
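\n\nFor example (a quick sketch; ``\\q`` is not a recognized escape\nsequence, so the backslash is kept):\n\n   >>> "\\q"\n   \'\\\\q\'\n   >>> len("\\q")\n   2\n\n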
It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw string, string quotes can be escaped with a backslash,\nbut the backslash remains in the string; for example, ``r"\\""`` is a\nvalid string literal consisting of two characters: a backslash and a\ndouble quote; ``r"\\"`` is not a valid string literal (even a raw\nstring cannot end in an odd number of backslashes). Specifically, *a\nraw string cannot end in a single backslash* (since the backslash\nwould escape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n', 'subscriptions': '\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object that supports subscription,\ne.g. a list or dictionary. User-defined objects can support\nsubscription by defining a ``__getitem__()`` method.\n\nFor built-in objects, there are two types of objects that support\nsubscription:\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to\nan integer or a slice (as discussed in the following section).\n\nThe formal syntax makes no special provision for negative indices in\nsequences; however, built-in sequences all provide a ``__getitem__()``\nmethod that interprets negative indices by adding the length of the\nsequence to the index (so that ``x[-1]`` selects the last item of\n``x``). The resulting value must be a nonnegative integer less than\nthe number of items in the sequence, and the subscription selects the\nitem whose index is that value (counting from zero). Since the support\nfor negative indices and slicing occurs in the object\'s\n``__getitem__()`` method, subclasses overriding this method will need\nto explicitly add that support.\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', 'truth': "\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an ``if`` or\n``while`` condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* ``None``\n\n* ``False``\n\n* zero of any numeric type, for example, ``0``, ``0.0``, ``0j``.\n\n* any empty sequence, for example, ``''``, ``()``, ``[]``.\n\n* any empty mapping, for example, ``{}``.\n\n* instances of user-defined classes, if the class defines a\n ``__bool__()`` or ``__len__()`` method, when that method returns the\n integer zero or ``bool`` value ``False``. [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn ``0`` or ``False`` for false and ``1`` or ``True`` for true,\nunless otherwise stated. 
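\n\nFor example, applying ``bool()`` to a few of the values listed above\n(a quick sketch):\n\n   >>> bool(None), bool(0), bool(0.0), bool(''), bool(()), bool({})\n   (False, False, False, False, False, False)\n   >>> bool(-1), bool(' '), bool([0])\n   (True, True, True)\n\n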
(Important exception: the Boolean operations\n``or`` and ``and`` always return one of their operands.)\n", 'try': '\nThe ``try`` statement\n*********************\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n   try_stmt ::= try1_stmt | try2_stmt\n   try1_stmt ::= "try" ":" suite\n                 ("except" [expression ["as" target]] ":" suite)+\n                 ["else" ":" suite]\n                 ["finally" ":" suite]\n   try2_stmt ::= "try" ":" suite\n                 "finally" ":" suite\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the ``as`` keyword in that except clause,\nif present, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using ``as target``, it is cleared\nat the end of the except clause. This is as if\n\n   except E as N:\n       foo\n\nwas translated to\n\n   except E as N:\n       try:\n           foo\n       finally:\n           del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the ``sys`` module and can be accessed via\n``sys.exc_info()``. ``sys.exc_info()`` returns a 3-tuple consisting of\nthe exception class, the exception instance and a traceback object\n(see section *The standard type hierarchy*) identifying the point in\nthe program where the exception occurred. ``sys.exc_info()`` values\nare restored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. 
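\n\nFor example, a small sketch with all four kinds of clause together\n(``else`` runs only when the ``try`` suite raised no exception, and\n``finally`` always runs):\n\n   >>> def divide(a, b):\n   ...     try:\n   ...         result = a / b\n   ...     except ZeroDivisionError:\n   ...         print("division by zero")\n   ...     else:\n   ...         print("result is", result)\n   ...     finally:\n   ...         print("done")\n   ...\n   >>> divide(4, 2)\n   result is 2.0\n   done\n   >>> divide(4, 0)\n   division by zero\n   done\n\n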
[2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception it is re-raised at the end of\nthe ``finally`` clause. If the ``finally`` clause raises another\nexception, the saved exception is set as the context of the new\nexception. If the ``finally`` clause executes a ``return`` or\n``break`` statement, the saved exception is discarded:\n\n def f():\n try:\n 1/0\n finally:\n return 42\n\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n', 'types': '\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the literal ``...`` or the\n built-in name ``Ellipsis``. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
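\n\n   The built-in numeric types are registered with the corresponding\n   abstract base classes in the ``numbers`` module, so they can be\n   tested with ``isinstance()`` (a quick sketch):\n\n      >>> import numbers\n      >>> isinstance(42, numbers.Integral)\n      True\n      >>> isinstance(1.5, numbers.Real), isinstance(1.5, numbers.Integral)\n      (True, False)\n      >>> isinstance(True, numbers.Integral)    # bool is a subtype of int\n      True\n\n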
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are two types of integers:\n\n Integers (``int``)\n\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans (``bool``)\n These represent the truth values False and True. The two\n objects representing the values False and True are the only\n Boolean objects. The Boolean type is a subtype of the integer\n type, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ``"False"`` or\n ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex`` (``complex``)\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. 
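\n\n      For example (a quick sketch), a tuple itself cannot be changed,\n      even though a mutable object it refers to can be:\n\n         >>> t = (1, [2, 3])\n         >>> t[0] = 99\n         Traceback (most recent call last):\n           File "<stdin>", line 1, in <module>\n         TypeError: \'tuple\' object does not support item assignment\n         >>> t[1].append(4)    # the referenced list is still mutable\n         >>> t\n         (1, [2, 3, 4])\n\n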
(If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n A string is a sequence of values that represent Unicode\n codepoints. All the codepoints in range ``U+0000 - U+10FFFF``\n can be represented in a string. Python doesn\'t have a\n ``chr`` type, and every character in the string is\n represented as a string object with length ``1``. The built-\n in function ``ord()`` converts a character to its codepoint\n (as an integer); ``chr()`` converts an integer in range ``0 -\n 10FFFF`` to the corresponding character. ``str.encode()`` can\n be used to convert a ``str`` to ``bytes`` using the given\n encoding, and ``bytes.decode()`` can be used to achieve the\n opposite.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Bytes\n A bytes object is an immutable array. The items are 8-bit\n bytes, represented by integers in the range 0 <= x < 256.\n Bytes literals (like ``b\'abc\'``) and the built-in function\n ``bytes()`` can be used to construct bytes objects. Also,\n bytes objects can be decoded to strings via the ``decode()``\n method.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in ``bytearray()`` constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type, as does the ``collections`` module.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. 
They are created by the\n built-in ``frozenset()`` constructor. As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm.ndbm`` and ``dbm.gnu`` provide\n additional examples of mapping types, as does the\n ``collections`` module.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +---------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +===========================+=================================+=============+\n | ``__doc__`` | The function\'s documentation | Writable |\n | | string, or ``None`` if | |\n | | unavailable | |\n +---------------------------+---------------------------------+-------------+\n | ``__name__`` | The function\'s name | Writable |\n +---------------------------+---------------------------------+-------------+\n | ``__qualname__`` | The function\'s *qualified name* | Writable |\n | | New in version 3.3. | |\n +---------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. | |\n +---------------------------+---------------------------------+-------------+\n | ``__defaults__`` | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value | |\n +---------------------------+---------------------------------+-------------+\n | ``__code__`` | The code object representing | Writable |\n | | the compiled function body. | |\n +---------------------------+---------------------------------+-------------+\n | ``__globals__`` | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. 
| |\n +---------------------------+---------------------------------+-------------+\n | ``__dict__`` | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +---------------------------+---------------------------------+-------------+\n | ``__closure__`` | ``None`` or a tuple of cells | Read-only |\n | | that contain bindings for the | |\n | | function\'s free variables. | |\n +---------------------------+---------------------------------+-------------+\n | ``__annotations__`` | A dict containing annotations | Writable |\n | | of parameters. The keys of the | |\n | | dict are the parameter names, | |\n | | or ``\'return\'`` for the return | |\n | | annotation, if provided. | |\n +---------------------------+---------------------------------+-------------+\n | ``__kwdefaults__`` | A dict containing defaults for | Writable |\n | | keyword-only parameters. | |\n +---------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n Instance methods\n An instance method object combines a class, a class instance and\n any callable object (normally a user-defined function).\n\n Special read-only attributes: ``__self__`` is the class instance\n object, ``__func__`` is the function object; ``__doc__`` is the\n method\'s documentation (same as ``__func__.__doc__``);\n ``__name__`` is the method name (same as ``__func__.__name__``);\n ``__module__`` is the name of the module the method was defined\n in, or ``None`` if unavailable.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object or a class\n method object.\n\n When an instance method object is created by retrieving a user-\n defined function object from a class via one of its instances,\n its ``__self__`` attribute is the instance, and the method\n object is said to be bound. The new method\'s ``__func__``\n attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``__func__``\n attribute of the new instance is not the original method object\n but its ``__func__`` attribute.\n\n When an instance method object is created by retrieving a class\n method object from a class or instance, its ``__self__``\n attribute is the class itself, and its ``__func__`` attribute is\n the function object underlying the class method.\n\n When an instance method object is called, the underlying\n function (``__func__``) is called, inserting the class instance\n (``__self__``) in front of the argument list. 
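For\n illustration, a minimal sketch (the class ``D`` and its method\n ``m()`` are arbitrary names):\n\n >>> class D:\n ... def m(self):\n ... pass\n ...\n >>> d = D()\n >>> d.m.__self__ is d\n True\n >>> d.m.__func__ is D.__dict__[\'m\']\n True\n\n 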
For instance,\n when ``C`` is a class which contains a definition for a function\n ``f()``, and ``x`` is an instance of ``C``, calling ``x.f(1)``\n is equivalent to calling ``C.f(x, 1)``.\n\n When an instance method object is derived from a class method\n object, the "class instance" stored in ``__self__`` will\n actually be the class itself, so that calling either ``x.f(1)``\n or ``C.f(1)`` is equivalent to calling ``f(C, 1)`` where ``f`` is\n the underlying function.\n\n Note that the transformation from function object to instance\n method object happens each time the attribute is retrieved from\n the instance. In some cases, a fruitful optimization is to\n assign the attribute to a local variable and call that local\n variable. Also notice that this transformation only happens for\n user-defined functions; other callable objects (and all non-\n callable objects) are retrieved without transformation. It is\n also important to note that user-defined functions which are\n attributes of a class instance are not converted to bound\n methods; this *only* happens when the function is an attribute\n of the class.\n\n Generator functions\n A function or method which uses the ``yield`` statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s ``iterator.__next__()`` method will cause the\n function to execute until it provides a value using the\n ``yield`` statement. When the function executes a ``return``\n statement or falls off the end, a ``StopIteration`` exception is\n raised and the iterator will have reached the end of the set of\n values to be returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are ``len()`` and ``math.sin()``\n (``math`` is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: ``__doc__`` is the function\'s documentation\n string, or ``None`` if unavailable; ``__name__`` is the\n function\'s name; ``__self__`` is set to ``None`` (but see the\n next item); ``__module__`` is the name of the module the\n function was defined in or ``None`` if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n ``alist.append()``, assuming *alist* is a list object. In this\n case, the special read-only attribute ``__self__`` is set to the\n object denoted by *alist*.\n\n Classes\n Classes are callable. These objects normally act as factories\n for new instances of themselves, but variations are possible for\n class types that override ``__new__()``. The arguments of the\n call are passed to ``__new__()`` and, in the typical case, to\n ``__init__()`` to initialize the new instance.\n\n Class Instances\n Instances of arbitrary classes can be made callable by defining\n a ``__call__()`` method in their class.\n\nModules\n Modules are a basic organizational unit of Python code, and are\n created by the *import system* as invoked either by the ``import``\n statement (see ``import``), or by calling functions such as\n ``importlib.import_module()`` and built-in ``__import__()``. 
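As a\n minimal illustration (``math`` is simply a convenient stand-in\n for any importable module):\n\n >>> import importlib\n >>> m = importlib.import_module(\'math\')\n >>> m.sqrt(4.0)\n 2.0\n\n 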
A\n module object has a namespace implemented by a dictionary object\n (this is the dictionary referenced by the ``__globals__`` attribute\n of functions defined in the module). Attribute references are\n translated to lookups in this dictionary, e.g., ``m.x`` is\n equivalent to ``m.__dict__["x"]``. A module object does not contain\n the code object used to initialize the module (since it isn\'t\n needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n Special read-only attribute: ``__dict__`` is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: ``__name__`` is the module\'s\n name; ``__doc__`` is the module\'s documentation string, or ``None``\n if unavailable; ``__file__`` is the pathname of the file from which\n the module was loaded, if it was loaded from a file. The\n ``__file__`` attribute may be missing for certain types of modules,\n such as C modules that are statically linked into the interpreter;\n for extension modules loaded dynamically from a shared library, it\n is the pathname of the shared library file.\n\nCustom classes\n Custom class types are typically created by class definitions (see\n section *Class definitions*). A class has a namespace implemented\n by a dictionary object. Class attribute references are translated\n to lookups in this dictionary, e.g., ``C.x`` is translated to\n ``C.__dict__["x"]`` (although there are a number of hooks which\n allow for other means of locating attributes). When the attribute\n name is not found there, the attribute search continues in the base\n classes. This search of the base classes uses the C3 method\n resolution order which behaves correctly even in the presence of\n \'diamond\' inheritance structures where there are multiple\n inheritance paths leading back to a common ancestor. Additional\n details on the C3 MRO used by Python can be found in the\n documentation accompanying the 2.3 release at\n http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class ``C``, say) would yield\n a class method object, it is transformed into an instance method\n object whose ``__self__`` attribute is ``C``. When it would yield\n a static method object, it is transformed into the object wrapped\n by the static method object. 
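A brief\n sketch of both transformations (the class ``E`` and its\n attributes are made-up names):\n\n >>> class E:\n ... def f(self): pass\n ... cm = classmethod(f)\n ... sm = staticmethod(f)\n ...\n >>> E.cm.__self__ is E\n True\n >>> type(E.sm)\n <class \'function\'>\n\n 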
See section *Implementing Descriptors*\n for another way in which attributes retrieved from a class may\n differ from those actually contained in its ``__dict__``.\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object, it is transformed into an instance method object\n whose ``__self__`` attribute is the instance. Static method and\n class method objects are also transformed; see above under\n "Classes". See section *Implementing Descriptors* for another way\n in which attributes of a class retrieved via its instances may\n differ from the objects actually stored in the class\'s\n ``__dict__``. If no class attribute is found, and the object\'s\n class has a ``__getattr__()`` method, that is called to satisfy the\n lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nI/O objects (also known as file objects)\n A *file object* represents an open file. Various shortcuts are\n available to create file objects: the ``open()`` built-in function,\n and also ``os.popen()``, ``os.fdopen()``, and the ``makefile()``\n method of socket objects (and perhaps by other functions or methods\n provided by extension modules).\n\n The objects ``sys.stdin``, ``sys.stdout`` and ``sys.stderr`` are\n initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams; they are all open in text\n mode and therefore follow the interface defined by the\n ``io.TextIOBase`` abstract class.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. 
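One can be\n obtained, for instance, from a function\'s ``__code__`` attribute\n or from the built-in ``compile()`` (a minimal sketch; the source\n string is arbitrary):\n\n >>> co = compile(\'x = 1\', \'<string>\', \'exec\')\n >>> co.co_filename\n \'<string>\'\n\n 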
The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_lasti`` gives the precise instruction (this is an index into\n the bytecode string of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_lineno`` is the current line number\n of the frame --- writing to this from within a trace function\n jumps to the given line (only for the bottom-most frame). 
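The frame of\n the currently executing code can be examined, for example, with\n the CPython-specific ``sys._getframe()`` (a hedged sketch):\n\n >>> import sys\n >>> frame = sys._getframe()\n >>> frame.f_globals is globals()\n True\n\n 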
A\n debugger can implement a Jump command (aka Set Next Statement)\n by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as the third item of the\n tuple returned by ``sys.exc_info()``. When the program contains\n no suitable handler, the stack trace is written (nicely\n formatted) to the standard error stream; if the interpreter is\n interactive, it is also made available to the user as\n ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices for ``__getitem__()``\n methods. They are also created by the built-in ``slice()``\n function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the slice that the slice object\n would describe if applied to a sequence of *length* items.\n It returns a tuple of three integers; respectively these are\n the *start* and *stop* indices and the *step* or stride\n length of the slice. Missing or out-of-bounds indices are\n handled in a manner consistent with regular slices.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in ``classmethod()`` constructor.\n', 'typesfunctions': '\nFunctions\n*********\n\nFunction objects are created by function definitions. 
The only\noperation on a function object is to call it: ``func(argument-list)``.\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', 'typesmapping': '\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built-\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass dict(**kwarg)\nclass dict(mapping, **kwarg)\nclass dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to ``{"one": 1, "two": 2, "three": 3}``:\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. 
Raises a ``KeyError`` if\n *key* is not in the map.\n\n If a subclass of dict defines a method ``__missing__()``, if the\n key *key* is not present, the ``d[key]`` operation calls that\n method with the key *key* as argument. The ``d[key]`` operation\n then returns or raises whatever is returned or raised by the\n ``__missing__(key)`` call if the key is not present. No other\n operations or methods invoke ``__missing__()``. If\n ``__missing__()`` is not defined, ``KeyError`` is raised.\n ``__missing__()`` must be a method; it cannot be an instance\n variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n See ``collections.Counter`` for a complete implementation\n including other methods helpful for accumulating and managing\n tallies.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for ``iter(d.keys())``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n items()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See the *documentation of view objects*.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See the\n *documentation of view objects*.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n values()\n\n Return a new view of the dictionary\'s values. See the\n *documentation of view objects*.\n\nSee also:\n\n ``types.MappingProxyType`` can be used to create a read-only view\n of a ``dict``.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.keys()``, ``dict.values()`` and\n``dict.items()`` are *view objects*. 
They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that ``(key, value)`` pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class ``collections.abc.Set`` are available (for example, ``==``,\n``<``, or ``^``).\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n', 'typesmethods': '\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as ``append()`` on\nlists) and class instance methods. Built-in methods are described\nwith the types that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the\n``self`` argument to the argument list. Bound methods have two\nspecial read-only attributes: ``m.__self__`` is the object on which\nthe method operates, and ``m.__func__`` is the function implementing\nthe method. Calling ``m(arg-1, arg-2, ..., arg-n)`` is completely\nequivalent to calling ``m.__func__(m.__self__, arg-1, arg-2, ...,\narg-n)``.\n\nLike function objects, bound method objects support getting arbitrary\nattributes. 
However, since method attributes are actually stored on\nthe underlying function object (``meth.__func__``), setting method\nattributes on bound methods is disallowed. Attempting to set an\nattribute on a method results in an ``AttributeError`` being raised.\nIn order to set a method attribute, you need to explicitly set it on\nthe underlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n AttributeError: \'method\' object has no attribute \'whoami\'\n >>> c.method.__func__.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n', 'typesmodules': "\nModules\n*******\n\nThe only special operation on a module is attribute access:\n``m.name``, where *m* is a module and *name* accesses a name defined\nin *m*'s symbol table. Module attributes can be assigned to. (Note\nthat the ``import`` statement is not, strictly speaking, an operation\non a module object; ``import foo`` does not require a module object\nnamed *foo* to exist, rather it requires an (external) *definition*\nfor a module named *foo* somewhere.)\n\nA special attribute of every module is ``__dict__``. This is the\ndictionary containing the module's symbol table. Modifying this\ndictionary will actually change the module's symbol table, but direct\nassignment to the ``__dict__`` attribute is not possible (you can\nwrite ``m.__dict__['a'] = 1``, which defines ``m.a`` to be ``1``, but\nyou can't write ``m.__dict__ = {}``). Modifying ``__dict__`` directly\nis not recommended.\n\nModules built into the interpreter are written like this: ``<module\n'sys' (built-in)>``. If loaded from a file, they are written as\n``<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>``.\n", 'typesseq': '\nSequence Types --- ``list``, ``tuple``, ``range``\n*************************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The ``collections.abc.Sequence``\nABC is provided to make it easier to correctly implement these\noperations on custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). In the table,\n*s* and *t* are sequences of the same type, *n*, *i*, *j* and *k* are\nintegers and *x* is an arbitrary object that meets any type and value\nrestrictions imposed by *s*.\n\nThe ``in`` and ``not in`` operations have the same priorities as the\ncomparison operations. 
The ``+`` (concatenation) and ``*``\n(repetition) operations have the same priority as the corresponding\nnumeric operations.\n\n+----------------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+============================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+----------------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+----------------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6)(7) |\n+----------------------------+----------------------------------+------------+\n| ``s * n`` or ``n * s`` | *n* shallow copies of *s* | (2)(7) |\n| | concatenated | |\n+----------------------------+----------------------------------+------------+\n| ``s[i]`` | *i*th item of *s*, origin 0 | (3) |\n+----------------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+----------------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+----------------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+----------------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| ``s.index(x[, i[, j]])`` | index of the first occurrence of | (8) |\n| | *x* in *s* (at or after index | |\n| | *i* and before index *j*) | |\n+----------------------------+----------------------------------+------------+\n| ``s.count(x)`` | total number of occurrences of | |\n| | *x* in *s* | |\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons. In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length. (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. While the ``in`` and ``not in`` operations are used only for simple\n containment testing in the general case, some specialised sequences\n (such as ``str``, ``bytes`` and ``bytearray``) also use them for\n subsequence testing:\n\n >>> "gg" in "eggs"\n True\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. 
You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. Concatenating immutable sequences always results in a new object.\n This means that building up a sequence by repeated concatenation\n will have a quadratic runtime cost in the total sequence length.\n To get a linear runtime cost, you must switch to one of the\n alternatives below:\n\n * if concatenating ``str`` objects, you can build a list and use\n ``str.join()`` at the end or else write to a ``io.StringIO``\n instance and retrieve its value when complete\n\n * if concatenating ``bytes`` objects, you can similarly use\n ``bytes.join()`` or ``io.BytesIO``, or you can do in-place\n concatenation with a ``bytearray`` object. ``bytearray`` objects\n are mutable and have an efficient overallocation mechanism\n\n * if concatenating ``tuple`` objects, extend a ``list`` instead\n\n * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as ``range``) only support item sequences\n that follow specific patterns, and hence don\'t support sequence\n concatenation or repetition.\n\n8. ``index`` raises ``ValueError`` when *x* is not found in *s*. When\n supported, the additional arguments to the index method allow\n efficient searching of subsections of the sequence. Passing the\n extra arguments is roughly equivalent to using ``s[i:j].index(x)``,\n only without copying any data and with the returned index being\n relative to the start of the sequence rather than the start of the\n slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe ``hash()`` built-in.\n\nThis support allows immutable sequences, such as ``tuple`` instances,\nto be used as ``dict`` keys and stored in ``set`` and ``frozenset``\ninstances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in ``TypeError``.\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
The ``collections.abc.MutableSequence`` ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, ``bytearray`` only\naccepts integers that meet the value restriction ``0 <= x <= 255``).\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | appends *x* to the end of the | |\n| | sequence (same as | |\n| | ``s[len(s):len(s)] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.clear()`` | removes all items from ``s`` | (5) |\n| | (same as ``del s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.copy()`` | creates a shallow copy of ``s`` | (5) |\n| | (same as ``s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(t)`` | extends *s* with the contents of | |\n| | *t* (same as ``s[len(s):len(s)] | |\n| | = t``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | ``s[i:i] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | remove the first item from *s* | (3) |\n| | where ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n3. ``remove`` raises ``ValueError`` when *x* is not found in *s*.\n\n4. The ``reverse()`` method modifies the sequence in place for economy\n of space when reversing a large sequence. 
To remind users that it\n operates by side effect, it does not return the reversed sequence.\n\n5. ``clear()`` and ``copy()`` are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as ``dict`` and ``set``)\n\n New in version 3.3: ``clear()`` and ``copy()`` methods.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass list([iterable])\n\n Lists may be constructed in several ways:\n\n * Using a pair of square brackets to denote the empty list: ``[]``\n\n * Using square brackets, separating items with commas: ``[a]``,\n ``[a, b, c]``\n\n * Using a list comprehension: ``[x for x in iterable]``\n\n * Using the type constructor: ``list()`` or ``list(iterable)``\n\n The constructor builds a list whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a list, a copy is made and\n returned, similar to ``iterable[:]``. For example, ``list(\'abc\')``\n returns ``[\'a\', \'b\', \'c\']`` and ``list( (1, 2, 3) )`` returns ``[1,\n 2, 3]``. If no argument is given, the constructor creates a new\n empty list, ``[]``.\n\n Many other operations also produce lists, including the\n ``sorted()`` built-in.\n\n Lists implement all of the *common* and *mutable* sequence\n operations. Lists also provide the following additional method:\n\n sort(*, key=None, reverse=False)\n\n This method sorts the list in place, using only ``<``\n comparisons between items. Exceptions are not suppressed - if\n any comparison operations fail, the entire sort operation will\n fail (and the list will likely be left in a partially modified\n state).\n\n *key* specifies a function of one argument that is used to\n extract a comparison key from each list element (for example,\n ``key=str.lower``). The key corresponding to each item in the\n list is calculated once and then used for the entire sorting\n process. The default value of ``None`` means that list items are\n sorted directly without calculating a separate key value.\n\n The ``functools.cmp_to_key()`` utility is available to convert a\n 2.x style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n This method modifies the sequence in place for economy of space\n when sorting a large sequence. To remind users that it operates\n by side effect, it does not return the sorted sequence (use\n ``sorted()`` to explicitly request a new sorted list instance).\n\n The ``sort()`` method is guaranteed to be stable. A sort is\n stable if it guarantees not to change the relative order of\n elements that compare equal --- this is helpful for sorting in\n multiple passes (for example, sort by department, then by salary\n grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises ``ValueError`` if it can\n detect that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the\n``enumerate()`` built-in). 
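For example\n(a small sketch; the input list is arbitrary):\n\n >>> list(enumerate([\'spam\', \'eggs\']))\n [(0, \'spam\'), (1, \'eggs\')]\n\n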
Tuples are also used for cases where an\nimmutable sequence of homogeneous data is needed (such as allowing\nstorage in a ``set`` or ``dict`` instance).\n\nclass tuple([iterable])\n\n Tuples may be constructed in a number of ways:\n\n * Using a pair of parentheses to denote the empty tuple: ``()``\n\n * Using a trailing comma for a singleton tuple: ``a,`` or ``(a,)``\n\n * Separating items with commas: ``a, b, c`` or ``(a, b, c)``\n\n * Using the ``tuple()`` built-in: ``tuple()`` or\n ``tuple(iterable)``\n\n The constructor builds a tuple whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a tuple, it is returned\n unchanged. For example, ``tuple(\'abc\')`` returns ``(\'a\', \'b\',\n \'c\')`` and ``tuple( [1, 2, 3] )`` returns ``(1, 2, 3)``. If no\n argument is given, the constructor creates a new empty tuple,\n ``()``.\n\n Note that it is actually the comma which makes a tuple, not the\n parentheses. The parentheses are optional, except in the empty\n tuple case, or when they are needed to avoid syntactic ambiguity.\n For example, ``f(a, b, c)`` is a function call with three\n arguments, while ``f((a, b, c))`` is a function call with a 3-tuple\n as the sole argument.\n\n Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, ``collections.namedtuple()`` may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe ``range`` type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in ``for`` loops.\n\nclass range(stop)\nclass range(start, stop[, step])\n\n The arguments to the range constructor must be integers (either\n built-in ``int`` or any object that implements the ``__index__``\n special method). If the *step* argument is omitted, it defaults to\n ``1``. If the *start* argument is omitted, it defaults to ``0``. If\n *step* is zero, ``ValueError`` is raised.\n\n For a positive *step*, the contents of a range ``r`` are determined\n by the formula ``r[i] = start + step*i`` where ``i >= 0`` and\n ``r[i] < stop``.\n\n For a negative *step*, the contents of the range are still\n determined by the formula ``r[i] = start + step*i``, but the\n constraints are ``i >= 0`` and ``r[i] > stop``.\n\n A range object will be empty if ``r[0]`` does not meet the value\n constraint. 
Ranges do support negative indices, but these are\n interpreted as indexing from the end of the sequence determined by\n the positive indices.\n\n Ranges containing absolute values larger than ``sys.maxsize`` are\n permitted but some features (such as ``len()``) may raise\n ``OverflowError``.\n\n Range examples:\n\n >>> list(range(10))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> list(range(1, 11))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n >>> list(range(0, 30, 5))\n [0, 5, 10, 15, 20, 25]\n >>> list(range(0, 10, 3))\n [0, 3, 6, 9]\n >>> list(range(0, -10, -1))\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n >>> list(range(0))\n []\n >>> list(range(1, 0))\n []\n\n Ranges implement all of the *common* sequence operations except\n concatenation and repetition (due to the fact that range objects\n can only represent sequences that follow a strict pattern and\n repetition and concatenation will usually violate that pattern).\n\nThe advantage of the ``range`` type over a regular ``list`` or\n``tuple`` is that a ``range`` object will always take the same (small)\namount of memory, no matter the size of the range it represents (as it\nonly stores the ``start``, ``stop`` and ``step`` values, calculating\nindividual items and subranges as needed).\n\nRange objects implement the ``collections.Sequence`` ABC, and provide\nfeatures such as containment tests, element index lookup, slicing and\nsupport for negative indices (see *Sequence Types --- list, tuple,\nrange*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with ``==`` and ``!=`` compares\nthem as sequences. That is, two range objects are considered equal if\nthey represent the same sequence of values. (Note that two range\nobjects that compare equal might have different ``start``, ``stop``\nand ``step`` attributes, for example ``range(0) == range(2, 1, 3)`` or\n``range(0, 3, 2) == range(0, 4, 2)``.)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test ``int`` objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The ``start``, ``stop`` and ``step`` attributes.\n', 'typesseq-mutable': "\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
The ``collections.abc.MutableSequence`` ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, ``bytearray`` only\naccepts integers that meet the value restriction ``0 <= x <= 255``).\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | appends *x* to the end of the | |\n| | sequence (same as | |\n| | ``s[len(s):len(s)] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.clear()`` | removes all items from ``s`` | (5) |\n| | (same as ``del s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.copy()`` | creates a shallow copy of ``s`` | (5) |\n| | (same as ``s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(t)`` | extends *s* with the contents of | |\n| | *t* (same as ``s[len(s):len(s)] | |\n| | = t``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | ``s[i:i] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | remove the first item from *s* | (3) |\n| | where ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n3. ``remove`` raises ``ValueError`` when *x* is not found in *s*.\n\n4. The ``reverse()`` method modifies the sequence in place for economy\n of space when reversing a large sequence. 
To remind users that it\n operates by side effect, it does not return the reversed sequence.\n\n5. ``clear()`` and ``copy()`` are included for consistency with the\n interfaces of mutable containers that don't support slicing\n operations (such as ``dict`` and ``set``)\n\n New in version 3.3: ``clear()`` and ``copy()`` methods.\n", 'unary': '\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary ``-`` (minus) operator yields the negation of its numeric\nargument.\n\nThe unary ``+`` (plus) operator yields its numeric argument unchanged.\n\nThe unary ``~`` (invert) operator yields the bitwise inversion of its\ninteger argument. The bitwise inversion of ``x`` is defined as\n``-(x+1)``. It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n``TypeError`` exception is raised.\n', 'while': '\nThe ``while`` statement\n***********************\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n', 'with': '\nThe ``with`` statement\n**********************\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'yield': '\nThe ``yield`` statement\n***********************\n\n yield_stmt ::= yield_expression\n\nThe ``yield`` statement is only used when defining a generator\nfunction, and is only used in the body of the generator function.\nUsing a ``yield`` statement in a function definition is sufficient to\ncause that definition to create a generator function instead of a\nnormal function.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the ``next()`` function on\nthe generator repeatedly until it raises an exception.\n\nWhen a ``yield`` statement is executed, the state of the generator is\nfrozen and the value of ``expression_list`` is returned to\n``next()``\'s caller. By "frozen" we mean that all local state is\nretained, including the current bindings of local variables, the\ninstruction pointer, and the internal evaluation stack: enough\ninformation is saved so that the next time ``next()`` is invoked, the\nfunction can proceed exactly as if the ``yield`` statement were just\nanother external call.\n\nThe ``yield`` statement is allowed in the ``try`` clause of a ``try``\n... ``finally`` construct. If the generator is not resumed before it\nis finalized (by reaching a zero reference count or by being garbage\ncollected), the generator-iterator\'s ``close()`` method will be\ncalled, allowing any pending ``finally`` clauses to execute.\n\nWhen ``yield from <expr>`` is used, it treats the supplied expression\nas a subiterator, producing values from it until the underlying\niterator is exhausted.\n\n Changed in version 3.3: Added ``yield from <expr>`` to delegate\n control flow to a subiterator\n\nFor full details of ``yield`` semantics, refer to the *Yield\nexpressions* section.\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the ``yield`` statement\n to Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal to enhance the API and syntax of generators, making\n them usable as simple coroutines.\n\n **PEP 0380** - Syntax for Delegating to a Subgenerator\n The proposal to introduce the ``yield_from`` syntax, making\n delegation to sub-generators easy.\n'}
agpl-3.0
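The 'with' topic in the record above walks through the context-manager protocol step by step; here is a minimal, self-contained sketch of those steps (the class name Traced is illustrative, not from the source):

class Traced:
    """Tiny context manager that prints each protocol step."""
    def __enter__(self):
        print("__enter__ invoked")            # step 3: __enter__() is called
        return self                           # step 4: bound to the `as` target

    def __exit__(self, exc_type, exc_value, traceback):
        print("__exit__ invoked:", exc_type)  # step 6: always called
        return False                          # falsy return re-raises exceptions

with Traced() as t:
    print("suite runs")                       # step 5

# Multiple items behave like nested with statements, as the text describes:
with Traced() as a, Traced() as b:
    print("inner suite")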
kumarkrishna/sympy
sympy/logic/algorithms/dpll.py
58
9316
"""Implementation of DPLL algorithm Further improvements: eliminate calls to pl_true, implement branching rules, efficient unit propagation. References: - http://en.wikipedia.org/wiki/DPLL_algorithm - http://bioinformatics.louisville.edu/ouyang/MingOuyangThesis.pdf """ from __future__ import print_function, division from sympy.core.compatibility import range from sympy import default_sort_key from sympy.logic.boolalg import Or, Not, conjuncts, disjuncts, to_cnf, \ to_int_repr, _find_predicates from sympy.logic.inference import pl_true, literal_symbol def dpll_satisfiable(expr): """ Check satisfiability of a propositional sentence. It returns a model rather than True when it succeeds >>> from sympy.abc import A, B >>> from sympy.logic.algorithms.dpll import dpll_satisfiable >>> dpll_satisfiable(A & ~B) {A: True, B: False} >>> dpll_satisfiable(A & ~A) False """ clauses = conjuncts(to_cnf(expr)) if False in clauses: return False symbols = sorted(_find_predicates(expr), key=default_sort_key) symbols_int_repr = set(range(1, len(symbols) + 1)) clauses_int_repr = to_int_repr(clauses, symbols) result = dpll_int_repr(clauses_int_repr, symbols_int_repr, {}) if not result: return result output = {} for key in result: output.update({symbols[key - 1]: result[key]}) return output def dpll(clauses, symbols, model): """ Compute satisfiability in a partial model. Clauses is an array of conjuncts. >>> from sympy.abc import A, B, D >>> from sympy.logic.algorithms.dpll import dpll >>> dpll([A, B, D], [A, B], {D: False}) False """ # compute DP kernel P, value = find_unit_clause(clauses, model) while P: model.update({P: value}) symbols.remove(P) if not value: P = ~P clauses = unit_propagate(clauses, P) P, value = find_unit_clause(clauses, model) P, value = find_pure_symbol(symbols, clauses) while P: model.update({P: value}) symbols.remove(P) if not value: P = ~P clauses = unit_propagate(clauses, P) P, value = find_pure_symbol(symbols, clauses) # end DP kernel unknown_clauses = [] for c in clauses: val = pl_true(c, model) if val is False: return False if val is not True: unknown_clauses.append(c) if not unknown_clauses: return model if not clauses: return model P = symbols.pop() model_copy = model.copy() model.update({P: True}) model_copy.update({P: False}) symbols_copy = symbols[:] return (dpll(unit_propagate(unknown_clauses, P), symbols, model) or dpll(unit_propagate(unknown_clauses, Not(P)), symbols_copy, model_copy)) def dpll_int_repr(clauses, symbols, model): """ Compute satisfiability in a partial model. 
Arguments are expected to be in integer representation >>> from sympy.logic.algorithms.dpll import dpll_int_repr >>> dpll_int_repr([set([1]), set([2]), set([3])], set([1, 2]), {3: False}) False """ # compute DP kernel P, value = find_unit_clause_int_repr(clauses, model) while P: model.update({P: value}) symbols.remove(P) if not value: P = -P clauses = unit_propagate_int_repr(clauses, P) P, value = find_unit_clause_int_repr(clauses, model) P, value = find_pure_symbol_int_repr(symbols, clauses) while P: model.update({P: value}) symbols.remove(P) if not value: P = -P clauses = unit_propagate_int_repr(clauses, P) P, value = find_pure_symbol_int_repr(symbols, clauses) # end DP kernel unknown_clauses = [] for c in clauses: val = pl_true_int_repr(c, model) if val is False: return False if val is not True: unknown_clauses.append(c) if not unknown_clauses: return model P = symbols.pop() model_copy = model.copy() model.update({P: True}) model_copy.update({P: False}) symbols_copy = symbols.copy() return (dpll_int_repr(unit_propagate_int_repr(unknown_clauses, P), symbols, model) or dpll_int_repr(unit_propagate_int_repr(unknown_clauses, -P), symbols_copy, model_copy)) ### helper methods for DPLL def pl_true_int_repr(clause, model={}): """ Lightweight version of pl_true. Argument clause represents the set of args of an Or clause. This is used inside dpll_int_repr, it is not meant to be used directly. >>> from sympy.logic.algorithms.dpll import pl_true_int_repr >>> pl_true_int_repr(set([1, 2]), {1: False}) >>> pl_true_int_repr(set([1, 2]), {1: False, 2: False}) False """ result = False for lit in clause: if lit < 0: p = model.get(-lit) if p is not None: p = not p else: p = model.get(lit) if p is True: return True elif p is None: result = None return result def unit_propagate(clauses, symbol): """ Returns an equivalent set of clauses If a set of clauses contains the unit clause l, the other clauses are simplified by the application of the two following rules: 1. every clause containing l is removed 2. in every clause that contains ~l this literal is deleted Arguments are expected to be in CNF. >>> from sympy import symbols >>> from sympy.abc import A, B, D >>> from sympy.logic.algorithms.dpll import unit_propagate >>> unit_propagate([A | B, D | ~B, B], B) [D, B] """ output = [] for c in clauses: if c.func != Or: output.append(c) continue for arg in c.args: if arg == ~symbol: output.append(Or(*[x for x in c.args if x != ~symbol])) break if arg == symbol: break else: output.append(c) return output def unit_propagate_int_repr(clauses, s): """ Same as unit_propagate, but arguments are expected to be in integer representation >>> from sympy.logic.algorithms.dpll import unit_propagate_int_repr >>> unit_propagate_int_repr([set([1, 2]), set([3, -2]), set([2])], 2) [set([3])] """ negated = set([-s]) return [clause - negated for clause in clauses if s not in clause] def find_pure_symbol(symbols, unknown_clauses): """ Find a symbol and its value if it appears only as a positive literal (or only as a negative) in clauses. 
>>> from sympy import symbols >>> from sympy.abc import A, B, D >>> from sympy.logic.algorithms.dpll import find_pure_symbol >>> find_pure_symbol([A, B, D], [A|~B,~B|~D,D|A]) (A, True) """ for sym in symbols: found_pos, found_neg = False, False for c in unknown_clauses: if not found_pos and sym in disjuncts(c): found_pos = True if not found_neg and Not(sym) in disjuncts(c): found_neg = True if found_pos != found_neg: return sym, found_pos return None, None def find_pure_symbol_int_repr(symbols, unknown_clauses): """ Same as find_pure_symbol, but arguments are expected to be in integer representation >>> from sympy.logic.algorithms.dpll import find_pure_symbol_int_repr >>> find_pure_symbol_int_repr(set([1,2,3]), ... [set([1, -2]), set([-2, -3]), set([3, 1])]) (1, True) """ all_symbols = set().union(*unknown_clauses) found_pos = all_symbols.intersection(symbols) found_neg = all_symbols.intersection([-s for s in symbols]) for p in found_pos: if -p not in found_neg: return p, True for p in found_neg: if -p not in found_pos: return -p, False return None, None def find_unit_clause(clauses, model): """ A unit clause has only 1 variable that is not bound in the model. >>> from sympy import symbols >>> from sympy.abc import A, B, D >>> from sympy.logic.algorithms.dpll import find_unit_clause >>> find_unit_clause([A | B | D, B | ~D, A | ~B], {A:True}) (B, False) """ for clause in clauses: num_not_in_model = 0 for literal in disjuncts(clause): sym = literal_symbol(literal) if sym not in model: num_not_in_model += 1 P, value = sym, not (literal.func is Not) if num_not_in_model == 1: return P, value return None, None def find_unit_clause_int_repr(clauses, model): """ Same as find_unit_clause, but arguments are expected to be in integer representation. >>> from sympy.logic.algorithms.dpll import find_unit_clause_int_repr >>> find_unit_clause_int_repr([set([1, 2, 3]), ... set([2, -3]), set([1, -2])], {1: True}) (2, False) """ bound = set(model) | set(-sym for sym in model) for clause in clauses: unbound = clause - bound if len(unbound) == 1: p = unbound.pop() if p < 0: return -p, False else: return p, True return None, None
bsd-3-clause
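A short usage sketch for the DPLL module above, following its own doctests (the exact model returned for a satisfiable sentence may vary):

from sympy.abc import A, B, C
from sympy.logic.algorithms.dpll import dpll_satisfiable, unit_propagate

# A model (dict of assignments) is returned when the sentence is satisfiable.
print(dpll_satisfiable((A | B) & (~A | C)))   # e.g. {A: True, C: True}

# False is returned for unsatisfiable input.
print(dpll_satisfiable(A & ~A))               # False

# Unit propagation simplifies a CNF clause list against a unit clause.
print(unit_propagate([A | B, C | ~B, B], B))  # [C, B]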
agileblaze/OpenStackTwoFactorAuthentication
horizon/openstack_dashboard/test/integration_tests/tests/test_image_create_delete.py
52
1470
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from openstack_dashboard.test.integration_tests import helpers


class TestImage(helpers.TestCase):
    IMAGE_NAME = helpers.gen_random_resource_name("image")

    def test_image_create_delete(self):
        """tests the image creation and deletion functionalities:
        * creates a new image from horizon.conf http_image
        * verifies the image appears in the images table as active
        * deletes the newly created image
        * verifies the image does not appear in the table after deletion
        """
        images_page = self.home_pg.go_to_compute_imagespage()
        images_page.create_image(self.IMAGE_NAME)
        self.assertTrue(images_page.is_image_present(self.IMAGE_NAME))
        self.assertTrue(images_page.is_image_active(self.IMAGE_NAME))
        images_page.delete_image(self.IMAGE_NAME)
        self.assertFalse(images_page.is_image_present(self.IMAGE_NAME))
apache-2.0
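The test above drives the dashboard through a page object; a hypothetical in-memory stand-in (FakeImagesPage is not a horizon class) shows the shape of the interface the test relies on:

class FakeImagesPage(object):
    """Hypothetical page object mirroring the calls made in the test above."""
    def __init__(self):
        self._images = set()

    def create_image(self, name):
        self._images.add(name)

    def delete_image(self, name):
        self._images.discard(name)

    def is_image_present(self, name):
        return name in self._images

    def is_image_active(self, name):
        # A real page object would poll the dashboard; here creation makes
        # an image active immediately.
        return name in self._images

page = FakeImagesPage()
page.create_image("image-test")
assert page.is_image_present("image-test")
page.delete_image("image-test")
assert not page.is_image_present("image-test")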
GbalsaC/bitnamiP
docs/en_us/enrollment_api/source/conf.py
62
2902
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# pylint: disable=redefined-builtin
# pylint: disable=protected-access
# pylint: disable=unused-argument

import os
from path import path
import sys

import mock

MOCK_MODULES = [
    'ipware',
    'ip',
    'ipware.ip',
    'get_ip',
    'pygeoip',
    'ipaddr',
    'django_countries',
    'fields',
    'django_countries.fields',
    'opaque_keys',
    'opaque_keys.edx',
    'opaque_keys.edx.keys',
    'CourseKey',
    'UsageKey',
    'BlockTypeKey',
    'opaque_keys.edx.locations',
    'SlashSeparatedCourseKey',
    'Location',
    'opaque_keys.edx.locator',
    'Locator',
    'south',
    'modelsinspector',
    'south.modelsinspector',
    'add_introspection_rules'
]

for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()

on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

sys.path.append('../../../../')

from docs.shared.conf import *

# Add any paths that contain templates here, relative to this directory.
#templates_path.append('source/_templates')

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path.append('source/_static')

if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root = path('../../../../').abspath()
sys.path.insert(0, root)
sys.path.append(root / "common/djangoapps")
sys.path.append('.')

#sys.path.insert(
#    0,
#    os.path.abspath(
#        os.path.normpath(
#            os.path.dirname(__file__) + '/../../../..'
#        )
#    )
#)

# django configuration - careful here
if on_rtd:
    os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
else:
    os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
    'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
    'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinxcontrib.napoleon']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', 'links.rst']

project = u'edX Enrollment API Version 1'
copyright = u'2015, edX'
agpl-3.0
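The conf.py above keeps Sphinx autodoc imports from failing by stubbing unavailable modules; the same trick in isolation (module names below are placeholders, not from the source):

import sys
from unittest import mock  # the conf.py above uses the `mock` backport instead

# After this, importing the named modules yields Mock objects instead of
# raising ImportError, so autodoc can import code that depends on them.
for mod_name in ('heavy_c_extension', 'heavy_c_extension.submodule'):
    sys.modules[mod_name] = mock.Mock()

import heavy_c_extension
print(heavy_c_extension.any_attribute)  # attribute access also yields a Mock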
rshorey/moxie
moxie/butterfield.py
4
3490
import os
import json
import asyncio

from butterfield.utils import at_bot
from moxie.facts import get_fact
from aiodocker import Docker
from aiocore import EventService

WEB_ROOT = os.environ.get("MOXIE_WEB_URL", "http://localhost:8888")


class LogService(EventService):
    """
    Provide basic text logging using print()
    """
    identifier = "moxie.cores.log.LogService"

    FORMAT_STRINGS = {
        "cron": {
            "sleep": "{job} ready to run, launching in {time} seconds.",
        },
        "run": {
            "pull": "Pulling from the index for {job}",
            "error": "Error! {job} - {error}",
            "create": "Creating a container for {job}",
            "starting": "Starting {job} because {why}",
            "started": "Job {{job}} started! ({}/container/{{job}}/) because {{why}}".format(WEB_ROOT),
        },
        "reap": {
            "error": "Error! {job} - {error}",
            "punted": "Error! Internal problem, punting {job}",
            "start": "Reaping {job}",
            "complete": "Job {{job}} reaped - run ID {{record}} ({}/run/{{record}}/)".format(WEB_ROOT),
        },
    }

    def __init__(self, bot, *args, **kwargs):
        self.bot = bot
        super(LogService, self).__init__(*args, **kwargs)

    @asyncio.coroutine
    def log(self, message):
        yield from self.send(message)

    @asyncio.coroutine
    def handle(self, message):
        type_, action = [message.get(x) for x in ['type', 'action']]
        strings = self.FORMAT_STRINGS.get(type_, {})
        output = strings.get(action, str(message))
        yield from self.bot.post(
            "#cron", "[{type}]: {action} - {message}".format(
                type=message['type'],
                action=message['action'],
                message=output.format(**message),
            ))


@asyncio.coroutine
def events(bot):
    docker = Docker()
    events = docker.events
    events.saferun()
    stream = events.listen()
    while True:
        el = yield from stream.get()
        yield from bot.post("#cron", "`{}`".format(str(el)))


@asyncio.coroutine
@at_bot
def run(bot, message: "message"):
    runner = EventService.resolve("moxie.cores.run.RunService")

    text = message.get("text", "")
    if text == "":
        yield from bot.post(message['channel'], "Invalid request")
        return
    elif text.strip().lower() == "fact":
        yield from bot.post(
            message['channel'],
            "<@{}>: {}".format(message['user'], get_fact()))
        return
    elif text.strip().lower() in ("yo", ":yo:"):
        yield from bot.post(
            message['channel'], "<@{}>: :yo:".format(message['user']))
        return

    cmd, arg = text.split(" ", 1)
    if cmd == "run":
        job = arg
        yield from bot.post(
            message['channel'], "<@{}>: Doing bringup of {}".format(
                message['user'], job))
        try:
            yield from runner.run(
                job,
                'slack from <@{}>'.format(message['user'])
            )
        except ValueError as e:
            yield from bot.post(
                message['channel'],
                "<@{user}>: Gah, {job} failed - {e}".format(
                    user=message['user'], e=e, job=job)
            )
            return
        yield from bot.post(
            message['channel'],
            "<@{user}>: job {job} online - {webroot}/container/{job}/".format(
                user=message['user'], webroot=WEB_ROOT, job=job))
mit
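butterfield.py above uses the old generator-based coroutine style; a reduced sketch of its command-dispatch shape, with a stub post() standing in for bot.post (note @asyncio.coroutine was removed in Python 3.11, so this runs on older interpreters like the original):

import asyncio

@asyncio.coroutine
def post(channel, text):
    yield from asyncio.sleep(0)  # stand-in for the Slack round-trip
    print("[{}] {}".format(channel, text))

@asyncio.coroutine
def run(message):
    text = message.get("text", "")
    if not text:
        yield from post(message["channel"], "Invalid request")
        return
    cmd, _, arg = text.partition(" ")
    if cmd == "run":
        yield from post(message["channel"], "Doing bringup of {}".format(arg))

asyncio.get_event_loop().run_until_complete(
    run({"channel": "#cron", "text": "run myjob"}))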
vrutkovs/atomic-reactor
atomic_reactor/plugins/exit_koji_tag_build.py
3
3493
""" Copyright (c) 2017 Red Hat, Inc All rights reserved. This software may be modified and distributed under the terms of the BSD license. See the LICENSE file for details. """ from __future__ import unicode_literals from atomic_reactor.constants import PLUGIN_KOJI_TAG_BUILD_KEY from atomic_reactor.koji_util import create_koji_session, tag_koji_build from atomic_reactor.plugin import ExitPlugin from atomic_reactor.plugins.exit_koji_import import KojiImportPlugin from atomic_reactor.plugins.exit_koji_promote import KojiPromotePlugin class KojiTagBuildPlugin(ExitPlugin): """ Tag build in koji Authentication is with Kerberos unless the koji_ssl_certs configuration parameter is given, in which case it should be a path at which 'cert', 'ca', and 'serverca' are the certificates for SSL authentication. If Kerberos is used for authentication, the default principal will be used (from the kernel keyring) unless both koji_keytab and koji_principal are specified. The koji_keytab parameter is a keytab name like 'type:name', and so can be used to specify a key in a Kubernetes secret by specifying 'FILE:/path/to/key'. """ key = PLUGIN_KOJI_TAG_BUILD_KEY is_allowed_to_fail = False def __init__(self, tasker, workflow, kojihub, target, koji_ssl_certs=None, koji_proxy_user=None, koji_principal=None, koji_keytab=None, poll_interval=5): """ constructor :param tasker: DockerTasker instance :param workflow: DockerBuildWorkflow instance :param kojihub: string, koji hub (xmlrpc) :param target: str, koji target :param koji_ssl_certs: str, path to 'cert', 'ca', 'serverca' :param koji_proxy_user: str, user to log in as (requires hub config) :param koji_principal: str, Kerberos principal (must specify keytab) :param koji_keytab: str, keytab name (must specify principal) :param poll_interval: int, seconds between Koji task status requests """ super(KojiTagBuildPlugin, self).__init__(tasker, workflow) if bool(koji_principal) != bool(koji_keytab): raise RuntimeError('specify both koji_principal and koji_keytab ' 'or neither') self.kojihub = kojihub self.koji_auth = { "proxyuser": koji_proxy_user, "ssl_certs_dir": koji_ssl_certs, # krbV python library throws an error if these are unicode "krb_principal": str(koji_principal), "krb_keytab": str(koji_keytab) } self.target = target self.poll_interval = poll_interval def run(self): """ Run the plugin. """ if self.workflow.build_process_failed: self.log.info('Build failed, skipping koji tagging') return build_id = self.workflow.exit_results.get(KojiImportPlugin.key) if not build_id: build_id = self.workflow.exit_results.get(KojiPromotePlugin.key) if not build_id: self.log.info('No koji build from %s or %s', KojiImportPlugin.key, KojiPromotePlugin.key) return session = create_koji_session(self.kojihub, self.koji_auth) build_tag = tag_koji_build(session, build_id, self.target, poll_interval=self.poll_interval) return build_tag
bsd-3-clause
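The plugin's constructor enforces that the Kerberos principal and keytab come as a pair; the same both-or-neither check in isolation (the argument values below are made up):

def check_principal_keytab(koji_principal, koji_keytab):
    # bool(x) != bool(y) is true exactly when one is set and the other empty.
    if bool(koji_principal) != bool(koji_keytab):
        raise RuntimeError('specify both koji_principal and koji_keytab '
                           'or neither')

check_principal_keytab(None, None)                                   # ok: neither
check_principal_keytab('osbs@EXAMPLE.COM', 'FILE:/etc/osbs.keytab')  # ok: both
try:
    check_principal_keytab('osbs@EXAMPLE.COM', None)
except RuntimeError as e:
    print(e)  # specify both koji_principal and koji_keytab or neither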
yashodhank/erpnext
erpnext/setup/setup_wizard/setup_wizard.py
3
18633
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals

import frappe, copy
import os
import json
from frappe.utils import cstr, flt, getdate
from frappe import _
from frappe.utils.file_manager import save_file
from .default_website import website_maker
import install_fixtures
from .sample_data import make_sample_data
from erpnext.accounts.doctype.account.account import RootNotEditable
from frappe.core.doctype.communication.comment import add_info_comment
from erpnext.setup.setup_wizard.domainify import setup_domain

def setup_complete(args=None):
    if frappe.db.sql("select name from tabCompany"):
        frappe.throw(_("Setup Already Complete!!"))

    install_fixtures.install(args.get("country"))

    create_price_lists(args)
    create_fiscal_year_and_company(args)
    create_sales_tax(args)
    create_users(args)
    set_defaults(args)
    create_territories()
    create_feed_and_todo()
    create_email_digest()
    create_letter_head(args)
    create_taxes(args)
    create_items(args)
    create_customers(args)
    create_suppliers(args)

    if args.domain.lower() == 'education':
        create_academic_year()
        create_academic_term()
        create_program(args)
        create_course(args)
        create_instructor(args)
        create_room(args)

    if args.get('setup_website'):
        website_maker(args)

    create_logo(args)

    frappe.local.message_log = []
    setup_domain(args.get('domain'))

    frappe.db.commit()
    login_as_first_user(args)

    frappe.db.commit()
    frappe.clear_cache()

    if args.get("add_sample_data"):
        try:
            make_sample_data()
            frappe.clear_cache()
        except:
            # clear message
            if frappe.message_log:
                frappe.message_log.pop()
            pass

def create_fiscal_year_and_company(args):
    if (args.get('fy_start_date')):
        curr_fiscal_year = get_fy_details(args.get('fy_start_date'),
            args.get('fy_end_date'))
        frappe.get_doc({
            "doctype": "Fiscal Year",
            'year': curr_fiscal_year,
            'year_start_date': args.get('fy_start_date'),
            'year_end_date': args.get('fy_end_date'),
        }).insert()
        args["curr_fiscal_year"] = curr_fiscal_year

    # Company
    if (args.get('company_name')):
        frappe.get_doc({
            "doctype": "Company",
            'company_name': args.get('company_name').strip(),
            'abbr': args.get('company_abbr'),
            'default_currency': args.get('currency'),
            'country': args.get('country'),
            'create_chart_of_accounts_based_on': 'Standard Template',
            'chart_of_accounts': args.get(('chart_of_accounts')),
            'domain': args.get('domain')
        }).insert()

    # Enable shopping cart
    enable_shopping_cart(args)

    # Bank Account
    create_bank_account(args)

def enable_shopping_cart(args):
    frappe.get_doc({
        "doctype": "Shopping Cart Settings",
        "enabled": 1,
        'company': args.get('company_name').strip(),
        'price_list': frappe.db.get_value("Price List", {"selling": 1}),
        'default_customer_group': _("Individual"),
        'quotation_series': "QTN-",
    }).insert()

def create_bank_account(args):
    if args.get("bank_account"):
        company_name = args.get('company_name').strip()
        bank_account_group = frappe.db.get_value("Account",
            {"account_type": "Bank", "is_group": 1, "root_type": "Asset",
                "company": company_name})
        if bank_account_group:
            bank_account = frappe.get_doc({
                "doctype": "Account",
                'account_name': args.get("bank_account"),
                'parent_account': bank_account_group,
                'is_group': 0,
                'company': company_name,
                "account_type": "Bank",
            })
            try:
                return bank_account.insert()
            except RootNotEditable:
                frappe.throw(_("Bank account cannot be named as {0}").format(
                    args.get("bank_account")))
            except frappe.DuplicateEntryError:
                # bank account same as a CoA entry
                pass

def create_price_lists(args):
    for pl_type, pl_name in (("Selling", _("Standard Selling")),
            ("Buying", _("Standard Buying"))):
        frappe.get_doc({
            "doctype": "Price List",
            "price_list_name": pl_name,
            "enabled": 1,
            "buying": 1 if pl_type == "Buying" else 0,
            "selling": 1 if pl_type == "Selling" else 0,
            "currency": args["currency"]
        }).insert()

def set_defaults(args):
    # enable default currency
    frappe.db.set_value("Currency", args.get("currency"), "enabled", 1)

    global_defaults = frappe.get_doc("Global Defaults", "Global Defaults")
    global_defaults.update({
        'current_fiscal_year': args.curr_fiscal_year,
        'default_currency': args.get('currency'),
        'default_company': args.get('company_name').strip(),
        "country": args.get("country"),
    })

    global_defaults.save()

    frappe.db.set_value("System Settings", None, "email_footer_address",
        args.get("company"))

    accounts_settings = frappe.get_doc("Accounts Settings")
    accounts_settings.auto_accounting_for_stock = 1
    accounts_settings.save()

    stock_settings = frappe.get_doc("Stock Settings")
    stock_settings.item_naming_by = "Item Code"
    stock_settings.valuation_method = "FIFO"
    stock_settings.default_warehouse = frappe.db.get_value('Warehouse',
        {'warehouse_name': _('Stores')})
    stock_settings.stock_uom = _("Nos")
    stock_settings.auto_indent = 1
    stock_settings.auto_insert_price_list_rate_if_missing = 1
    stock_settings.automatically_set_serial_nos_based_on_fifo = 1
    stock_settings.save()

    selling_settings = frappe.get_doc("Selling Settings")
    selling_settings.cust_master_name = "Customer Name"
    selling_settings.so_required = "No"
    selling_settings.dn_required = "No"
    selling_settings.save()

    buying_settings = frappe.get_doc("Buying Settings")
    buying_settings.supp_master_name = "Supplier Name"
    buying_settings.po_required = "No"
    buying_settings.pr_required = "No"
    buying_settings.maintain_same_rate = 1
    buying_settings.save()

    notification_control = frappe.get_doc("Notification Control")
    notification_control.quotation = 1
    notification_control.sales_invoice = 1
    notification_control.purchase_order = 1
    notification_control.save()

    hr_settings = frappe.get_doc("HR Settings")
    hr_settings.emp_created_by = "Naming Series"
    hr_settings.save()

def create_feed_and_todo():
    """update Activity feed and create todo for creation of item,
    customer, vendor"""
    add_info_comment(**{
        "subject": _("ERPNext Setup Complete!")
    })

def create_email_digest():
    from frappe.utils.user import get_system_managers
    system_managers = get_system_managers(only_name=True)
    if not system_managers:
        return

    companies = frappe.db.sql_list("select name FROM `tabCompany`")
    for company in companies:
        if not frappe.db.exists("Email Digest", "Default Weekly Digest - " + company):
            edigest = frappe.get_doc({
                "doctype": "Email Digest",
                "name": "Default Weekly Digest - " + company,
                "company": company,
                "frequency": "Weekly",
                "recipient_list": "\n".join(system_managers)
            })

            for df in edigest.meta.get("fields", {"fieldtype": "Check"}):
                if df.fieldname != "scheduler_errors":
                    edigest.set(df.fieldname, 1)

            edigest.insert()

    # scheduler errors digest
    if companies:
        edigest = frappe.new_doc("Email Digest")
        edigest.update({
            "name": "Scheduler Errors",
            "company": companies[0],
            "frequency": "Daily",
            "recipient_list": "\n".join(system_managers),
            "scheduler_errors": 1,
            "enabled": 1
        })
        edigest.insert()

def get_fy_details(fy_start_date, fy_end_date):
    start_year = getdate(fy_start_date).year
    if start_year == getdate(fy_end_date).year:
        fy = cstr(start_year)
    else:
        fy = cstr(start_year) + '-' + cstr(start_year + 1)
    return fy

def create_sales_tax(args):
    country_wise_tax = get_country_wise_tax(args.get("country"))
    if country_wise_tax and len(country_wise_tax) > 0:
        for sales_tax, tax_data in country_wise_tax.items():
            make_tax_account_and_template(args.get("company_name").strip(),
                tax_data.get('account_name'), tax_data.get('tax_rate'),
                sales_tax)

def get_country_wise_tax(country):
    data = {}
    with open(os.path.join(os.path.dirname(__file__), "data",
            "country_wise_tax.json")) as countrywise_tax:
        data = json.load(countrywise_tax).get(country)
    return data

def create_taxes(args):
    for i in xrange(1, 6):
        if args.get("tax_" + str(i)):
            # replace % in case someone also enters the % symbol
            tax_rate = cstr(args.get("tax_rate_" + str(i)) or "").replace("%", "")
            account_name = args.get("tax_" + str(i))

            make_tax_account_and_template(args.get("company_name").strip(),
                account_name, tax_rate)

def make_tax_account_and_template(company, account_name, tax_rate, template_name=None):
    try:
        account = make_tax_account(company, account_name, tax_rate)
        if account:
            make_sales_and_purchase_tax_templates(account, template_name)
    except frappe.NameError, e:
        if e.args[2][0] == 1062:
            pass
        else:
            raise
    except RootNotEditable, e:
        pass

def get_tax_account_group(company):
    tax_group = frappe.db.get_value("Account",
        {"account_name": "Duties and Taxes", "is_group": 1, "company": company})
    if not tax_group:
        tax_group = frappe.db.get_value("Account",
            {"is_group": 1, "root_type": "Liability", "account_type": "Tax",
                "company": company})

    return tax_group

def make_tax_account(company, account_name, tax_rate):
    tax_group = get_tax_account_group(company)
    if tax_group:
        return frappe.get_doc({
            "doctype": "Account",
            "company": company,
            "parent_account": tax_group,
            "account_name": account_name,
            "is_group": 0,
            "report_type": "Balance Sheet",
            "root_type": "Liability",
            "account_type": "Tax",
            "tax_rate": flt(tax_rate) if tax_rate else None
        }).insert(ignore_permissions=True)

def make_sales_and_purchase_tax_templates(account, template_name=None):
    if not template_name:
        template_name = account.name

    sales_tax_template = {
        "doctype": "Sales Taxes and Charges Template",
        "title": template_name,
        "company": account.company,
        "taxes": [{
            "category": "Valuation and Total",
            "charge_type": "On Net Total",
            "account_head": account.name,
            "description": "{0} @ {1}".format(account.account_name, account.tax_rate),
            "rate": account.tax_rate
        }]
    }

    # Sales
    frappe.get_doc(copy.deepcopy(sales_tax_template)).insert(ignore_permissions=True)

    # Purchase
    purchase_tax_template = copy.deepcopy(sales_tax_template)
    purchase_tax_template["doctype"] = "Purchase Taxes and Charges Template"
    frappe.get_doc(purchase_tax_template).insert(ignore_permissions=True)

def create_items(args):
    for i in xrange(1, 6):
        item = args.get("item_" + str(i))
        if item:
            item_group = args.get("item_group_" + str(i))
            is_sales_item = args.get("is_sales_item_" + str(i))
            is_purchase_item = args.get("is_purchase_item_" + str(i))
            is_stock_item = item_group != _("Services")
            default_warehouse = ""
            if is_stock_item:
                default_warehouse = frappe.db.get_value("Warehouse", filters={
                    "warehouse_name": _("Finished Goods") if is_sales_item
                        else _("Stores"),
                    "company": args.get("company_name").strip()
                })

            try:
                frappe.get_doc({
                    "doctype": "Item",
                    "item_code": item,
                    "item_name": item,
                    "description": item,
                    "show_in_website": 1,
                    "is_sales_item": is_sales_item,
                    "is_purchase_item": is_purchase_item,
                    "is_stock_item": is_stock_item and 1 or 0,
                    "item_group": item_group,
                    "stock_uom": args.get("item_uom_" + str(i)),
                    "default_warehouse": default_warehouse
                }).insert()

                if args.get("item_img_" + str(i)):
                    item_image = args.get("item_img_" + str(i)).split(",")
                    if len(item_image) == 3:
                        filename, filetype, content = item_image
                        fileurl = save_file(filename, content, "Item", item,
                            decode=True).file_url
                        frappe.db.set_value("Item", item, "image", fileurl)

                if args.get("item_price_" + str(i)):
                    item_price = flt(args.get("item_price_" + str(i)))

                    if is_sales_item:
                        price_list_name = frappe.db.get_value("Price List",
                            {"selling": 1})
                        make_item_price(item, price_list_name, item_price)

                    if is_purchase_item:
                        price_list_name = frappe.db.get_value("Price List",
                            {"buying": 1})
                        make_item_price(item, price_list_name, item_price)

            except frappe.NameError:
                pass

def make_item_price(item, price_list_name, item_price):
    frappe.get_doc({
        "doctype": "Item Price",
        "price_list": price_list_name,
        "item_code": item,
        "price_list_rate": item_price
    }).insert()

def create_customers(args):
    for i in xrange(1, 6):
        customer = args.get("customer_" + str(i))
        if customer:
            try:
                doc = frappe.get_doc({
                    "doctype": "Customer",
                    "customer_name": customer,
                    "customer_type": "Company",
                    "customer_group": _("Commercial"),
                    "territory": args.get("country"),
                    "company": args.get("company_name").strip()
                }).insert()

                if args.get("customer_contact_" + str(i)):
                    create_contact(args.get("customer_contact_" + str(i)),
                        "Customer", doc.name)
            except frappe.NameError:
                pass

def create_suppliers(args):
    for i in xrange(1, 6):
        supplier = args.get("supplier_" + str(i))
        if supplier:
            try:
                doc = frappe.get_doc({
                    "doctype": "Supplier",
                    "supplier_name": supplier,
                    "supplier_type": _("Local"),
                    "company": args.get("company_name").strip()
                }).insert()

                if args.get("supplier_contact_" + str(i)):
                    create_contact(args.get("supplier_contact_" + str(i)),
                        "Supplier", doc.name)
            except frappe.NameError:
                pass

def create_contact(contact, party_type, party):
    """Create contact based on given contact name"""
    contact = contact.strip().split(" ")

    contact = frappe.get_doc({
        "doctype": "Contact",
        "first_name": contact[0],
        "last_name": len(contact) > 1 and contact[1] or ""
    })
    contact.append('links', dict(link_doctype=party_type, link_name=party))
    contact.insert()

def create_letter_head(args):
    if args.get("attach_letterhead"):
        frappe.get_doc({
            "doctype": "Letter Head",
            "letter_head_name": _("Standard"),
            "is_default": 1
        }).insert()

        attach_letterhead = args.get("attach_letterhead").split(",")
        if len(attach_letterhead) == 3:
            filename, filetype, content = attach_letterhead
            fileurl = save_file(filename, content, "Letter Head",
                _("Standard"), decode=True).file_url
            frappe.db.set_value("Letter Head", _("Standard"), "content",
                "<img src='%s' style='max-width: 100%%;'>" % fileurl)

def create_logo(args):
    if args.get("attach_logo"):
        attach_logo = args.get("attach_logo").split(",")
        if len(attach_logo) == 3:
            filename, filetype, content = attach_logo
            fileurl = save_file(filename, content, "Website Settings",
                "Website Settings", decode=True).file_url
            frappe.db.set_value("Website Settings", "Website Settings",
                "brand_html",
                "<img src='{0}' style='max-width: 40px; max-height: 25px;'> {1}".format(
                    fileurl, args.get("company_name").strip()))

def create_territories():
    """create two default territories, one for home country and one named
    Rest of the World"""
    from frappe.utils.nestedset import get_root_of
    country = frappe.db.get_default("country")
    root_territory = get_root_of("Territory")

    for name in (country, _("Rest Of The World")):
        if name and not frappe.db.exists("Territory", name):
            frappe.get_doc({
                "doctype": "Territory",
                "territory_name": name.replace("'", ""),
                "parent_territory": root_territory,
                "is_group": "No"
            }).insert()

def login_as_first_user(args):
    if args.get("email") and hasattr(frappe.local, "login_manager"):
        frappe.local.login_manager.login_as(args.get("email"))

def create_users(args):
    if frappe.session.user == 'Administrator':
        return

    # create employee for self
    emp = frappe.get_doc({
        "doctype": "Employee",
        "employee_name": " ".join(filter(None,
            [args.get("first_name"), args.get("last_name")])),
        "user_id": frappe.session.user,
        "status": "Active",
        "company": args.get("company_name")
    })
    emp.flags.ignore_mandatory = True
    emp.insert(ignore_permissions=True)

    for i in xrange(1, 5):
        email = args.get("user_email_" + str(i))
        fullname = args.get("user_fullname_" + str(i))
        if email:
            if not fullname:
                fullname = email.split("@")[0]

            parts = fullname.split(" ", 1)

            user = frappe.get_doc({
                "doctype": "User",
                "email": email,
                "first_name": parts[0],
                "last_name": parts[1] if len(parts) > 1 else "",
                "enabled": 1,
                "user_type": "System User"
            })

            # default roles
            user.append_roles("Projects User", "Stock User", "Support Team")

            if args.get("user_sales_" + str(i)):
                user.append_roles("Sales User", "Sales Manager",
                    "Accounts User")
            if args.get("user_purchaser_" + str(i)):
                user.append_roles("Purchase User", "Purchase Manager",
                    "Accounts User")
            if args.get("user_accountant_" + str(i)):
                user.append_roles("Accounts Manager", "Accounts User")

            user.flags.delay_emails = True

            if not frappe.db.get_value("User", email):
                user.insert(ignore_permissions=True)

                # create employee
                emp = frappe.get_doc({
                    "doctype": "Employee",
                    "employee_name": fullname,
                    "user_id": email,
                    "status": "Active",
                    "company": args.get("company_name")
                })
                emp.flags.ignore_mandatory = True
                emp.insert(ignore_permissions=True)

def create_academic_term():
    at = ["Semester 1", "Semester 2", "Semester 3"]
    ay = ["2013-14", "2014-15", "2015-16", "2016-17", "2017-18"]
    for y in ay:
        for t in at:
            academic_term = frappe.new_doc("Academic Term")
            academic_term.academic_year = y
            academic_term.term_name = t
            try:
                academic_term.save()
            except frappe.DuplicateEntryError:
                pass

def create_academic_year():
    ac = ["2013-14", "2014-15", "2015-16", "2016-17", "2017-18"]
    for d in ac:
        academic_year = frappe.new_doc("Academic Year")
        academic_year.academic_year_name = d
        try:
            academic_year.save()
        except frappe.DuplicateEntryError:
            pass

def create_program(args):
    for i in xrange(1, 6):
        if args.get("program_" + str(i)):
            program = frappe.new_doc("Program")
            program.program_name = args.get("program_" + str(i))
            try:
                program.save()
            except frappe.DuplicateEntryError:
                pass

def create_course(args):
    for i in xrange(1, 6):
        if args.get("course_" + str(i)):
            course = frappe.new_doc("Course")
            course.course_name = args.get("course_" + str(i))
            try:
                course.save()
            except frappe.DuplicateEntryError:
                pass

def create_instructor(args):
    for i in xrange(1, 6):
        if args.get("instructor_" + str(i)):
            instructor = frappe.new_doc("Instructor")
            instructor.instructor_name = args.get("instructor_" + str(i))
            try:
                instructor.save()
            except frappe.DuplicateEntryError:
                pass

def create_room(args):
    for i in xrange(1, 6):
        if args.get("room_" + str(i)):
            room = frappe.new_doc("Room")
            room.room_name = args.get("room_" + str(i))
            room.seating_capacity = args.get("room_capacity_" + str(i))
            try:
                room.save()
            except frappe.DuplicateEntryError:
                pass
agpl-3.0
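The wizard's fiscal-year labelling is self-contained enough to demonstrate standalone; a sketch using datetime.date in place of frappe's getdate/cstr helpers (the function name get_fy_label is illustrative):

import datetime

def get_fy_label(fy_start_date, fy_end_date):
    # Mirrors get_fy_details above: a single-year label if the fiscal year
    # falls inside one calendar year, otherwise a "YYYY-YYYY+1" span.
    start_year = fy_start_date.year
    if start_year == fy_end_date.year:
        return str(start_year)
    return str(start_year) + '-' + str(start_year + 1)

print(get_fy_label(datetime.date(2015, 1, 1), datetime.date(2015, 12, 31)))
# 2015
print(get_fy_label(datetime.date(2015, 4, 1), datetime.date(2016, 3, 31)))
# 2015-2016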
wikimedia/analytics-aggregator
aggregator/projectcounts.py
1
34906
# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ aggregator.projectcounts ~~~~~~~~~~~~~~~~~~~~~~~~ This module contains functions to aggregate Wikimedia's projectcount files. """ import logging import calendar import datetime import os import glob import util PROJECTVIEWS_STRFTIME_PATTERN = ('%%Y%s%%Y-%%m%sprojectviews-%%Y%%m%%d-' '%%H0000' % (os.sep, os.sep)) PROJECTCOUNTS_STRFTIME_PATTERN = ('%%Y%s%%Y-%%m%sprojectcounts-%%Y%%m%%d-' '%%H0000' % (os.sep, os.sep)) CSV_HEADER = 'Date,Total,Desktop site,Mobile site,Zero site' DATE_MOBILE_ADDED = datetime.date(2014, 9, 23) cache = {} def clear_cache(): global cache logging.debug("Clearing projectcounts cache") cache = {} def aggregate_for_date( source_dir_abs, date, allow_bad_data=False, output_projectviews=False): """Aggregates hourly projectcounts for a given day. This function does not attempt to cache the aggregated data. Either cache yourself, or is helper methods that cache, as get_hourly_count_for_webstatscollector_abbreviation. If one of the required 24 hourly files do not exist, cannot be read, or some other issue occurs, a RuntimeError is raised. The returned dictonary is keyed by the lowercase webstatscollector abbreviation, and values are the total counts for this day. :param source_dir_abs: Absolute directory to read the hourly projectcounts files from. :param date: The date to get the count for. :param allow_bad_data: If True, do not bail out, if some data is bad or missing. (Default: False) :param output_projectviews: If True, name the output files projectviews instead of projectcounts. (Default: False) """ daily_data = {} if output_projectviews: output_format = PROJECTVIEWS_STRFTIME_PATTERN else: output_format = PROJECTCOUNTS_STRFTIME_PATTERN for hour in range(24): # Initialize with the relevant hour start ... 
hourly_file_datetime = datetime.datetime(date.year, date.month, date.day, hour) # and add another hour since webstatscollector uses the interval end in # the file name :-( hourly_file_datetime += datetime.timedelta(hours=1) hourly_file_abs = os.path.join( source_dir_abs, hourly_file_datetime.strftime(output_format)) if not os.path.isfile(hourly_file_abs): if allow_bad_data: # The file does not exist, but bad data is explicitly # allowed, so we continue aggregating continue else: raise RuntimeError("'%s' is not an existing file" % ( hourly_file_abs)) logging.debug("Reading %s" % (hourly_file_abs)) with open(hourly_file_abs, 'r') as hourly_file: for line in hourly_file: fields = line.split(' ') if len(fields) != 4: logging.warn("File %s as an incorrect line: %s" % ( hourly_file_abs, line)) # Kept in case we want to get back to raising an error # raise RuntimeError("Malformed line in '%s'" % ( # hourly_file)) else: abbreviation = fields[0].lower() count = int(fields[2]) daily_data[abbreviation] = daily_data.get(abbreviation, 0) \ + count return daily_data def get_daily_count(source_dir_abs, webstatscollector_abbreviation, date, allow_bad_data=False, output_projectviews=False): """Obtains the daily count for a webstatscollector abbreviation. Data gets cached upon read. For a day, the data is <50KB, so having many dates in cache is not resource intensive. :param source_dir_abs: Absolute directory to read the hourly projectcounts files from. :param webstatscollector_abbreviation: The webstatscollector abbreviation to get the count for :param date: The date to get the count for. :param allow_bad_data: If True, do not bail out, if some data is bad or missing. (Default: False) :param output_projectviews: If True, name the output files projectviews instead of projectcounts. (Default: False) """ global cache try: source_dir_cache = cache[source_dir_abs] except KeyError: source_dir_cache = {} cache[source_dir_abs] = source_dir_cache try: date_data = source_dir_cache[date] except KeyError: date_data = aggregate_for_date( source_dir_abs, date, allow_bad_data, output_projectviews ) source_dir_cache[date] = date_data return date_data.get(webstatscollector_abbreviation, 0) def update_daily_csv(target_dir_abs, dbname, csv_data_input, first_date, last_date, bad_dates=[], force_recomputation=False): """Updates daily per project CSVs from a csv data dictionary. The existing per project CSV files in target_dir_abs/daily are updated from first_date up to (and including) last_date. If the CSVs already has data for a given day, it is not recomputed, unless force_recomputation is True. But if a day is in the set of days that are considered, and it is also in bad_dates, the data for this day is removed regardless of force_recomputation. Upon any error, the function raises an exception. :param target_dir_abs: Absolute directory. CSVs are getting written to the 'daily' subdirectory of target_dir_abs. :param dbname: The database name of the wiki to consider (E.g.: 'enwiki') :param csv_data_input: The data dict to aggregate from :param first_date: The first date to compute non-existing data for. :param last_date: The last date to compute non-existing data for. :param bad_dates: List of dates considered having bad data. (Default: []) :param force_recomputation: If True, recompute data for the given days, even if it is already in the CSV. 
(Default: False) """ csv_dir_abs = os.path.join(target_dir_abs, 'daily') if not os.path.exists(csv_dir_abs): os.mkdir(csv_dir_abs) csv_file_abs = os.path.join(csv_dir_abs, dbname + '.csv') csv_data = util.parse_csv_to_first_column_dict(csv_file_abs) for date in util.generate_dates(first_date, last_date): date_str = date.isoformat() logging.debug("Updating csv '%s' for date '%s'" % ( dbname, str(date))) if date in bad_dates: if date_str in csv_data: del csv_data[date_str] else: if date_str not in csv_data or force_recomputation: if date_str not in csv_data_input: raise RuntimeError("No data for '%s' during daily " "aggregation" % (date_str)) csv_data[date_str] = csv_data_input[date_str] util.write_dict_values_sorted_to_csv( csv_file_abs, csv_data, header=CSV_HEADER) def rescale_counts(csv_data, dates, bad_dates, rescale_to): """Extracts relevant dates from CSV data, sums them up, and rescales them. If the dates only cover bad dates, None is returned. Each column is rescaled separatedly. Missing columns for good dates are not assumed to be 0. The first column is ignored, and assumed to hold the date for the reading. The second column is assumed to hold the sum of the remaining columns. This column is not rescaled, but the recomputed by summing the other rescaled columns. Thereby, we can guarantee that the "total sum" always is the sum of the other columns. Upon other errors, a RuntimeError is raised. The rescaled counts are returned as list of integers. :param csv_data_input: The data dict to get data from :param dates: The dates to sum up counts for :param bad_dates: List of dates considered having bad data. :param rescale_to: Rescale the good entries to this many entries. """ ret = None aggregations = None columns = 0 for date in dates: if date in bad_dates: continue date_str = date.isoformat() try: csv_line_items = csv_data[date_str].split(',') except KeyError: raise RuntimeError("No data for '%s'" % (date_str)) # Getting rid if date column. No need to aggregate date columns. del csv_line_items[0] # Getting rid of the "total sum" column. # We always want the "total sum" column to be the sum of the # other columns in the row. Hence, we cannot simply rescale # the "total sum" column from the other rows, as that would on # the one hand give rounding artifacts, and on the other hand # would not work if some row is missing values for some # columns. Therefore, we don't rescale the "total sum" column, # but recompute it after the other columns' rescaled value is # known. del csv_line_items[0] if ret is None: ret = [] aggregations = [] # Make sure we can fit csv_line_items's columns into the aggregations while columns < len(csv_line_items): ret.append(0) aggregations.append(0) columns += 1 for i in range(columns): try: ret[i] += int(csv_line_items[i].strip()) aggregations[i] += 1 except IndexError: # csv_line_times is shorter than ret. pass except ValueError: # No valid reading. (E.g. the empty string) pass if ret is not None: # Since we found readings, rescale. ret = [(ret[i] * rescale_to) / aggregations[i] if aggregations[i] else None for i in range(columns)] # Then recompute the "total sum" column and prepend it. ret.insert(0, sum([0 if i is None else i for i in ret])) return ret def update_weekly_csv(target_dir_abs, dbname, csv_data_input, first_date, last_date, bad_dates=[], force_recomputation=False): """Updates weekly per project CSVs from a csv data dictionary. 
The existing per project CSV files in target_dir_abs/weekly are updated for all weeks where Sunday in in the date interval from first_date up to (and including) last_date. For weekly aggregations, a week's total data is rescaled to 7 days. If a week under consideration contains no good date, it is removed. Upon any error, the function raises an exception. :param target_dir_abs: Absolute directory. CSVs are getting written to the 'weekly_rescaled' subdirectory of target_dir_abs. :param dbname: The database name of the wiki to consider (E.g.: 'enwiki') :param csv_data_input: The data dict to aggregate from :param first_date: The first date to compute non-existing data for. :param last_date: The last date to compute non-existing data for. :param bad_dates: List of dates considered having bad data. (Default: []) :param force_recomputation: If True, recompute data for the given days, even if it is already in the CSV. (Default: False) """ csv_dir_abs = os.path.join(target_dir_abs, 'weekly_rescaled') if not os.path.exists(csv_dir_abs): os.mkdir(csv_dir_abs) csv_file_abs = os.path.join(csv_dir_abs, dbname + '.csv') csv_data = util.parse_csv_to_first_column_dict(csv_file_abs) for date in util.generate_dates(first_date, last_date): if date.weekday() == 6: # Sunday. End of ISO week date_str = date.strftime('%GW%V') logging.debug("Updating csv '%s' for date '%s'" % ( dbname, str(date))) week_dates = set(date + datetime.timedelta(days=offset) for offset in range(-6, 1)) expected_good_dates = len(week_dates - set(bad_dates)) need_recomputation = force_recomputation need_recomputation |= expected_good_dates != 7 need_recomputation |= date_str not in csv_data if need_recomputation: if expected_good_dates == 0: try: del csv_data[date_str] except KeyError: # No reading was there to remove. That's ok :-) pass else: weekly_counts = rescale_counts(csv_data_input, week_dates, bad_dates, 7) util.update_csv_data_dict( csv_data, date_str, *weekly_counts) util.write_dict_values_sorted_to_csv( csv_file_abs, csv_data, header=CSV_HEADER) def update_monthly_csv(target_dir_abs, dbname, csv_data_input, first_date, last_date, bad_dates=[], force_recomputation=False): """Updates monthly per project CSVs from a csv data dictionary. The existing per project CSV files in target_dir_abs/monthly_rescaled are updated for all months where the last day of the month is in the date interval from first_date up to (and including) last_date. For monthly aggregations, a month's total data is rescaled to 30 days. If a month under consideration contains no good date, it is removed. Upon any error, the function raises an exception. :param target_dir_abs: Absolute directory. CSVs are getting written to the 'monthly_rescaled' subdirectory of target_dir_abs. :param dbname: The database name of the wiki to consider (E.g.: 'enwiki') :param csv_data_input: The data dict to aggregate from :param first_date: The first date to compute non-existing data for. :param last_date: The last date to compute non-existing data for. :param bad_dates: List of dates considered having bad data. (Default: []) :param force_recomputation: If True, recompute data for the given days, even if it is already in the CSV. 
(Default: False) """ csv_dir_abs = os.path.join(target_dir_abs, 'monthly_rescaled') if not os.path.exists(csv_dir_abs): os.mkdir(csv_dir_abs) csv_file_abs = os.path.join(csv_dir_abs, dbname + '.csv') csv_data = util.parse_csv_to_first_column_dict(csv_file_abs) for date in util.generate_dates(first_date, last_date): if (date + datetime.timedelta(days=1)).day == 1: # date + 1 day is the first of a month, so date is the last of a # month. Let's compute for this month date_str = date.strftime('%Y-%m') logging.debug("Updating csv '%s' for date '%s'" % ( dbname, date_str)) days_in_month = date.day month_dates = set(datetime.date(date.year, date.month, day) for day in range(1, days_in_month+1)) expected_good_dates = len(month_dates - set(bad_dates)) need_recomputation = force_recomputation need_recomputation |= expected_good_dates != days_in_month need_recomputation |= date_str not in csv_data if need_recomputation: if expected_good_dates == 0: try: del csv_data[date_str] except KeyError: # No reading was there to remove. That's ok :-) pass else: monthly_counts = rescale_counts(csv_data_input, month_dates, bad_dates, 30) util.update_csv_data_dict( csv_data, date_str, *monthly_counts) util.write_dict_values_sorted_to_csv( csv_file_abs, csv_data, header=CSV_HEADER) def update_yearly_csv(target_dir_abs, dbname, csv_data_input, first_date, last_date, bad_dates=[], force_recomputation=False): """Updates yearly per project CSVs from a csv data dictionary. The existing per project CSV files in target_dir_abs/yearly_rescaled are updated for all years where the last day of the year is in the date interval from first_date up to (and including) last_date. For yearly aggregations, a year's total data is rescaled to 365 days. If a year under consideration contains no good date, it is removed. Upon any error, the function raises an exception. :param target_dir_abs: Absolute directory. CSVs are getting written to the 'yearly_rescaled' subdirectory of target_dir_abs. :param dbname: The database name of the wiki to consider (E.g.: 'enwiki') :param csv_data_input: The data dict to aggregate from :param first_date: The first date to compute non-existing data for. :param last_date: The last date to compute non-existing data for. :param bad_dates: List of dates considered having bad data. (Default: []) :param force_recomputation: If True, recompute data for the given days, even if it is already in the CSV. (Default: False) """ csv_dir_abs = os.path.join(target_dir_abs, 'yearly_rescaled') if not os.path.exists(csv_dir_abs): os.mkdir(csv_dir_abs) csv_file_abs = os.path.join(csv_dir_abs, dbname + '.csv') csv_data = util.parse_csv_to_first_column_dict(csv_file_abs) for date in util.generate_dates(first_date, last_date): if date.month == 12 and date.day == 31: # date is the last day of a year. Let's compute for this year date_str = date.strftime('%Y') logging.debug("Updating csv '%s' for date '%s'" % ( dbname, date_str)) days_in_year = 366 if calendar.isleap(date.year) else 365 year_dates = set(date - datetime.timedelta(days=offset) for offset in range(0, days_in_year)) expected_good_dates = len(year_dates - set(bad_dates)) need_recomputation = force_recomputation need_recomputation |= expected_good_dates != days_in_year need_recomputation |= date_str not in csv_data if need_recomputation: if expected_good_dates == 0: try: del csv_data[date_str] except KeyError: # No reading was there to remove. 
                        # That's ok :-)
                        pass
                else:
                    yearly_counts = rescale_counts(csv_data_input, year_dates,
                                                   bad_dates, 365)
                    util.update_csv_data_dict(
                        csv_data, date_str, *yearly_counts)

    util.write_dict_values_sorted_to_csv(
        csv_file_abs, csv_data, header=CSV_HEADER)


def update_per_project_csvs_for_dates(
        source_dir_abs, target_dir_abs, first_date, last_date, bad_dates=[],
        additional_aggregators=[], force_recomputation=False,
        compute_all_projects=False, output_projectviews=False):
    """Updates per project CSVs from hourly projectcounts files.

    The existing per project CSV files in the daily_raw subdirectory of
    target_dir_abs are updated with daily data from first_date up to (and
    including) last_date.

    If a CSV already has data for a given day, it is not recomputed, unless
    force_recomputation is True.

    Upon any error, the function raises an exception without cleaning or
    syncing up the CSVs. So if the first CSV could get updated, but there are
    issues with the second, the data written to the first CSV survives.
    Hence, the CSVs need not end with the same date upon error.

    :param source_dir_abs: Absolute directory to read the hourly
        projectcounts files from.
    :param target_dir_abs: Absolute directory of the per project CSVs.
    :param first_date: The first date to compute non-existing data for.
    :param last_date: The last date to compute non-existing data for.
    :param bad_dates: List of dates considered having bad data. (Default: [])
    :param additional_aggregators: List of functions to additionally
        aggregate with. Those functions need to take target_dir_abs, dbname,
        csv_data_input, first_date, last_date, bad_dates, and
        force_recomputation as parameters (in that order). dbname is the
        database name for the wiki to aggregate for, and csv_data_input is
        the CSV data dictionary for the daily_raw aggregation. The other
        parameters are just passed through. (Default: [])
    :param force_recomputation: If True, recompute data for the given days,
        even if it is already in the CSV. (Default: False)
    :param compute_all_projects: If True, compute counts for all projects
        into a file named 'all.csv'.
    :param output_projectviews: If True, name the output files projectviews
        instead of projectcounts. (Default: False)
    """
    # Contains the aggregation of all data across projects indexed by date.
    all_projects_data = {}

    for csv_file_abs in sorted(glob.glob(os.path.join(
            target_dir_abs, 'daily_raw', '*.csv'))):
        dbname = os.path.basename(csv_file_abs)
        dbname = dbname.rsplit('.csv', 1)[0]
        if dbname == 'all':
            # 'all.csv' is an aggregation across all projects
            # and should not be processed.
            continue
        logging.info("Updating csv '%s'" % (csv_file_abs))

        csv_data = util.parse_csv_to_first_column_dict(csv_file_abs)

        for date in util.generate_dates(first_date, last_date):
            date_str = date.isoformat()
            logging.debug("Updating csv '%s' for date '%s'" % (
                dbname, str(date)))
            if date_str not in csv_data or force_recomputation:
                # Check whether to allow bad data for this day
                allow_bad_data = date in bad_dates

                # desktop site
                abbreviation = util.dbname_to_webstatscollector_abbreviation(
                    dbname, 'desktop')
                count_desktop = get_daily_count(
                    source_dir_abs,
                    abbreviation,
                    date,
                    allow_bad_data,
                    output_projectviews,
                )

                # mobile site
                abbreviation = util.dbname_to_webstatscollector_abbreviation(
                    dbname, 'mobile')
                count_mobile = get_daily_count(
                    source_dir_abs,
                    abbreviation,
                    date,
                    allow_bad_data,
                    output_projectviews,
                )

                # zero site
                abbreviation = util.dbname_to_webstatscollector_abbreviation(
                    dbname, 'zero')
                count_zero = get_daily_count(
                    source_dir_abs,
                    abbreviation,
                    date,
                    allow_bad_data,
                    output_projectviews,
                )

                count_total = count_desktop
                if date >= DATE_MOBILE_ADDED:
                    count_total += count_mobile + count_zero

                # injecting obtained data
                util.update_csv_data_dict(
                    csv_data,
                    date_str,
                    count_total,
                    count_desktop,
                    count_mobile if date >= DATE_MOBILE_ADDED else None,
                    count_zero if date >= DATE_MOBILE_ADDED else None)

        _write_raw_and_aggregated_csv_data(
            target_dir_abs, dbname, csv_data, first_date, last_date,
            additional_aggregators, bad_dates, force_recomputation)

        # Aggregates values across all projects
        if compute_all_projects:
            util.merge_sum_csv_data_dict(all_projects_data, csv_data)

    # Writes aggregations across all projects
    if compute_all_projects:
        oldest_date = util.parse_string_to_date(min(all_projects_data.keys()))
        newest_date = util.parse_string_to_date(max(all_projects_data.keys()))
        _write_raw_and_aggregated_csv_data(
            target_dir_abs, 'all', all_projects_data, oldest_date,
            newest_date, additional_aggregators, bad_dates,
            force_recomputation)


def _write_raw_and_aggregated_csv_data(
        target_dir_abs, dbname, csv_data, first_date, last_date,
        additional_aggregators, bad_dates, force_recomputation):
    """Writes the data passed in the csv_data dict to various destinations:

    1. Writes the raw data (as it comes in the csv_data dict) to:
       <target_dir_abs>/daily_raw/<dbname>.csv
    2. Uses each aggregator in additional_aggregators to write the data to
       the aggregator's specific location.

    Note: Some of this method's parameters are just forwarded to these
    aggregators.

    :param target_dir_abs: Absolute directory of the per project CSVs.
    :param dbname: The database name of the wiki to consider (E.g.: 'enwiki')
    :param csv_data: The dict containing the data to be written.
    :param first_date: The first date to write non-existing data for.
    :param last_date: The last date to write non-existing data for.
    :param additional_aggregators: See update_per_project_csvs_for_dates.
    :param bad_dates: List of dates considered having bad data.
    :param force_recomputation: If True, recompute data for the given days.
""" csv_file_abs = os.path.join(target_dir_abs, 'daily_raw', dbname + '.csv') util.write_dict_values_sorted_to_csv( csv_file_abs, csv_data, header=CSV_HEADER) for additional_aggregator in additional_aggregators: additional_aggregator( target_dir_abs, dbname, csv_data, first_date, last_date, bad_dates=bad_dates, force_recomputation=force_recomputation) def _get_validity_issues_for_aggregated_projectcounts_generic( csv_dir_abs, total_expected, desktop_site_expected, mobile_site_expected, zero_site_expected, current_date_strs): """Gets a list of obvious validity issues for a directory of CSVs :param csv_dir_abs: Absolute directory of the per project CSVs. :param total_expected: Expect at least that many views to overal on big wikis. :param desktop_site_expected: Expect at least that many views to desktop site on big wikis. :param mobile_site_expected: Expect at least that many views to mobile site on big wikis. :param zero_site_expected: Expect at least that many views to zero site on big wikis. :param current_date_strs: Expect one of those as date string of the final item in the CSVs. """ issues = [] dbnames = [] big_wikis = [ 'enwiki', 'jawiki', 'dewiki', 'eswiki', 'frwiki', 'ruwiki', 'itwiki', ] for csv_file_abs in sorted(glob.glob(os.path.join(csv_dir_abs, '*.csv'))): logging.info("Checking csv '%s'" % (csv_file_abs)) dbname = os.path.basename(csv_file_abs) dbname = dbname.rsplit('.csv', 1)[0] dbnames.append(dbname) with open(csv_file_abs, 'r') as file: lines = file.readlines() if len(lines): # Analyze last line last_line = (lines[-1]).rstrip('\n\r') last_line_split = last_line.split(',') if len(last_line_split) == 5: # Check if last line is current. try: if last_line_split[0] not in current_date_strs: issues.append("Last line of %s is too old " "'%s'" % (csv_file_abs, last_line)) except ValueError: issues.append("Last line of %s is too old " "'%s'" % (csv_file_abs, last_line)) if dbname in big_wikis: # Check total count try: if int(last_line_split[1]) < total_expected: issues.append("Total count of last line of " "%s is too low '%s'" % ( csv_file_abs, last_line)) except ValueError: issues.append("Total count of last line of %s is" "not an integer '%s'" % ( csv_file_abs, last_line)) # Check desktop count try: if int(last_line_split[2]) < desktop_site_expected: issues.append("Desktop count of last line of " "%s is too low '%s'" % ( csv_file_abs, last_line)) except ValueError: issues.append("Desktop count of last line of %s is" "not an integer '%s'" % ( csv_file_abs, last_line)) # Check mobile count try: if int(last_line_split[3]) < mobile_site_expected: issues.append("Mobile count of last line of " "%s is too low '%s'" % ( csv_file_abs, last_line)) except ValueError: issues.append("Mobile count of last line of %s is" "not an integer '%s'" % ( csv_file_abs, last_line)) # Check zero count try: if int(last_line_split[4]) < zero_site_expected: issues.append("Zero count of last line of " "%s is too low '%s'" % ( csv_file_abs, last_line)) except ValueError: issues.append("Desktop count of last line of %s is" "not an integer '%s'" % ( csv_file_abs, last_line)) # Check zero count try: if int(last_line_split[1]) != \ int(last_line_split[2]) + \ int(last_line_split[3]) + \ int(last_line_split[4]): issues.append( "Total column is not the sum of " "individual columns in '%s' for %s" % ( last_line, csv_file_abs)) except ValueError: # Some column is not a number. This has already # been reported above, so we just pass. 
                        pass
            else:
                issues.append("Last line of %s does not have 5 columns: "
                              "'%s'" % (csv_file_abs, last_line))
        else:
            issues.append("No lines for %s" % csv_file_abs)

    if not len(dbnames):
        issues.append("Could not find any CSVs")

    if set(big_wikis) - set(dbnames):
        issues.append("Not all big wikis covered (Missing: %s)" % (
            [x for x in (set(big_wikis) - set(dbnames))]))

    if not (set(dbnames) - set(big_wikis)):
        issues.append("No wikis beyond the big wikis")

    return sorted(issues)


def get_validity_issues_for_aggregated_projectcounts(data_dir_abs):
    """Gets a list of obvious validity issues of aggregated projectcount CSVs

    :param data_dir_abs: Absolute directory of the per project CSVs.
    """
    issues = []

    current_dates = [
        datetime.date.today(),
        util.parse_string_to_date('yesterday')
    ]

    # daily_raw files
    issues.extend(_get_validity_issues_for_aggregated_projectcounts_generic(
        os.path.join(data_dir_abs, 'daily_raw'),
        1000000, 1000000, 10000, 100,
        set(date.isoformat() for date in current_dates)
    ))

    # daily files
    issues.extend(_get_validity_issues_for_aggregated_projectcounts_generic(
        os.path.join(data_dir_abs, 'daily'),
        1000000, 1000000, 10000, 100,
        set(date.isoformat() for date in current_dates)
    ))

    # weekly files
    issues.extend(_get_validity_issues_for_aggregated_projectcounts_generic(
        os.path.join(data_dir_abs, 'weekly_rescaled'),
        10000000, 10000000, 100000, 1000,
        set((date - datetime.timedelta(days=6)).strftime('%GW%V')
            for date in current_dates)
    ))

    # monthly files
    issues.extend(_get_validity_issues_for_aggregated_projectcounts_generic(
        os.path.join(data_dir_abs, 'monthly_rescaled'),
        50000000, 50000000, 500000, 5000,
        set((
            datetime.date(date.year, date.month, 1) -
            datetime.timedelta(days=1)
        ).strftime('%Y-%m') for date in current_dates)
    ))

    # yearly files
    issues.extend(_get_validity_issues_for_aggregated_projectcounts_generic(
        os.path.join(data_dir_abs, 'yearly_rescaled'),
        700000000, 700000000, 7000000, 70000,
        set((
            datetime.date(date.year, 1, 1) -
            datetime.timedelta(days=1)
        ).strftime('%Y') for date in current_dates)
    ))

    return issues
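# Editor's note: the weekly/monthly/yearly aggregators above all delegate to
# rescale_counts(), which is defined elsewhere in this module. The function
# below is a hypothetical sketch (NOT the original implementation) that only
# illustrates the rescaling contract the docstrings describe ("a week's total
# data is rescaled to 7 days"), assuming csv_data_input maps ISO date strings
# to rows of the form [date_str, total, desktop, mobile, zero].
def rescale_counts_sketch(csv_data_input, dates, bad_dates, rescale_to):
    """Sum counts over the good dates in `dates`, rescaled to `rescale_to`
    days. Callers guarantee at least one good date."""
    good_dates = sorted(d for d in dates if d not in bad_dates)
    totals = None
    for d in good_dates:
        # Skip the leading date column; assume the rest are integer counts.
        values = [int(v) for v in csv_data_input[d.isoformat()][1:]]
        totals = values if totals is None else [
            t + v for t, v in zip(totals, values)]
    # E.g. a week with only 5 good days gets scaled by 7/5 to estimate a
    # full 7-day week.
    return [int(round(t * float(rescale_to) / len(good_dates)))
            for t in totals]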
apache-2.0
cccfran/sympy
sympy/liealgebras/type_d.py
17
4768
from .cartan_type import Standard_Cartan
from sympy.matrices import eye


class TypeD(Standard_Cartan):

    def __new__(cls, n):
        if n < 3:
            raise ValueError("n cannot be less than 3")
        return Standard_Cartan.__new__(cls, "D", n)

    def dimension(self):
        """Returns the dimension of the vector space V underlying
        the Lie algebra.

        Example
        =======

        >>> from sympy.liealgebras.cartan_type import CartanType
        >>> c = CartanType("D4")
        >>> c.dimension()
        4
        """
        return self.n

    def basic_root(self, i, j):
        """Generate a root with a 1 in the ith position and a -1
        in the jth position.
        """
        n = self.n
        root = [0]*n
        root[i] = 1
        root[j] = -1
        return root

    def simple_root(self, i):
        """Every Lie algebra has a unique root system.

        Given a root system Q, there is a subset of the roots such that an
        element of Q is called a simple root if it cannot be written as the
        sum of two elements in Q. If we let D denote the set of simple roots,
        then it is clear that every element of Q can be written as a linear
        combination of elements of D with all coefficients non-negative.

        In D_n, the first n-1 simple roots are the same as the roots in
        A_(n-1) (a 1 in the ith position, a -1 in the (i+1)th position, and
        zeroes elsewhere). The nth simple root is the root with 1s in the
        nth and (n-1)th positions, and zeroes elsewhere.

        This method returns the ith simple root for the D series.

        Example
        =======

        >>> from sympy.liealgebras.cartan_type import CartanType
        >>> c = CartanType("D4")
        >>> c.simple_root(2)
        [0, 1, -1, 0]
        """
        n = self.n
        if i < n:
            return self.basic_root(i-1, i)
        else:
            root = [0]*n
            root[n-2] = 1
            root[n-1] = 1
            return root

    def positive_roots(self):
        """Generates all the positive roots of D_n.

        These are half of all of the roots of D_n; multiplying all the
        positive roots by -1 gives the negative roots.

        Example
        =======

        >>> from sympy.liealgebras.cartan_type import CartanType
        >>> c = CartanType("D3")
        >>> c.positive_roots()
        {1: [1, -1, 0], 2: [1, 1, 0], 3: [1, 0, -1], 4: [1, 0, 1], 5: [0, 1, -1], 6: [0, 1, 1]}
        """
        n = self.n
        posroots = {}
        k = 0
        for i in range(0, n-1):
            for j in range(i+1, n):
                k += 1
                posroots[k] = self.basic_root(i, j)
                k += 1
                root = self.basic_root(i, j)
                root[j] = 1
                posroots[k] = root
        return posroots

    def roots(self):
        """Returns the total number of roots for D_n."""
        n = self.n
        return 2*n*(n-1)

    def cartan_matrix(self):
        """Returns the Cartan matrix for D_n.

        The Cartan matrix of a Lie algebra is generated by assigning an
        ordering to the simple roots, (alpha[1], ..., alpha[l]). Then the
        ijth entry of the Cartan matrix is (<alpha[i], alpha[j]>).

        Example
        =======

        >>> from sympy.liealgebras.cartan_type import CartanType
        >>> c = CartanType('D4')
        >>> c.cartan_matrix()
        Matrix([
        [ 2, -1,  0,  0],
        [-1,  2, -1, -1],
        [ 0, -1,  2,  0],
        [ 0, -1,  0,  2]])
        """
        n = self.n
        m = 2*eye(n)
        i = 1
        while i < n-2:
            m[i, i+1] = -1
            m[i, i-1] = -1
            i += 1
        m[n-2, n-3] = -1
        m[n-3, n-1] = -1
        m[n-1, n-3] = -1
        m[0, 1] = -1
        return m

    def basis(self):
        """Returns the number of independent generators of D_n."""
        n = self.n
        return n*(n-1)/2

    def lie_algebra(self):
        """Returns the Lie algebra associated with D_n."""
        n = self.n
        return "so(" + str(2*n) + ")"

    def dynkin_diagram(self):
        n = self.n
        diag = " "*4*(n-3) + str(n-1) + "\n"
        diag += " "*4*(n-3) + "0\n"
        diag += " "*4*(n-3) + "|\n"
        diag += " "*4*(n-3) + "|\n"
        diag += "---".join("0" for i in range(1, n)) + "\n"
        diag += "   ".join(str(i) for i in range(1, n-1)) + "  " + str(n)
        return diag
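# Editor's note: an illustrative usage sketch appended by the editor, not
# part of the original file. It exercises the documented API end to end via
# CartanType, following the docstrings above (D4: dimension 4,
# 2*n*(n-1) = 24 roots, Lie algebra so(8)):
#
#     >>> from sympy.liealgebras.cartan_type import CartanType
#     >>> c = CartanType("D4")
#     >>> c.dimension()
#     4
#     >>> c.simple_root(2)
#     [0, 1, -1, 0]
#     >>> c.roots()
#     24
#     >>> c.lie_algebra()
#     'so(8)'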
bsd-3-clause
raubana/NeverEndingDungeon
libs/TileSystem.py
1
19615
# ====== IMPORTS ====== # --- Pygame --- import pygame # --- Custom Modules --- from common import copy_color, lerp_colors # --- Misc. --- import random, math # ====== CONSTANTS ====== TILE_SIZE = 48 # The 2D size of a side of a single tile in pixels. TILE_FLOOR_COLOR = (96,96,96) TILE_GRASSTILE_COLOR = (100,165,75) TILE_DIRTWALLTILE_COLOR = (85,50,25) TILE_DIRTFLOORTILE_COLOR = (135,100,55) TILE_TRIGGERTILE_COLOR = TILE_FLOOR_COLOR#lerp_colors(TILE_FLOOR_COLOR,(255,0,0),0.1) TILE_SPAWNER1TILE_COLOR = (192,192,127) TILE_SPAWNER2TILE_COLOR = (127,127,192) TILE_FLATTENED_COLOR = (127,127,127) TILE_WALLTILE_COLOR = (64,127,192) TILE_PIT_COLOR = (0,0,0) TILE_HINT_COLOR_STRENGTH = 0.5 OUTLINE_NORMAL = 1 OUTLINE_OUTSET = 2 OUTLINE_INSET = 3 DEBUG_FORCE_FULL_RERENDER = False # ====== FUNCTIONS ====== def offset_to_coords(offset): """This is for converting a VisibleGrid's offset to coordinates, which tell us which tile is in the top-left corner.""" return (-int(offset[0]/TILE_SIZE), -int(offset[1]/TILE_SIZE)) def round_coords(coords): return (int(math.floor(coords[0])), int(math.floor(coords[1]))) def get_flattened_grid(grid, size = None): if size == None: size = grid.gridsize new_grid = Grid(grid.main) new_grid.gridsize = size new_grid.tiles = [] for y in xrange(size[1]): row = [] for x in xrange(size[0]): is_pit = False is_solid = False new_tile = Tile(grid.main) new_tile.color = TILE_FLATTENED_COLOR if x < grid.gridsize[0] and y < grid.gridsize[1]: color = grid.tiles[y][x].color if grid.tiles[y][x].is_a_pit: is_pit = True elif grid.tiles[y][x].solid: is_solid = True else: color = TILE_WALLTILE_COLOR is_solid = True new_tile.color = lerp_colors(new_tile.color, color, TILE_HINT_COLOR_STRENGTH) if is_pit: new_tile.outline_type = OUTLINE_NORMAL new_tile.outline_strength = 0.025 else: new_tile.outline_type = OUTLINE_OUTSET row.append(new_tile) new_grid.tiles.append(row) return new_grid # ====== CLASSES ====== class Grid(object): """ This class contains the tiles of the entire world. """ def __init__(self, main, size = (0,0)): self.main = main self.gridsize = size # in tiles. self.tiles = [] for y in xrange(size[1]): row = [] for x in xrange(size[0]): row.append(Tile(main)) self.tiles.append(row) def is_legal_coords(self, coords, ignore_type = False): """ Checks if the given coordinates are within the world (specifically meaning if a tile exists at that exact location). """ #first we check if this coord is within the bounds of the grid. if coords[0] < 0 or coords[1] < 0 or coords[0] >= self.gridsize[0] or coords[1] >= self.gridsize[1]: return False #next we check if there's a tile at this location. if ignore_type: return True pos = round_coords(coords) return not self.tiles[pos[1]][pos[0]].is_a_pit def get_path(self, start_coords, end_coords, avoid_mob=True, shit_path_freq=0.1): #We also assume that the mobs can not go diagonally. #Shit paths are when the algorithm take into account stupid and often longer paths. 
start = (int(start_coords[0]),int(start_coords[1])) end = (int(end_coords[0]),int(end_coords[1])) open_list = [] closed_list = [] #we start by adding the start_coords to the open_list # (pos, prev pos, total distance covered, score) open_list.append( (start, [start], 0, self.score_coords(start, start, end, 0, avoid_mob)) ) #now we start our loop while len(open_list) > 0: #we need to find our best scores in the open list best_score = None best_scores = [] for tile in open_list: if best_score == None or tile[3] < best_score: best_score = tile[3] best_scores = [tile] elif tile[3] <= best_score+1 and random.random() < shit_path_freq: #This is to add a little randomness, so that mob don't overlap too often. best_scores.append(tile) #now we pick from those pick = random.choice(best_scores) #we need to move this tile from the open list to the closed list open_list.remove(pick) closed_list.append(pick) #we need to check if that was the end_coords, in which case we'll drop from the loop. #we need to put all of the neighbors into the open list offsets = ((-1,0),(0,-1),(1,0),(0,1)) for offset in offsets: pos = (pick[0][0]+offset[0], pick[0][1]+offset[1]) #we need to check if this is a legal position if self.is_legal_coords(pos): #next we check if it's an open and safe tile tile = self.tiles[pos[1]][pos[0]] if tile != None and not tile.solid: #next we must check if this pos is already in the open or closed lists match = False for pos2 in open_list+closed_list: if pos2[0] == pos: match = True break if not match: #This is a legal pos and it's not already in any of the lists, so we add it to the open_list. dist_covered = pick[2]+abs(pick[0][0]-pos[0])+abs(pick[0][1]-pos[1]) open_list.append( (pos, pick, dist_covered, self.score_coords(pos, pick[1][0], end, dist_covered, avoid_mob)) ) #first we must find our closest tile in the closed_list closest_dist = None closest_tile = None for tile in closed_list: dif = abs(tile[0][0]-end[0]) + abs(tile[0][1]-end[1]) if closest_dist == None or dif < closest_dist: closest_dist = dif closest_tile = tile #now we work our way backwards to get our path current_tile = closest_tile path = [] while current_tile[1][0] != current_tile[0]: pos = current_tile[0] pos = (pos[0]*TILE_SIZE+(TILE_SIZE/2), pos[1]*TILE_SIZE+(TILE_SIZE/2)) path.append(pos) current_tile = current_tile[1] pos = (start[0]*TILE_SIZE+(TILE_SIZE/2), start[1]*TILE_SIZE+(TILE_SIZE/2)) path.append(pos) path.reverse() #finally we return our path return path def score_coords(self, prev_coords, end_coords, target_coords, prev_distance, avoid_mob): prev = (int(prev_coords[0]),int(prev_coords[1])) end = (int(end_coords[0]),int(end_coords[1])) target = (int(target_coords[0]),int(target_coords[1])) new_score = int(prev_distance) dif = (end[0]-target[0], end[1]-target[1]) dist = (dif[0]**2 + dif[1]**2)**0.5 new_score += dist new_score += abs(end[0]-prev[0]) new_score += abs(end[1]-prev[1]) if avoid_mob: occupied = False #rect = pygame.Rect([end_coords[0]*TILE_SIZE,end_coords[1]*TILE_SIZE,TILE_SIZE,TILE_SIZE]) for npc in self.main.world.npcs: """ if npc.rect.colliderect(rect): left = rect.right - npc.rect.left top = rect.bottom - npc.rect.top right = npc.rect.right - rect.left bottom = npc.rect.bottom - rect.top m = min(left,top,right,bottom) if m > 0: """ if end_coords == npc.coords: occupied = True break if occupied: new_score += 10 return new_score class Tile(object): def __init__(self, main): self.main = main self.solid = False self.is_a_pit = False self.flag_for_rerender() self.rendered_surface = 
pygame.Surface((TILE_SIZE,TILE_SIZE)) self.color = copy_color(TILE_FLOOR_COLOR) self.outline_strength = 0.1 self.outline_size = 1 self.outline_type = OUTLINE_OUTSET self.init() def init(self): pass def set_color(self, new_color): if new_color != self.color: self.color = new_color self.flag_for_rerender() def flag_for_rerender(self): self.flagged_for_rerender = True def rerender(self): if self.flagged_for_rerender: self.flagged_for_rerender = False color = self.color if self.outline_size > 0: if self.outline_type == OUTLINE_NORMAL: self.rendered_surface.fill(color) outline_color = lerp_colors(color, (0,0,0), self.outline_strength) pygame.draw.rect(self.rendered_surface, outline_color, (0,0,TILE_SIZE,TILE_SIZE), self.outline_size) elif self.outline_type in (OUTLINE_INSET, OUTLINE_OUTSET): self.rendered_surface.fill(lerp_colors(color, (0,0,0), self.outline_strength*0.5)) c1 = lerp_colors(color, (255,255,255), self.outline_strength*0.25) c2 = lerp_colors(color, (0,0,0), self.outline_strength) if self.outline_type == OUTLINE_INSET: c1,c2 = c2,c1 p1 = (self.outline_size, TILE_SIZE-self.outline_size) p2 = (TILE_SIZE-self.outline_size, self.outline_size) p3 = (self.outline_size,self.outline_size) p4 = (TILE_SIZE-self.outline_size,TILE_SIZE-self.outline_size) pygame.draw.polygon(self.rendered_surface, c1, ((0,0),(TILE_SIZE,0),p2,p3,p1,(0,TILE_SIZE))) pygame.draw.polygon(self.rendered_surface, c2, ((TILE_SIZE,TILE_SIZE),(0,TILE_SIZE),p1,p4,p2,(TILE_SIZE,0))) else: self.rendered_surface.fill(color) def render(self, surface, pos): self.rerender() surface.blit(self.rendered_surface, pos) class GrassTile(Tile): def init(self): self.color = TILE_GRASSTILE_COLOR self.outline_size = 0 class DirtWallTile(Tile): def init(self): self.solid = True self.color = TILE_DIRTWALLTILE_COLOR self.outline_strength = 0.35 self.outline_size = 3 self.outline_type = OUTLINE_OUTSET class DirtFloorTile(Tile): def init(self): self.color = TILE_DIRTFLOORTILE_COLOR self.outline_size = 0 class TriggerTile(Tile): def init(self): self.id = "1" self.color = TILE_TRIGGERTILE_COLOR class Spawner1Tile(Tile): def init(self): self.color = TILE_SPAWNER1TILE_COLOR class Spawner2Tile(Tile): def init(self): self.color = TILE_SPAWNER2TILE_COLOR class WallTile(Tile): def init(self): self.solid = True self.color = copy_color(TILE_WALLTILE_COLOR) self.outline_strength = 0.35 self.outline_size = 3 self.outline_type = OUTLINE_OUTSET class PitTile(Tile): def init(self): self.is_a_pit = True self.color = copy_color(TILE_PIT_COLOR) self.outline_type = OUTLINE_INSET self.outline_size = 0 class BushTile(Tile): def init(self): self.color = TILE_FLOOR_COLOR self.solid = True self.bush = pygame.image.load("imgs/tiles/bush.png") def rerender(self): if self.flagged_for_rerender: self.flagged_for_rerender = False color = self.color if self.outline_size > 0: if self.outline_type == OUTLINE_NORMAL: self.rendered_surface.fill(color) outline_color = lerp_colors(color, (0,0,0), self.outline_strength) pygame.draw.rect(self.rendered_surface, outline_color, (0,0,TILE_SIZE,TILE_SIZE), self.outline_size) elif self.outline_type in (OUTLINE_INSET, OUTLINE_OUTSET): self.rendered_surface.fill(lerp_colors(color, (0,0,0), self.outline_strength*0.5)) c1 = lerp_colors(color, (255,255,255), self.outline_strength*0.25) c2 = lerp_colors(color, (0,0,0), self.outline_strength) if self.outline_type == OUTLINE_INSET: c1,c2 = c2,c1 p1 = (self.outline_size, TILE_SIZE-self.outline_size) p2 = (TILE_SIZE-self.outline_size, self.outline_size) p3 = (self.outline_size,self.outline_size) p4 
= (TILE_SIZE-self.outline_size,TILE_SIZE-self.outline_size) pygame.draw.polygon(self.rendered_surface, c1, ((0,0),(TILE_SIZE,0),p2,p3,p1,(0,TILE_SIZE))) pygame.draw.polygon(self.rendered_surface, c2, ((TILE_SIZE,TILE_SIZE),(0,TILE_SIZE),p1,p4,p2,(TILE_SIZE,0))) else: self.rendered_surface.fill(color) self.rendered_surface.blit(self.bush,(0,0)) class CrackedTile(Tile): def init(self): self.crack = pygame.image.load("imgs/tiles/cracked_tile1.png") #self.crack = pygame.transform.flip(self.crack, bool(random.randint(0,1)), bool(random.randint(0,1))) #self.crack = pygame.transform.rotate(self.crack, random.randint(0,3)*90) def rerender(self): if self.flagged_for_rerender: self.flagged_for_rerender = False color = self.color if self.outline_size > 0: if self.outline_type == OUTLINE_NORMAL: self.rendered_surface.fill(color) outline_color = lerp_colors(color, (0,0,0), self.outline_strength) pygame.draw.rect(self.rendered_surface, outline_color, (0,0,TILE_SIZE,TILE_SIZE), self.outline_size) self.rendered_surface.blit(self.crack,(0,0)) elif self.outline_type in (OUTLINE_INSET, OUTLINE_OUTSET): self.rendered_surface.fill(lerp_colors(color, (0,0,0), self.outline_strength*0.5)) self.rendered_surface.blit(self.crack,(0,0)) c1 = lerp_colors(color, (255,255,255), self.outline_strength*0.25) c2 = lerp_colors(color, (0,0,0), self.outline_strength) if self.outline_type == OUTLINE_INSET: c1,c2 = c2,c1 p1 = (self.outline_size, TILE_SIZE-self.outline_size) p2 = (TILE_SIZE-self.outline_size, self.outline_size) p3 = (self.outline_size,self.outline_size) p4 = (TILE_SIZE-self.outline_size,TILE_SIZE-self.outline_size) pygame.draw.polygon(self.rendered_surface, c1, ((0,0),(TILE_SIZE,0),p2,p3,p1,(0,TILE_SIZE))) pygame.draw.polygon(self.rendered_surface, c2, ((TILE_SIZE,TILE_SIZE),(0,TILE_SIZE),p1,p4,p2,(TILE_SIZE,0))) else: self.rendered_surface.fill(color) class CrackedWallTile(Tile): def init(self): self.solid = True self.color = copy_color(TILE_WALLTILE_COLOR) self.outline_strength = 0.35 self.outline_size = 3 self.outline_type = OUTLINE_OUTSET self.crack = pygame.image.load("imgs/tiles/cracked_tile1.png") #self.crack = pygame.transform.flip(self.crack, bool(random.randint(0,1)), bool(random.randint(0,1))) #self.crack = pygame.transform.rotate(self.crack, random.randint(0,3)*90) def rerender(self): if self.flagged_for_rerender: self.flagged_for_rerender = False color = self.color if self.outline_size > 0: if self.outline_type == OUTLINE_NORMAL: self.rendered_surface.fill(color) outline_color = lerp_colors(color, (0,0,0), self.outline_strength) pygame.draw.rect(self.rendered_surface, outline_color, (0,0,TILE_SIZE,TILE_SIZE), self.outline_size) self.rendered_surface.blit(self.crack,(0,0)) elif self.outline_type in (OUTLINE_INSET, OUTLINE_OUTSET): self.rendered_surface.fill(lerp_colors(color, (0,0,0), self.outline_strength*0.5)) self.rendered_surface.blit(self.crack,(0,0)) c1 = lerp_colors(color, (255,255,255), self.outline_strength*0.25) c2 = lerp_colors(color, (0,0,0), self.outline_strength) if self.outline_type == OUTLINE_INSET: c1,c2 = c2,c1 p1 = (self.outline_size, TILE_SIZE-self.outline_size) p2 = (TILE_SIZE-self.outline_size, self.outline_size) p3 = (self.outline_size,self.outline_size) p4 = (TILE_SIZE-self.outline_size,TILE_SIZE-self.outline_size) pygame.draw.polygon(self.rendered_surface, c1, ((0,0),(TILE_SIZE,0),p2,p3,p1,(0,TILE_SIZE))) pygame.draw.polygon(self.rendered_surface, c2, ((TILE_SIZE,TILE_SIZE),(0,TILE_SIZE),p1,p4,p2,(TILE_SIZE,0))) else: self.rendered_surface.fill(color) class 
VisibleGrid(object): """ This is the class that handles the 'side-scrolling' rendering. It will handle generating and rendering of the background by way of tiles. Visible offset is created when blitting this classes rendered surface to the screen. """ def __init__(self, main): self.main = main self.gridsize = self.calc_gridsize() self.rendered_surface = pygame.Surface((self.gridsize[0]*TILE_SIZE, self.gridsize[1]*TILE_SIZE)) self.flagged_for_rerender = True # Only True when the rendered_surface needs to be completely redone. #For example, when the offset changes enough that an entire new row or column becomes visible. self.offset = (0, 0) # in pixels, this is relative to the screen. self.prev_offset = (0, 0) # this is for checking if we can reuse some of the data from the surface. self.coords = (0, 0) # in tiles, relative to the origin of the actual grid. self.prev_coords = (0, 0) self.filter = 0 self.filter_color = (255,255,255) wall = WallTile(self.main) wall.rerender() self.wall_texture = wall.rendered_surface for y in xrange(self.gridsize[1]): for x in xrange(self.gridsize[0]): self.rendered_surface.blit(self.wall_texture, (x*TILE_SIZE,y*TILE_SIZE)) self.force_full_rerender = False def apply_filter(self, filter_color, filter_type): self.filter = filter_type self.filter_color = filter_color self.rendered_surface.fill(filter_color, None, filter_type) def calc_gridsize(self): return (self.main.screen_size[0] / TILE_SIZE + 2, self.main.screen_size[1] / TILE_SIZE + 2) def set_offset(self, offset): offset = (int(offset[0]),int(offset[1])) if offset != self.offset: self.offset = offset new_coords = offset_to_coords(offset) if new_coords != self.coords: #if new_coords == self.prev_coords: # self.unflag_for_rerender() #else: self.flag_for_rerender() self.coords = tuple(new_coords) def flag_for_rerender(self): # This can be called by anything, and will usually be called when: # 1. A visible tile changes in appearance. # 2. The offset becomes great enough that an entire new row or column becomes visible. self.flagged_for_rerender = True def unflag_for_rerender(self): self.flagged_for_rerender = False def rerender(self): """ RERENDER SHOULD NEVER BE CALLED OUTSIDE OF THIS CLASS. Rerendering is not the same as Rendering. Rerendering fixes problems with the pre-rendered surface that is then rendered to the screen. However, calling "rerender" does not directly affect the screen. """ if self.flagged_for_rerender or self.force_full_rerender or DEBUG_FORCE_FULL_RERENDER: self.flagged_for_rerender = False coord_dif = (self.coords[0]-self.prev_coords[0], self.coords[1]-self.prev_coords[1]) #first we try to reuse some of the old stuff again. if (coord_dif[0] != 0 or coord_dif[1] != 0) and abs(coord_dif[0]) < self.gridsize[0] and abs(coord_dif[1]) < self.gridsize[1]: if not (self.force_full_rerender or DEBUG_FORCE_FULL_RERENDER): self.rendered_surface.blit(self.rendered_surface,(-coord_dif[0]*TILE_SIZE, -coord_dif[1]*TILE_SIZE)) # next we check for any tiles that need to be rendered to the surface, # including those that may have simply changed their appearance. 
for x in xrange(self.gridsize[0]): for y in xrange(self.gridsize[1]): is_new_tile = self.force_full_rerender or DEBUG_FORCE_FULL_RERENDER is_new_tile = is_new_tile or (coord_dif[0] < 0 and x < -coord_dif[0]) is_new_tile = is_new_tile or (coord_dif[1] < 0 and y < -coord_dif[1]) is_new_tile = is_new_tile or (coord_dif[0] > 0 and x >= self.gridsize[0]-coord_dif[0]) is_new_tile = is_new_tile or (coord_dif[1] > 0 and y >= self.gridsize[1]-coord_dif[1]) pos = (x+self.coords[0]-1,y+self.coords[1]-1) if self.main.world.grid.is_legal_coords(pos, ignore_type = True): tile = self.main.world.grid.tiles[pos[1]][pos[0]] else: tile = None if (tile != None and (tile.flagged_for_rerender or is_new_tile)) or (tile == None and is_new_tile): if tile: # We tell the tile to render to the surface. tile.render(self.rendered_surface, (x*TILE_SIZE,y*TILE_SIZE)) else: self.rendered_surface.blit(self.wall_texture,(x*TILE_SIZE,y*TILE_SIZE)) #self.rendered_surface.fill((0,0,0),(x*TILE_SIZE,y*TILE_SIZE,TILE_SIZE,TILE_SIZE)) if self.filter != 0: self.rendered_surface.fill(self.filter_color, (x*TILE_SIZE,y*TILE_SIZE, TILE_SIZE, TILE_SIZE), special_flags=self.filter) self.force_full_rerender = False def render(self): """ Render is only called by world or main. """ self.rerender() #In case the pre-rendered surface needs to be changed. offset = (int(self.offset[0]%TILE_SIZE)-TILE_SIZE, int(self.offset[1]%TILE_SIZE)-TILE_SIZE) self.main.screen.blit(self.rendered_surface, offset) self.prev_offset = tuple(self.offset) self.prev_coords = tuple(self.coords)
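# Editor's note: illustrative sketch appended by the editor, not part of the
# original file. It demonstrates the offset-to-coordinate math above, which
# divides a pixel offset by TILE_SIZE (48) and negates it, and assumes the
# repo's sibling modules (e.g. common) are importable so this file loads.
if __name__ == '__main__':
    # A camera offset of (-96, -48) pixels puts the tile at grid
    # coordinates (2, 1) in the top-left corner.
    assert offset_to_coords((-96, -48)) == (2, 1)
    # round_coords floors fractional tile coordinates to whole tiles.
    assert round_coords((2.7, 1.1)) == (2, 1)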
gpl-3.0
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Tools/pynche/StripViewer.py
100
15465
"""Strip viewer and related widgets. The classes in this file implement the StripViewer shown in the top two thirds of the main Pynche window. It consists of three StripWidgets which display the variations in red, green, and blue respectively of the currently selected r/g/b color value. Each StripWidget shows the color variations that are reachable by varying an axis of the currently selected color. So for example, if the color is (R,G,B)=(127,163,196) then the Red variations show colors from (0,163,196) to (255,163,196), the Green variations show colors from (127,0,196) to (127,255,196), and the Blue variations show colors from (127,163,0) to (127,163,255). The selected color is always visible in all three StripWidgets, and in fact each StripWidget highlights the selected color, and has an arrow pointing to the selected chip, which includes the value along that particular axis. Clicking on any chip in any StripWidget selects that color, and updates all arrows and other windows. By toggling on Update while dragging, Pynche will select the color under the cursor while you drag it, but be forewarned that this can be slow. """ from Tkinter import * import ColorDB # Load this script into the Tcl interpreter and call it in # StripWidget.set_color(). This is about as fast as it can be with the # current _tkinter.c interface, which doesn't support Tcl Objects. TCLPROC = '''\ proc setcolor {canv colors} { set i 1 foreach c $colors { $canv itemconfigure $i -fill $c -outline $c incr i } } ''' # Tcl event types BTNDOWN = 4 BTNUP = 5 BTNDRAG = 6 SPACE = ' ' def constant(numchips): step = 255.0 / (numchips - 1) start = 0.0 seq = [] while numchips > 0: seq.append(int(start)) start = start + step numchips = numchips - 1 return seq # red variations, green+blue = cyan constant def constant_red_generator(numchips, red, green, blue): seq = constant(numchips) return map(None, [red] * numchips, seq, seq) # green variations, red+blue = magenta constant def constant_green_generator(numchips, red, green, blue): seq = constant(numchips) return map(None, seq, [green] * numchips, seq) # blue variations, red+green = yellow constant def constant_blue_generator(numchips, red, green, blue): seq = constant(numchips) return map(None, seq, seq, [blue] * numchips) # red variations, green+blue = cyan constant def constant_cyan_generator(numchips, red, green, blue): seq = constant(numchips) return map(None, seq, [green] * numchips, [blue] * numchips) # green variations, red+blue = magenta constant def constant_magenta_generator(numchips, red, green, blue): seq = constant(numchips) return map(None, [red] * numchips, seq, [blue] * numchips) # blue variations, red+green = yellow constant def constant_yellow_generator(numchips, red, green, blue): seq = constant(numchips) return map(None, [red] * numchips, [green] * numchips, seq) class LeftArrow: _ARROWWIDTH = 30 _ARROWHEIGHT = 15 _YOFFSET = 13 _TEXTYOFFSET = 1 _TAG = ('leftarrow',) def __init__(self, canvas, x): self._canvas = canvas self.__arrow, self.__text = self._create(x) self.move_to(x) def _create(self, x): arrow = self._canvas.create_line( x, self._ARROWHEIGHT + self._YOFFSET, x, self._YOFFSET, x + self._ARROWWIDTH, self._YOFFSET, arrow='first', width=3.0, tags=self._TAG) text = self._canvas.create_text( x + self._ARROWWIDTH + 13, self._ARROWHEIGHT - self._TEXTYOFFSET, tags=self._TAG, text='128') return arrow, text def _x(self): coords = self._canvas.coords(self._TAG) assert coords return coords[0] def move_to(self, x): deltax = x - self._x() self._canvas.move(self._TAG, 
deltax, 0) def set_text(self, text): self._canvas.itemconfigure(self.__text, text=text) class RightArrow(LeftArrow): _TAG = ('rightarrow',) def _create(self, x): arrow = self._canvas.create_line( x, self._YOFFSET, x + self._ARROWWIDTH, self._YOFFSET, x + self._ARROWWIDTH, self._ARROWHEIGHT + self._YOFFSET, arrow='last', width=3.0, tags=self._TAG) text = self._canvas.create_text( x - self._ARROWWIDTH + 15, # BAW: kludge self._ARROWHEIGHT - self._TEXTYOFFSET, justify=RIGHT, text='128', tags=self._TAG) return arrow, text def _x(self): coords = self._canvas.coords(self._TAG) assert coords return coords[0] + self._ARROWWIDTH class StripWidget: _CHIPHEIGHT = 50 _CHIPWIDTH = 10 _NUMCHIPS = 40 def __init__(self, switchboard, master = None, chipwidth = _CHIPWIDTH, chipheight = _CHIPHEIGHT, numchips = _NUMCHIPS, generator = None, axis = None, label = '', uwdvar = None, hexvar = None): # instance variables self.__generator = generator self.__axis = axis self.__numchips = numchips assert self.__axis in (0, 1, 2) self.__uwd = uwdvar self.__hexp = hexvar # the last chip selected self.__lastchip = None self.__sb = switchboard canvaswidth = numchips * (chipwidth + 1) canvasheight = chipheight + 43 # BAW: Kludge # create the canvas and pack it canvas = self.__canvas = Canvas(master, width=canvaswidth, height=canvasheight, ## borderwidth=2, ## relief=GROOVE ) canvas.pack() canvas.bind('<ButtonPress-1>', self.__select_chip) canvas.bind('<ButtonRelease-1>', self.__select_chip) canvas.bind('<B1-Motion>', self.__select_chip) # Load a proc into the Tcl interpreter. This is used in the # set_color() method to speed up setting the chip colors. canvas.tk.eval(TCLPROC) # create the color strip chips = self.__chips = [] x = 1 y = 30 tags = ('chip',) for c in range(self.__numchips): color = 'grey' canvas.create_rectangle( x, y, x+chipwidth, y+chipheight, fill=color, outline=color, tags=tags) x = x + chipwidth + 1 # for outline chips.append(color) # create the strip label self.__label = canvas.create_text( 3, y + chipheight + 8, text=label, anchor=W) # create the arrow and text item chipx = self.__arrow_x(0) self.__leftarrow = LeftArrow(canvas, chipx) chipx = self.__arrow_x(len(chips) - 1) self.__rightarrow = RightArrow(canvas, chipx) def __arrow_x(self, chipnum): coords = self.__canvas.coords(chipnum+1) assert coords x0, y0, x1, y1 = coords return (x1 + x0) / 2.0 # Invoked when one of the chips is clicked. 
This should just tell the # switchboard to set the color on all the output components def __select_chip(self, event=None): x = event.x y = event.y canvas = self.__canvas chip = canvas.find_overlapping(x, y, x, y) if chip and (1 <= chip[0] <= self.__numchips): color = self.__chips[chip[0]-1] red, green, blue = ColorDB.rrggbb_to_triplet(color) etype = int(event.type) if (etype == BTNUP or self.__uwd.get()): # update everyone self.__sb.update_views(red, green, blue) else: # just track the arrows self.__trackarrow(chip[0], (red, green, blue)) def __trackarrow(self, chip, rgbtuple): # invert the last chip if self.__lastchip is not None: color = self.__canvas.itemcget(self.__lastchip, 'fill') self.__canvas.itemconfigure(self.__lastchip, outline=color) self.__lastchip = chip # get the arrow's text coloraxis = rgbtuple[self.__axis] if self.__hexp.get(): # hex text = hex(coloraxis) else: # decimal text = repr(coloraxis) # move the arrow, and set its text if coloraxis <= 128: # use the left arrow self.__leftarrow.set_text(text) self.__leftarrow.move_to(self.__arrow_x(chip-1)) self.__rightarrow.move_to(-100) else: # use the right arrow self.__rightarrow.set_text(text) self.__rightarrow.move_to(self.__arrow_x(chip-1)) self.__leftarrow.move_to(-100) # and set the chip's outline brightness = ColorDB.triplet_to_brightness(rgbtuple) if brightness <= 128: outline = 'white' else: outline = 'black' self.__canvas.itemconfigure(chip, outline=outline) def update_yourself(self, red, green, blue): assert self.__generator i = 1 chip = 0 chips = self.__chips = [] tk = self.__canvas.tk # get the red, green, and blue components for all chips for t in self.__generator(self.__numchips, red, green, blue): rrggbb = ColorDB.triplet_to_rrggbb(t) chips.append(rrggbb) tred, tgreen, tblue = t if tred <= red and tgreen <= green and tblue <= blue: chip = i i = i + 1 # call the raw tcl script colors = SPACE.join(chips) tk.eval('setcolor %s {%s}' % (self.__canvas._w, colors)) # move the arrows around self.__trackarrow(chip, (red, green, blue)) def set(self, label, generator): self.__canvas.itemconfigure(self.__label, text=label) self.__generator = generator class StripViewer: def __init__(self, switchboard, master=None): self.__sb = switchboard optiondb = switchboard.optiondb() # create a frame inside the master. 
frame = Frame(master, relief=RAISED, borderwidth=1) frame.grid(row=1, column=0, columnspan=2, sticky='NSEW') # create the options to be used later uwd = self.__uwdvar = BooleanVar() uwd.set(optiondb.get('UPWHILEDRAG', 0)) hexp = self.__hexpvar = BooleanVar() hexp.set(optiondb.get('HEXSTRIP', 0)) # create the red, green, blue strips inside their own frame frame1 = Frame(frame) frame1.pack(expand=YES, fill=BOTH) self.__reds = StripWidget(switchboard, frame1, generator=constant_cyan_generator, axis=0, label='Red Variations', uwdvar=uwd, hexvar=hexp) self.__greens = StripWidget(switchboard, frame1, generator=constant_magenta_generator, axis=1, label='Green Variations', uwdvar=uwd, hexvar=hexp) self.__blues = StripWidget(switchboard, frame1, generator=constant_yellow_generator, axis=2, label='Blue Variations', uwdvar=uwd, hexvar=hexp) # create a frame to contain the controls frame2 = Frame(frame) frame2.pack(expand=YES, fill=BOTH) frame2.columnconfigure(0, weight=20) frame2.columnconfigure(2, weight=20) padx = 8 # create the black button blackbtn = Button(frame2, text='Black', command=self.__toblack) blackbtn.grid(row=0, column=0, rowspan=2, sticky=W, padx=padx) # create the controls uwdbtn = Checkbutton(frame2, text='Update while dragging', variable=uwd) uwdbtn.grid(row=0, column=1, sticky=W) hexbtn = Checkbutton(frame2, text='Hexadecimal', variable=hexp, command=self.__togglehex) hexbtn.grid(row=1, column=1, sticky=W) # XXX: ignore this feature for now; it doesn't work quite right yet ## gentypevar = self.__gentypevar = IntVar() ## self.__variations = Radiobutton(frame, ## text='Variations', ## variable=gentypevar, ## value=0, ## command=self.__togglegentype) ## self.__variations.grid(row=0, column=1, sticky=W) ## self.__constants = Radiobutton(frame, ## text='Constants', ## variable=gentypevar, ## value=1, ## command=self.__togglegentype) ## self.__constants.grid(row=1, column=1, sticky=W) # create the white button whitebtn = Button(frame2, text='White', command=self.__towhite) whitebtn.grid(row=0, column=2, rowspan=2, sticky=E, padx=padx) def update_yourself(self, red, green, blue): self.__reds.update_yourself(red, green, blue) self.__greens.update_yourself(red, green, blue) self.__blues.update_yourself(red, green, blue) def __togglehex(self, event=None): red, green, blue = self.__sb.current_rgb() self.update_yourself(red, green, blue) ## def __togglegentype(self, event=None): ## which = self.__gentypevar.get() ## if which == 0: ## self.__reds.set(label='Red Variations', ## generator=constant_cyan_generator) ## self.__greens.set(label='Green Variations', ## generator=constant_magenta_generator) ## self.__blues.set(label='Blue Variations', ## generator=constant_yellow_generator) ## elif which == 1: ## self.__reds.set(label='Red Constant', ## generator=constant_red_generator) ## self.__greens.set(label='Green Constant', ## generator=constant_green_generator) ## self.__blues.set(label='Blue Constant', ## generator=constant_blue_generator) ## else: ## assert 0 ## self.__sb.update_views_current() def __toblack(self, event=None): self.__sb.update_views(0, 0, 0) def __towhite(self, event=None): self.__sb.update_views(255, 255, 255) def save_options(self, optiondb): optiondb['UPWHILEDRAG'] = self.__uwdvar.get() optiondb['HEXSTRIP'] = self.__hexpvar.get()
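# Editor's note: illustrative sketch appended by the editor, not part of the
# original file. The generator helpers above spread a color axis evenly
# across the strip's chips: constant() divides 0..255 into numchips steps,
# and constant_red_generator holds red fixed while green and blue sweep.
if __name__ == '__main__':
    # 255 / (5 - 1) = 63.75 per step, truncated to ints.
    assert constant(5) == [0, 63, 127, 191, 255]
    # With red pinned at 128, the chips sweep the cyan (green+blue) axis;
    # the green/blue arguments are ignored by this generator.
    assert constant_red_generator(3, 128, 0, 0) == [
        (128, 0, 0), (128, 127, 127), (128, 255, 255)]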
mit
Desarrollo-CeSPI/meran
dev-plugins/node/lib/node_modules/npm/node_modules/node-gyp/legacy/tools/gyp/pylib/gyp/SCons.py
42
5836
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
SCons generator.

This contains class definitions and supporting functions for generating
pieces of SCons files for the different types of GYP targets.
"""

import os


def WriteList(fp, list, prefix='',
              separator=',\n    ',
              preamble=None,
              postamble=None):
  fp.write(preamble or '')
  fp.write((separator or ' ').join([prefix + l for l in list]))
  fp.write(postamble or '')


class TargetBase(object):
  """
  Base class for a SCons representation of a GYP target.
  """
  is_ignored = False
  target_prefix = ''
  target_suffix = ''

  def __init__(self, spec):
    self.spec = spec

  def full_product_name(self):
    """
    Returns the full name of the product being built:

      * Uses 'product_name' if it's set, else prefix + 'target_name'.
      * Prepends 'product_dir' if set.
      * Appends SCons suffix variables for the target type (or
        product_extension).
    """
    suffix = self.target_suffix
    product_extension = self.spec.get('product_extension')
    if product_extension:
      suffix = '.' + product_extension
    prefix = self.spec.get('product_prefix', self.target_prefix)
    name = self.spec['target_name']
    name = prefix + self.spec.get('product_name', name) + suffix
    product_dir = self.spec.get('product_dir')
    if product_dir:
      name = os.path.join(product_dir, name)
    else:
      name = os.path.join(self.out_dir, name)
    return name

  def write_input_files(self, fp):
    """
    Writes the definition of the input files (sources).
    """
    sources = self.spec.get('sources')
    if not sources:
      fp.write('\ninput_files = []\n')
      return
    preamble = '\ninput_files = [\n    '
    postamble = ',\n]\n'
    WriteList(fp, map(repr, sources), preamble=preamble, postamble=postamble)

  def builder_call(self):
    """
    Returns the actual SCons builder call to build this target.
    """
    name = self.full_product_name()
    return 'env.%s(env.File(%r), input_files)' % (self.builder_name, name)

  def write_target(self, fp, src_dir='', pre=''):
    """
    Writes the lines necessary to build this target.
    """
    fp.write('\n' + pre)
    fp.write('_outputs = %s\n' % self.builder_call())
    fp.write('target_files.extend(_outputs)\n')


class NoneTarget(TargetBase):
  """
  A GYP target type of 'none', implicitly or explicitly.
  """
  def write_target(self, fp, pre=''):
    fp.write('\ntarget_files.extend(input_files)\n')


class SettingsTarget(TargetBase):
  """
  A GYP target type of 'settings'.
  """
  is_ignored = True


compilable_sources_template = """
_result = []
for infile in input_files:
  if env.compilable(infile):
    if (type(infile) == type('')
        and (infile.startswith(%(src_dir)r)
             or not os.path.isabs(env.subst(infile)))):
      # Force files below the build directory by replacing all '..'
      # elements in the path with '__':
      base, ext = os.path.splitext(os.path.normpath(infile))
      base = [d == '..' and '__' or d for d in base.split('/')]
      base = os.path.join(*base)
      object = '${OBJ_DIR}/${COMPONENT_NAME}/${TARGET_NAME}/' + base
      if not infile.startswith(%(src_dir)r):
        infile = %(src_dir)r + infile
      infile = env.%(name)s(object, infile)[0]
    else:
      infile = env.%(name)s(infile)[0]
  _result.append(infile)
input_files = _result
"""


class CompilableSourcesTargetBase(TargetBase):
  """
  An abstract base class for targets that compile their source files.

  We explicitly transform compilable files into object files,
  even though SCons could infer that for us, because we want
  to control where the object file ends up.  (The implicit rules
  in SCons always put the object file next to the source file.)
  """
  intermediate_builder_name = None

  def write_target(self, fp, src_dir='', pre=''):
    if self.intermediate_builder_name is None:
      raise NotImplementedError
    if src_dir and not src_dir.endswith('/'):
      src_dir += '/'
    variables = {
        'src_dir': src_dir,
        'name': self.intermediate_builder_name,
    }
    fp.write(compilable_sources_template % variables)
    super(CompilableSourcesTargetBase, self).write_target(fp)


class ProgramTarget(CompilableSourcesTargetBase):
  """
  A GYP target type of 'executable'.
  """
  builder_name = 'GypProgram'
  intermediate_builder_name = 'StaticObject'
  target_prefix = '${PROGPREFIX}'
  target_suffix = '${PROGSUFFIX}'
  out_dir = '${TOP_BUILDDIR}'


class StaticLibraryTarget(CompilableSourcesTargetBase):
  """
  A GYP target type of 'static_library'.
  """
  builder_name = 'GypStaticLibrary'
  intermediate_builder_name = 'StaticObject'
  target_prefix = '${LIBPREFIX}'
  target_suffix = '${LIBSUFFIX}'
  out_dir = '${LIB_DIR}'


class SharedLibraryTarget(CompilableSourcesTargetBase):
  """
  A GYP target type of 'shared_library'.
  """
  builder_name = 'GypSharedLibrary'
  intermediate_builder_name = 'SharedObject'
  target_prefix = '${SHLIBPREFIX}'
  target_suffix = '${SHLIBSUFFIX}'
  out_dir = '${LIB_DIR}'


class LoadableModuleTarget(CompilableSourcesTargetBase):
  """
  A GYP target type of 'loadable_module'.
  """
  builder_name = 'GypLoadableModule'
  intermediate_builder_name = 'SharedObject'
  target_prefix = '${SHLIBPREFIX}'
  target_suffix = '${SHLIBSUFFIX}'
  out_dir = '${TOP_BUILDDIR}'


TargetMap = {
  None : NoneTarget,
  'none' : NoneTarget,
  'settings' : SettingsTarget,
  'executable' : ProgramTarget,
  'static_library' : StaticLibraryTarget,
  'shared_library' : SharedLibraryTarget,
  'loadable_module' : LoadableModuleTarget,
}


def Target(spec):
  return TargetMap[spec.get('type')](spec)
gpl-3.0
tahoe/flask-restless
tests/test_processors.py
9
18908
""" tests.test_processors ~~~~~~~~~~~~~~~~~~~~~ Provides unit tests for pre- and post-processors hooks. :copyright: 2013 Mike Klimin <[email protected]> :copyright: 2012, 2013, 2014, 2015 Jeffrey Finkelstein <[email protected]> and contributors. :license: GNU AGPLv3+ or BSD """ from __future__ import with_statement from datetime import date from flask import json from flask.ext.restless import ProcessingException from .helpers import TestSupport dumps = json.dumps loads = json.loads class TestProcessors(TestSupport): """Unit tests for preprocessors and postprocessors.""" def setUp(self): """Creates the database, the :class:`~flask.Flask` object, the :class:`~flask_restless.manager.APIManager` for that application. """ # create the database super(TestProcessors, self).setUp() # to facilitate searching self.app.search = lambda url, q: self.app.get(url + '?q={0}'.format(q)) def test_get_single_preprocessor(self): """Tests :http:method:`get` requests for a single object with a preprocessor function. """ def check_permissions(**kw): raise ProcessingException(code=403, description='Permission denied') pre = dict(GET_SINGLE=[check_permissions]) self.manager.create_api(self.Person, methods=['GET', 'POST'], preprocessors=pre) response = self.app.post('/api/person', data=dumps({'name': u'test'})) assert 201 == response.status_code response = self.app.get('/api/person/1') assert response.status_code == 403 json_resp = loads(response.data) assert 'Permission denied' == json_resp['message'] def test_change_instance_id(self): """Tests that return values from preprocessors set the instance ID.""" # Create some people. alice = self.Person(id=1, name=u'Alice') bob = self.Person(id=2, name=u'Bob') eve = self.Person(id=3, name=u'Eve') self.session.add_all((alice, bob, eve)) self.session.commit() # Define the preprocessor function, which increments the primary key. def increment(instance_id=None, **kw): if instance_id is None: raise Exception return int(instance_id) + 1 # Create an API with the incrementing preprocessor. pre = dict(GET_SINGLE=[increment], PATCH_SINGLE=[increment], DELETE_SINGLE=[increment]) self.manager.create_api(self.Person, methods=['GET', 'PATCH', 'DELETE'], preprocessors=pre) # Create an API where the incrementing preprocessor happens twice. pre = dict(GET_SINGLE=[increment, increment]) self.manager.create_api(self.Person, url_prefix='/api/v2', methods=['GET'], preprocessors=pre) # Request the person with ID 1; the preprocessor should cause this to # return the person with ID 2. Similarly for the person with ID 2. response = self.app.get('/api/person/1') assert response.status_code == 200 data = loads(response.data) assert data['id'] == 2 assert data['name'] == u'Bob' response = self.app.get('/api/person/2') assert response.status_code == 200 data = loads(response.data) assert data['id'] == 3 assert data['name'] == u'Eve' # Request the person with ID 1; the preprocessor should cause this to # return the person with ID 3. response = self.app.get('/api/v2/person/1') assert response.status_code == 200 data = loads(response.data) assert data['id'] == 3 assert data['name'] == u'Eve' # After this patch request, the person with ID *2* should have the name # Paul. The response should include the JSON representation of the # person with ID *2*, since that is how the view function acts as if it # receives ID 2. 
data = dumps(dict(name='Paul')) response = self.app.patch('/api/person/1', data=data) assert response.status_code == 200 data = loads(response.data) assert data['id'] == 2 assert data['name'] == u'Paul' # Finally, send a request to delete the person with ID 1, but the # preprocessor increments the ID, so person number 2 should actually be # deleted. response = self.app.delete('/api/person/1') assert response.status_code == 204 # Check that there are only two people in the database, and neither of # them is the person with ID 2. response = self.app.get('/api/person') assert response.status_code == 200 data = loads(response.data)['objects'] assert len(data) == 2 assert data[0]['id'] != 2 assert data[1]['id'] != 2 def test_get_many_postprocessor(self): filt = dict(name='id', op='in', val=[1, 3]) def foo(search_params=None, **kw): assert filt in search_params['filters'] post = dict(GET_MANY=[foo]) self.manager.create_api(self.Person, methods=['GET', 'POST'], postprocessors=post) query = dict(filters=[filt]) response = self.app.search('/api/person', dumps(query)) assert response.status_code == 200 def test_get_many_preprocessor(self): def check_permissions(search_params=None, **kw): filt = {u'name': u'id', u'op': u'in', u'val': [1, 3]} if 'filters' not in search_params: search_params['filters'] = [filt] else: search_params['filters'].append(filt) pre = dict(GET_MANY=[check_permissions]) self.manager.create_api(self.Person, methods=['GET', 'POST'], preprocessors=pre) self.app.post('/api/person', data=dumps({'name': u'Lincoln', 'age': 23})) self.app.post('/api/person', data=dumps({'name': u'Lucy', 'age': 23})) self.app.post('/api/person', data=dumps({'name': u'Mary', 'age': 25})) response = self.app.get('/api/person') objs = loads(response.data)['objects'] ids = [obj['id'] for obj in objs] assert ids == [1, 3] assert response.status_code == 200 search = dict(filters=[dict(name='name', val='Lincoln', op='equals')]) response = self.app.search('/api/person', dumps(search)) num_results = loads(response.data)['num_results'] assert num_results == 1 assert response.status_code == 200 def test_post_preprocessor(self): """Tests :http:method:`post` requests with a preprocessor function.""" def add_parameter(data=None, **kw): if data: data['other'] = 7 def check_permissions(data=None, **kw): raise ProcessingException(code=403, description='Permission denied') self.manager.create_api(self.Person, methods=['POST'], url_prefix='/api/v2', preprocessors=dict(POST=[add_parameter])) self.manager.create_api(self.Person, methods=['POST'], url_prefix='/api/v3', preprocessors=dict(POST=[check_permissions])) response = self.app.post('/api/v2/person', data=dumps({'name': u'Lincoln', 'age': 23})) assert response.status_code == 201 personid = loads(response.data)['id'] person = self.session.query(self.Person).filter_by(id=personid).first() assert person.other == 7 response = self.app.post('/api/v3/person', data=dumps({'name': u'Lincoln', 'age': 23})) assert response.status_code == 403 json_resp = loads(response.data) assert 'Permission denied' == json_resp['message'] def test_delete_preprocessor(self): """Tests for using a preprocessor with :http:method:`delete` requests. 
""" def check_permissions(**kw): raise ProcessingException(code=403, description='Permission denied') pre = dict(DELETE_SINGLE=[check_permissions]) # recreate the api at /api/v1/person self.manager.create_api(self.Person, methods=['POST', 'DELETE'], preprocessors=pre) # Creating some people self.app.post('/api/person', data=dumps({'name': u'Lincoln', 'age': 23})) self.app.post('/api/person', data=dumps({'name': u'Lucy', 'age': 23})) self.app.post('/api/person', data=dumps({'name': u'Mary', 'age': 25})) # Try deleting it response = self.app.delete('/api/person/1') assert response.status_code == 403 json_resp = loads(response.data) assert 'Permission denied' == json_resp['message'] # Making sure it has been not deleted people = self.session.query(self.Person).filter_by(id=1) assert people.count() == 1 def test_patch_single_preprocessor(self): """Tests for using a preprocessor with :http:method:`patch` requests. """ def check_permissions(**kw): raise ProcessingException(code=403, description='Permission denied') pre = dict(PATCH_SINGLE=[check_permissions]) # recreate the api at /api/v1/person self.manager.create_api(self.Person, methods=['PATCH', 'POST'], preprocessors=pre) # Creating some test people self.app.post('/api/person', data=dumps({'name': u'Lincoln', 'age': 23})) self.app.post('/api/person', data=dumps({'name': u'Lucy', 'age': 23})) self.app.post('/api/person', data=dumps({'name': u'Mary', 'age': 25})) # Try updating people with id=1 response = self.app.patch('/api/person/1', data=dumps({'age': 27})) assert response.status_code == 403 json_resp = loads(response.data) assert 'Permission denied' == json_resp['message'] def test_patch_single_preprocessor2(self): """Tests for using a preprocessor with :http:method:`patch` requests. """ def update_data(data=None, **kw): data['other'] = 27 pre = dict(PATCH_SINGLE=[update_data]) # recreate the api at /api/v1/person self.manager.create_api(self.Person, methods=['GET', 'PATCH', 'POST'], preprocessors=pre) # Creating some test people self.app.post('/api/person', data=dumps({'name': u'Lincoln', 'age': 23})) self.app.post('/api/person', data=dumps({'name': u'Lucy', 'age': 23})) self.app.post('/api/person', data=dumps({'name': u'Mary', 'age': 25})) # Try updating people with id=1 response = self.app.patch('/api/person/1', data=dumps({'age': 27})) assert response.status_code == 200 resp = self.app.get('/api/person/1') assert resp.status_code == 200 assert loads(resp.data)['age'] == 27 assert loads(resp.data)['other'] == 27 def test_delete_single(self): """Test for the DELETE_SINGLE preprocessor.""" # Create a preprocessor function that only allows deleting a Person # instance with ID 2. def must_have_id_2(instance_id=None, **kw): if int(instance_id) != 2: raise ProcessingException(description='hey', code=400) pre = dict(DELETE_SINGLE=[must_have_id_2]) self.manager.create_api(self.Person, methods=['GET', 'DELETE'], preprocessors=pre) # Add three people to the database. self.session.add(self.Person(id=1)) self.session.add(self.Person(id=2)) self.session.add(self.Person(id=3)) self.session.commit() # Trying to delete Person instances with ID 1 and 3 should cause a # processing exception, resulting in a HTTP 400 response. 
        response = self.app.delete('/api/person/1')
        assert response.status_code == 400
        response = self.app.delete('/api/person/3')
        assert response.status_code == 400
        # Trying to delete person 2 should work.
        response = self.app.delete('/api/person/2')
        assert response.status_code == 204
        response = self.app.get('/api/person')
        assert response.status_code == 200
        data = loads(response.data)['objects']
        assert 2 not in [person['id'] for person in data]

    def test_delete_many_preprocessor(self):
        # Create a preprocessor function that adds a filter.
        def add_filter(search_params=None, **kw):
            filt = dict(name='age', op='eq', val=23)
            if search_params is None:
                search_params = {}
            if 'filters' not in search_params:
                search_params['filters'] = []
            search_params['filters'].append(filt)
        pre = dict(DELETE_MANY=[add_filter])
        # recreate the api at /api/v1/person
        self.manager.create_api(self.Person,
                                methods=['GET', 'POST', 'DELETE'],
                                allow_delete_many=True, preprocessors=pre)
        self.session.add(self.Person(name=u'foo', age=23))
        self.session.add(self.Person(name=u'bar', age=23))
        self.session.add(self.Person(name=u'baz', age=25))
        self.session.commit()
        # Delete only those people that have age 23, using the filter added
        # by the preprocessor.
        response = self.app.delete('/api/person')
        assert response.status_code == 200
        assert loads(response.data)['num_deleted'] == 2
        # Finally, test whether the deletion was made.
        response = self.app.get('/api/person')
        data = loads(response.data)['objects']
        assert len(data) == 1
        assert data[0]['name'] == u'baz'

    def test_patch_many_preprocessor(self):
        """Tests for using a preprocessor with :http:method:`patch` requests
        which request changes to many objects.

        """
        def update_data(data=None, **kw):
            data['other'] = 27
        pre = dict(PATCH_MANY=[update_data])
        # recreate the api at /api/v1/person
        self.manager.create_api(self.Person,
                                methods=['GET', 'POST', 'PATCH'],
                                allow_patch_many=True, preprocessors=pre)
        # Creating some people
        self.app.post('/api/person',
                      data=dumps({'name': u'Lincoln', 'age': 23}))
        self.app.post('/api/person',
                      data=dumps({'name': u'Lucy', 'age': 23}))
        self.app.post('/api/person',
                      data=dumps({'name': u'Mary', 'age': 25}))
        # Change the birth date field of the entire collection.
        day, month, year = 15, 9, 1986
        # Note: this is dd/mm/yyyy, not ISO 8601; the server returns dates
        # in ISO 8601 form, as asserted below.
        birth_date = date(year, month, day).strftime('%d/%m/%Y')
        form = {'birth_date': birth_date}
        response = self.app.patch('/api/person', data=dumps(form))
        assert response.status_code == 200
        # Finally, test whether the change was made.
        response = self.app.get('/api/person')
        loaded = loads(response.data)['objects']
        for i in loaded:
            expected = '{0:4d}-{1:02d}-{2:02d}'.format(year, month, day)
            assert i['birth_date'] == expected
            assert i['other'] == 27

    def test_processor_no_change(self):
        """Tests :http:method:`post` requests with a preprocessor function
that makes no change to the data""" def no_change(**kw): pass self.manager.create_api(self.Person, methods=['GET', 'POST'], url_prefix='/api/v2', preprocessors=dict(POST=[no_change], GET_SINGLE=[no_change], GET_MANY=[no_change])) response = self.app.post('/api/v2/person', data=dumps({'name': u'Lincoln', 'age': 23})) assert response.status_code == 201 personid = loads(response.data)['id'] person = self.session.query(self.Person).filter_by(id=personid).first() assert person.name == u'Lincoln' assert person.age == 23 # Test for GET_SINGLE response = self.app.get('/api/v2/person/{0:d}'.format(personid)) assert response.status_code == 200 person_response = loads(response.data) assert person_response['name'] == person.name assert person_response['age'] == person.age # Test for GET_MANY response = self.app.get('/api/v2/person') assert response.status_code == 200 person_response = loads(response.data)["objects"][0] assert person_response['name'] == person.name assert person_response['age'] == person.age def test_add_filters(self): """Test for adding a filter to a :http:method:`get` request for a collection where there was no query parameter before. """ # Create some people in the database. person1 = self.Person(name=u'foo') person2 = self.Person(name=u'bar') person3 = self.Person(name=u'baz') self.session.add_all((person1, person2, person3)) self.session.commit() # Create a preprocessor function that adds a filter. def add_filter(search_params=None, **kw): if search_params is None: return filt = dict(name='name', op='like', val=u'ba%') if 'filters' not in search_params: search_params['filters'] = [] search_params['filters'].append(filt) # Create the API with the preprocessor. self.manager.create_api(self.Person, preprocessors=dict(GET_MANY=[add_filter])) # Test that the filter is added on GET requests to the collection. response = self.app.get('/api/person') assert 200 == response.status_code data = loads(response.data)['objects'] assert 2 == len(data) assert sorted(['bar', 'baz']) == sorted([person['name'] for person in data])
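
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): how the pre-
# and postprocessor hooks exercised above are wired into a real application.
# The app, database URI, and model below are assumptions for the example.
if __name__ == '__main__':
    import flask
    import flask.ext.sqlalchemy
    import flask.ext.restless

    app = flask.Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
    db = flask.ext.sqlalchemy.SQLAlchemy(app)

    class Person(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.Unicode)

    db.create_all()

    def deny_all(**kw):
        # Reject every POST before Flask-Restless touches the database.
        raise ProcessingException(code=403, description='Permission denied')

    manager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)
    manager.create_api(Person, methods=['GET', 'POST'],
                       preprocessors={'POST': [deny_all]})
    app.run()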
agpl-3.0
dr0pz0ne/sibble
lib/ansible/module_utils/api.py
10
3198
#
# (c) 2015 Brian Coca, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
"""
This module adds shared support for generic api modules

In order to use this module, include it as part of a custom
module as shown below.

** Note: The order of the import statements does matter. **

from ansible.module_utils.basic import *
from ansible.module_utils.api import *

The 'api' module provides the following common argument specs:

    * rate limit spec
        - rate: number of requests per time unit (int)
        - rate_limit: time window in which the limit is applied in seconds

    * retry spec
        - retries: number of attempts
        - retry_pause: delay between attempts in seconds
"""
import time


def rate_limit_argument_spec(spec=None):
    """Creates an argument spec for working with rate limiting"""
    arg_spec = (dict(
        rate=dict(type='int'),
        rate_limit=dict(type='int'),
    ))
    if spec:
        arg_spec.update(spec)
    return arg_spec


def retry_argument_spec(spec=None):
    """Creates an argument spec for working with retrying"""
    arg_spec = (dict(
        retries=dict(type='int'),
        retry_pause=dict(type='float', default=1),
    ))
    if spec:
        arg_spec.update(spec)
    return arg_spec


def rate_limit(rate=None, rate_limit=None):
    """rate limiting decorator"""
    minrate = None
    if rate is not None and rate_limit is not None:
        minrate = float(rate_limit) / float(rate)

    def wrapper(f):
        last = [0.0]

        def ratelimited(*args, **kwargs):
            if minrate is not None:
                # time.time() measures wall-clock time; time.clock() would
                # measure CPU time on POSIX systems and break the throttle.
                elapsed = time.time() - last[0]
                left = minrate - elapsed
                if left > 0:
                    time.sleep(left)
                last[0] = time.time()
            ret = f(*args, **kwargs)
            return ret
        return ratelimited
    return wrapper


def retry(retries=None, retry_pause=1):
    """Retry decorator"""
    def wrapper(f):

        def retried(*args, **kwargs):
            if retries is None:
                # no retry configuration: call through unchanged
                return f(*args, **kwargs)
            # The counter must be local to the call; rebinding an integer
            # closed over from wrapper() would raise UnboundLocalError.
            retry_count = 0
            ret = None
            while True:
                retry_count += 1
                if retry_count >= retries:
                    raise Exception("Retry limit exceeded: %d" % retries)
                try:
                    ret = f(*args, **kwargs)
                except Exception:
                    # swallow the error and try again after the pause
                    pass
                if ret:
                    # NB: a falsy return value also triggers a retry
                    break
                time.sleep(retry_pause)
            return ret
        return retried
    return wrapper
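
# ---------------------------------------------------------------------------
# Illustrative usage (an assumption, not part of the shipped module): how a
# custom module would compose the argument specs and stack the decorators.
if __name__ == '__main__':
    combined_spec = rate_limit_argument_spec(retry_argument_spec())

    @rate_limit(rate=10, rate_limit=60)   # at most 10 calls per 60 seconds
    @retry(retries=3, retry_pause=2.0)    # up to 3 attempts, 2 seconds apart
    def fetch():
        # placeholder for a real API call
        return {'ok': True}

    print(fetch())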
gpl-3.0
ddboline/pylearn2
pylearn2/utils/timing.py
49
2601
"""Utilities related to timing various segments of code.""" __authors__ = "David Warde-Farley" __copyright__ = "Copyright 2010-2012, Universite de Montreal" __credits__ = ["David Warde-Farley"] __license__ = "3-clause BSD" __maintainer__ = "David Warde-Farley" __email__ = "wardefar@iro" from contextlib import contextmanager import logging import datetime def total_seconds(delta): """ Extract the total number of seconds from a timedelta object in a way that is compatible with Python <= 2.6. Parameters ---------- delta : object A `datetime.timedelta` object. Returns ------- total : float The time quantity represented by `delta` in seconds, with a fractional portion. """ if hasattr(delta, 'total_seconds'): return delta.total_seconds() else: return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6 ) / float(10 ** 6) @contextmanager def log_timing(logger, task, level=logging.INFO, final_msg=None, callbacks=None): """ Context manager that logs the start/end of an operation, and timing information, to a given logger. Parameters ---------- logger : object A Python standard library logger object, or an object that supports the `logger.log(level, message, ...)` API it defines. task : str A string indicating the operation being performed. A '...' will be appended to the initial logged message. If `None`, no initial message will be printed. level : int, optional The log level to use. Default `logging.INFO`. final_msg : str, optional Display this before the reported time instead of '<task> done. Time elapsed:'. A space will be added between this message and the reported time. callbacks: list, optional A list of callbacks taking as argument an integer representing the total number of seconds. """ start = datetime.datetime.now() if task is not None: logger.log(level, str(task) + '...') yield end = datetime.datetime.now() delta = end - start total = total_seconds(delta) if total < 60: delta_str = '%f seconds' % total else: delta_str = str(delta) if final_msg is None: logger.log(level, str(task) + ' done. Time elapsed: %s' % delta_str) else: logger.log(level, ' '.join((final_msg, delta_str))) if callbacks is not None: for callback in callbacks: callback(total)
bsd-3-clause
CydarLtd/ansible
lib/ansible/modules/net_tools/dnsmadeeasy.py
30
14042
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: dnsmadeeasy version_added: "1.3" short_description: Interface with dnsmadeeasy.com (a DNS hosting service). description: - > Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/) options: account_key: description: - Account API Key. required: true default: null account_secret: description: - Account Secret Key. required: true default: null domain: description: - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster resolution required: true default: null record_name: description: - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless of the state argument. required: false default: null record_type: description: - Record type. required: false choices: [ 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ] default: null record_value: description: - > Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>, SRV: <priority> <weight> <port> <target name>, TXT: <text value>" - > If record_value is not specified; no changes will be made and the record will be returned in 'result' (in other words, this module can be used to fetch a record's current id, type, and ttl) required: false default: null record_ttl: description: - record's "Time to live". Number of seconds the record remains cached in DNS servers. required: false default: 1800 state: description: - whether the record should exist or not required: true choices: [ 'present', 'absent' ] default: null validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 notes: - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP. - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks. 
requirements: [ hashlib, hmac ] author: "Brice Burgess (@briceburg)" ''' EXAMPLES = ''' # fetch my.com domain records - dnsmadeeasy: account_key: key account_secret: secret domain: my.com state: present register: response # create / ensure the presence of a record - dnsmadeeasy: account_key: key account_secret: secret domain: my.com state: present record_name: test record_type: A record_value: 127.0.0.1 # update the previously created record - dnsmadeeasy: account_key: key account_secret: secret domain: my.com state: present record_name: test record_value: 192.0.2.23 # fetch a specific record - dnsmadeeasy: account_key: key account_secret: secret domain: my.com state: present record_name: test register: response # delete a record / ensure it is absent - dnsmadeeasy: account_key: key account_secret: secret domain: my.com state: absent record_name: test ''' # ============================================ # DNSMadeEasy module specific support methods. # import urllib IMPORT_ERROR = None try: import json from time import strftime, gmtime import hashlib import hmac except ImportError: e = get_exception() IMPORT_ERROR = str(e) class DME2: def __init__(self, apikey, secret, domain, module): self.module = module self.api = apikey self.secret = secret self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' self.domain = str(domain) self.domain_map = None # ["domain_name"] => ID self.record_map = None # ["record_name"] => ID self.records = None # ["record_ID"] => <record> self.all_records = None # Lookup the domain ID if passed as a domain name vs. ID if not self.domain.isdigit(): self.domain = self.getDomainByName(self.domain)['id'] self.record_url = 'dns/managed/' + str(self.domain) + '/records' def _headers(self): currTime = self._get_date() hashstring = self._create_hash(currTime) headers = {'x-dnsme-apiKey': self.api, 'x-dnsme-hmac': hashstring, 'x-dnsme-requestDate': currTime, 'content-type': 'application/json'} return headers def _get_date(self): return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) def _create_hash(self, rightnow): return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest() def query(self, resource, method, data=None): url = self.baseurl + resource if data and not isinstance(data, basestring): data = urllib.urlencode(data) response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) if info['status'] not in (200, 201, 204): self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) try: return json.load(response) except Exception: return {} def getDomain(self, domain_id): if not self.domain_map: self._instMap('domain') return self.domains.get(domain_id, False) def getDomainByName(self, domain_name): if not self.domain_map: self._instMap('domain') return self.getDomain(self.domain_map.get(domain_name, 0)) def getDomains(self): return self.query('dns/managed', 'GET')['data'] def getRecord(self, record_id): if not self.record_map: self._instMap('record') return self.records.get(record_id, False) # Try to find a single record matching this one. # How we do this depends on the type of record. For instance, there # can be several MX records for a single record_name while there can # only be a single CNAME for a particular record_name. Note also that # there can be several records with different types for a single name. 
    def getMatchingRecord(self, record_name, record_type, record_value):
        # Get all the records if not already cached
        if not self.all_records:
            self.all_records = self.getRecords()

        if record_type in ["A", "AAAA", "CNAME", "HTTPRED", "PTR"]:
            for result in self.all_records:
                if result['name'] == record_name and result['type'] == record_type:
                    return result
            return False
        elif record_type in ["MX", "NS", "TXT", "SRV"]:
            for result in self.all_records:
                if record_type == "MX":
                    value = record_value.split(" ")[1]
                elif record_type == "SRV":
                    value = record_value.split(" ")[3]
                else:
                    value = record_value
                if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
                    return result
            return False
        else:
            raise Exception('record_type not yet supported')

    def getRecords(self):
        return self.query(self.record_url, 'GET')['data']

    def _instMap(self, type):
        #@TODO cache this call so it's executed only once per ansible execution
        map = {}
        results = {}

        # iterate over e.g. self.getDomains() || self.getRecords()
        for result in getattr(self, 'get' + type.title() + 's')():
            map[result['name']] = result['id']
            results[result['id']] = result

        # e.g. self.domain_map || self.record_map
        setattr(self, type + '_map', map)
        setattr(self, type + 's', results)  # e.g. self.domains || self.records

    def prepareRecord(self, data):
        return json.dumps(data, separators=(',', ':'))

    def createRecord(self, data):
        #@TODO update the cache w/ resultant record + id when implemented
        return self.query(self.record_url, 'POST', data)

    def updateRecord(self, record_id, data):
        #@TODO update the cache w/ resultant record + id when implemented
        return self.query(self.record_url + '/' + str(record_id), 'PUT', data)

    def deleteRecord(self, record_id):
        #@TODO remove record from the cache when implemented
        return self.query(self.record_url + '/' + str(record_id), 'DELETE')


# ===========================================
# Module execution.
# def main(): module = AnsibleModule( argument_spec=dict( account_key=dict(required=True), account_secret=dict(required=True, no_log=True), domain=dict(required=True), state=dict(required=True, choices=['present', 'absent']), record_name=dict(required=False), record_type=dict(required=False, choices=[ 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), record_value=dict(required=False), record_ttl=dict(required=False, default=1800, type='int'), validate_certs = dict(default='yes', type='bool'), ), required_together=( ['record_value', 'record_ttl', 'record_type'] ) ) if IMPORT_ERROR: module.fail_json(msg="Import Error: " + IMPORT_ERROR) DME = DME2(module.params["account_key"], module.params[ "account_secret"], module.params["domain"], module) state = module.params["state"] record_name = module.params["record_name"] record_type = module.params["record_type"] record_value = module.params["record_value"] # Follow Keyword Controlled Behavior if record_name is None: domain_records = DME.getRecords() if not domain_records: module.fail_json( msg="The requested domain name is not accessible with this api_key; try using its ID if known.") module.exit_json(changed=False, result=domain_records) # Fetch existing record + Build new one current_record = DME.getMatchingRecord(record_name, record_type, record_value) new_record = {'name': record_name} for i in ["record_value", "record_type", "record_ttl"]: if not module.params[i] is None: new_record[i[len("record_"):]] = module.params[i] # Special handling for mx record if new_record["type"] == "MX": new_record["mxLevel"] = new_record["value"].split(" ")[0] new_record["value"] = new_record["value"].split(" ")[1] # Special handling for SRV records if new_record["type"] == "SRV": new_record["priority"] = new_record["value"].split(" ")[0] new_record["weight"] = new_record["value"].split(" ")[1] new_record["port"] = new_record["value"].split(" ")[2] new_record["value"] = new_record["value"].split(" ")[3] # Compare new record against existing one changed = False if current_record: for i in new_record: if str(current_record[i]) != str(new_record[i]): changed = True new_record['id'] = str(current_record['id']) # Follow Keyword Controlled Behavior if state == 'present': # return the record if no value is specified if not "value" in new_record: if not current_record: module.fail_json( msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain'])) module.exit_json(changed=False, result=current_record) # create record as it does not exist if not current_record: record = DME.createRecord(DME.prepareRecord(new_record)) module.exit_json(changed=True, result=record) # update the record if changed: DME.updateRecord( current_record['id'], DME.prepareRecord(new_record)) module.exit_json(changed=True, result=new_record) # return the record (no changes) module.exit_json(changed=False, result=current_record) elif state == 'absent': # delete the record if it exists if current_record: DME.deleteRecord(current_record['id']) module.exit_json(changed=True) # record does not exist, return w/o change. module.exit_json(changed=False) else: module.fail_json( msg="'%s' is an unknown value for the state argument" % state) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * if __name__ == '__main__': main()
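
# ---------------------------------------------------------------------------
# Standalone sketch of the request-signing scheme implemented by
# DME2._create_hash()/_headers() above (illustrative only; the key and
# secret are dummies): DNS Made Easy expects an HMAC-SHA1 digest of the
# RFC 1123 request date, keyed with the account secret.
#
#   now = strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
#   sig = hmac.new('my-secret'.encode(), now.encode(),
#                  hashlib.sha1).hexdigest()
#   headers = {'x-dnsme-apiKey': 'my-key',
#              'x-dnsme-hmac': sig,
#              'x-dnsme-requestDate': now}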
gpl-3.0
vFense/vFenseAgent-nix
agent/deps/rpm/Python-2.7.5/lib/python2.7/test/test_strop.py
88
6374
import warnings warnings.filterwarnings("ignore", "strop functions are obsolete;", DeprecationWarning, r'test.test_strop|unittest') import strop import unittest from test import test_support class StropFunctionTestCase(unittest.TestCase): def test_atoi(self): self.assertTrue(strop.atoi(" 1 ") == 1) self.assertRaises(ValueError, strop.atoi, " 1x") self.assertRaises(ValueError, strop.atoi, " x1 ") def test_atol(self): self.assertTrue(strop.atol(" 1 ") == 1L) self.assertRaises(ValueError, strop.atol, " 1x") self.assertRaises(ValueError, strop.atol, " x1 ") def test_atof(self): self.assertTrue(strop.atof(" 1 ") == 1.0) self.assertRaises(ValueError, strop.atof, " 1x") self.assertRaises(ValueError, strop.atof, " x1 ") def test_capitalize(self): self.assertTrue(strop.capitalize(" hello ") == " hello ") self.assertTrue(strop.capitalize("hello ") == "Hello ") def test_find(self): self.assertTrue(strop.find("abcdefghiabc", "abc") == 0) self.assertTrue(strop.find("abcdefghiabc", "abc", 1) == 9) self.assertTrue(strop.find("abcdefghiabc", "def", 4) == -1) def test_rfind(self): self.assertTrue(strop.rfind("abcdefghiabc", "abc") == 9) def test_lower(self): self.assertTrue(strop.lower("HeLLo") == "hello") def test_upper(self): self.assertTrue(strop.upper("HeLLo") == "HELLO") def test_swapcase(self): self.assertTrue(strop.swapcase("HeLLo cOmpUteRs") == "hEllO CoMPuTErS") def test_strip(self): self.assertTrue(strop.strip(" \t\n hello \t\n ") == "hello") def test_lstrip(self): self.assertTrue(strop.lstrip(" \t\n hello \t\n ") == "hello \t\n ") def test_rstrip(self): self.assertTrue(strop.rstrip(" \t\n hello \t\n ") == " \t\n hello") def test_replace(self): replace = strop.replace self.assertTrue(replace("one!two!three!", '!', '@', 1) == "one@two!three!") self.assertTrue(replace("one!two!three!", '!', '@', 2) == "one@two@three!") self.assertTrue(replace("one!two!three!", '!', '@', 3) == "one@two@three@") self.assertTrue(replace("one!two!three!", '!', '@', 4) == "one@two@three@") # CAUTION: a replace count of 0 means infinity only to strop, # not to the string .replace() method or to the # string.replace() function. 
self.assertTrue(replace("one!two!three!", '!', '@', 0) == "one@two@three@") self.assertTrue(replace("one!two!three!", '!', '@') == "one@two@three@") self.assertTrue(replace("one!two!three!", 'x', '@') == "one!two!three!") self.assertTrue(replace("one!two!three!", 'x', '@', 2) == "one!two!three!") def test_split(self): split = strop.split self.assertTrue(split("this is the split function") == ['this', 'is', 'the', 'split', 'function']) self.assertTrue(split("a|b|c|d", '|') == ['a', 'b', 'c', 'd']) self.assertTrue(split("a|b|c|d", '|', 2) == ['a', 'b', 'c|d']) self.assertTrue(split("a b c d", None, 1) == ['a', 'b c d']) self.assertTrue(split("a b c d", None, 2) == ['a', 'b', 'c d']) self.assertTrue(split("a b c d", None, 3) == ['a', 'b', 'c', 'd']) self.assertTrue(split("a b c d", None, 4) == ['a', 'b', 'c', 'd']) self.assertTrue(split("a b c d", None, 0) == ['a', 'b', 'c', 'd']) self.assertTrue(split("a b c d", None, 2) == ['a', 'b', 'c d']) def test_join(self): self.assertTrue(strop.join(['a', 'b', 'c', 'd']) == 'a b c d') self.assertTrue(strop.join(('a', 'b', 'c', 'd'), '') == 'abcd') self.assertTrue(strop.join(Sequence()) == 'w x y z') # try a few long ones self.assertTrue(strop.join(['x' * 100] * 100, ':') == (('x' * 100) + ":") * 99 + "x" * 100) self.assertTrue(strop.join(('x' * 100,) * 100, ':') == (('x' * 100) + ":") * 99 + "x" * 100) def test_maketrans(self): self.assertTrue(strop.maketrans("abc", "xyz") == transtable) self.assertRaises(ValueError, strop.maketrans, "abc", "xyzq") def test_translate(self): self.assertTrue(strop.translate("xyzabcdef", transtable, "def") == "xyzxyz") def test_data_attributes(self): strop.lowercase strop.uppercase strop.whitespace @test_support.precisionbigmemtest(size=test_support._2G - 1, memuse=5) def test_stropjoin_huge_list(self, size): a = "A" * size try: r = strop.join([a, a], a) except OverflowError: pass else: self.assertEqual(len(r), len(a) * 3) @test_support.precisionbigmemtest(size=test_support._2G - 1, memuse=1) def test_stropjoin_huge_tup(self, size): a = "A" * size try: r = strop.join((a, a), a) except OverflowError: pass # acceptable on 32-bit else: self.assertEqual(len(r), len(a) * 3) transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377' # join() now works with any sequence type. class Sequence: def __init__(self): self.seq = 'wxyz' def __len__(self): return len(self.seq) def __getitem__(self, i): return self.seq[i] def test_main(): test_support.run_unittest(StropFunctionTestCase) if __name__ == "__main__": test_main()
lgpl-3.0
prisis/sublime-text-packages
Packages/SublimeCodeIntel/libs/_ielementtree.py
8
1294
import sys import struct VERSION = sys.version_info[:2] PLATFORM = sys.platform ARCH = 'x%d' % (struct.calcsize('P') * 8) if VERSION >= (3, 3): platform = None try: from _local_arch._ielementtree import * platform = "Local arch" except ImportError: if PLATFORM == 'darwin': from _macosx_universal_py33._ielementtree import * platform = "MacOS X Universal" elif PLATFORM.startswith('linux'): if ARCH == 'x64': from _linux_libcpp6_x86_64_py33._ielementtree import * platform = "Linux 64 bits" elif ARCH == 'x32': from _linux_libcpp6_x86_py33._ielementtree import * platform = "Linux 32 bits" elif PLATFORM.startswith('win'): if ARCH == 'x64': from _win64_py33._ielementtree import * platform = "Windows 64 bits" elif ARCH == 'x32': from _win32_py33._ielementtree import * platform = "Windows 32 bits" if not platform: raise ImportError("Could not find a suitable _ielementtree binary for your platform and architecture.") elif VERSION >= (2, 6): from ciElementTree import * from ciElementTree import _patched_for_komodo_
mit
john-parton/django-oscar
src/oscar/apps/catalogue/utils.py
24
6072
import os import shutil import tarfile import tempfile import zipfile import zlib from django.core.exceptions import FieldError from django.core.files import File from django.db.transaction import atomic from django.utils.translation import ugettext_lazy as _ from PIL import Image from oscar.apps.catalogue.exceptions import ( IdenticalImageError, ImageImportError, InvalidImageArchive) from oscar.core.loading import get_model Category = get_model('catalogue', 'category') Product = get_model('catalogue', 'product') ProductImage = get_model('catalogue', 'productimage') # This is an old class only really intended to be used by the internal sandbox # site. It's not recommended to be used by your project. class Importer(object): allowed_extensions = ['.jpeg', '.jpg', '.gif', '.png'] def __init__(self, logger, field): self.logger = logger self._field = field @atomic # noqa (too complex (10)) def handle(self, dirname): stats = { 'num_processed': 0, 'num_skipped': 0, 'num_invalid': 0} image_dir, filenames = self._get_image_files(dirname) if image_dir: for filename in filenames: try: lookup_value \ = self._get_lookup_value_from_filename(filename) self._process_image(image_dir, filename, lookup_value) stats['num_processed'] += 1 except Product.MultipleObjectsReturned: self.logger.warning("Multiple products matching %s='%s'," " skipping" % (self._field, lookup_value)) stats['num_skipped'] += 1 except Product.DoesNotExist: self.logger.warning("No item matching %s='%s'" % (self._field, lookup_value)) stats['num_skipped'] += 1 except IdenticalImageError: self.logger.warning("Identical image already exists for" " %s='%s', skipping" % (self._field, lookup_value)) stats['num_skipped'] += 1 except IOError as e: stats['num_invalid'] += 1 raise ImageImportError(_('%(filename)s is not a valid' ' image (%(error)s)') % {'filename': filename, 'error': e}) except FieldError as e: raise ImageImportError(e) if image_dir != dirname: shutil.rmtree(image_dir) else: raise InvalidImageArchive(_('%s is not a valid image archive') % dirname) self.logger.info("Finished image import: %(num_processed)d imported," " %(num_skipped)d skipped" % stats) def _get_image_files(self, dirname): filenames = [] image_dir = self._extract_images(dirname) if image_dir: for filename in os.listdir(image_dir): ext = os.path.splitext(filename)[1] if os.path.isfile(os.path.join(image_dir, filename)) \ and ext in self.allowed_extensions: filenames.append(filename) return image_dir, filenames def _extract_images(self, dirname): ''' Returns path to directory containing images in dirname if successful. Returns empty string if dirname does not exist, or could not be opened. Assumes that if dirname is a directory, then it contains images. If dirname is an archive (tar/zip file) then the path returned is to a temporary directory that should be deleted when no longer required. 
        '''
        if os.path.isdir(dirname):
            return dirname

        ext = os.path.splitext(dirname)[1]
        if ext in ['.gz', '.tar']:
            image_dir = tempfile.mkdtemp()
            try:
                tar_file = tarfile.open(dirname)
                tar_file.extractall(image_dir)
                tar_file.close()
                return image_dir
            except (tarfile.TarError, zlib.error):
                return ""
        elif ext == '.zip':
            image_dir = tempfile.mkdtemp()
            try:
                zip_file = zipfile.ZipFile(dirname)
                zip_file.extractall(image_dir)
                zip_file.close()
                return image_dir
            except (zlib.error, zipfile.BadZipfile, zipfile.LargeZipFile):
                return ""
        # unknown archive - perhaps this should be treated differently
        return ""

    def _process_image(self, dirname, filename, lookup_value):
        file_path = os.path.join(dirname, filename)
        trial_image = Image.open(file_path)
        trial_image.verify()

        kwargs = {self._field: lookup_value}
        item = Product._default_manager.get(**kwargs)

        # Read the new image once, closing the handle promptly rather than
        # leaking it until garbage collection.
        with open(file_path, 'rb') as image_file:
            new_data = image_file.read()
        next_index = 0
        for existing in item.images.all():
            next_index = existing.display_order + 1
            try:
                if new_data == existing.original.read():
                    raise IdenticalImageError()
            except IOError:
                # File probably doesn't exist
                existing.delete()

        # Keep the handle open only for the duration of the save.
        with open(file_path, 'rb') as image_file:
            new_file = File(image_file)
            im = ProductImage(product=item, display_order=next_index)
            im.original.save(filename, new_file, save=False)
            im.save()
        self.logger.debug('Image added to "%s"' % item)

    def _fetch_item(self, filename):
        kwargs = {self._field: self._get_lookup_value_from_filename(filename)}
        return Product._default_manager.get(**kwargs)

    def _get_lookup_value_from_filename(self, filename):
        return os.path.splitext(filename)[0]
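
# Illustrative usage (not part of Oscar): running the importer against an
# archive of images whose filenames match a product field.  Assumes a
# configured Django environment; the path and field are placeholders.
#
#   import logging
#   importer = Importer(logging.getLogger(__name__), field='upc')
#   importer.handle('/tmp/product-images.zip')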
bsd-3-clause
mutirri/bokeh
sphinx/source/docs/tutorials/exercises/periodic.py
23
3314
from bokeh.plotting import figure, output_file, show from bokeh.models import HoverTool, ColumnDataSource from bokeh.sampledata import periodic_table # categories need to be strings elements = periodic_table.elements[periodic_table.elements['group'] != "-"] # The categorical ranges need to be strings, so convert the groups and periods group_range = [str(x) for x in range(1,19)] period_range = [str(x) for x in reversed(sorted(set(elements['period'])))] # Output static HTML file output_file("periodic.html") # I like this colormap OK, but feel free to change it up colormap = { 'alkali metal' : "#a6cee3", 'alkaline earth metal' : "#1f78b4", 'halogen' : "#fdbf6f", 'metal' : "#b2df8a", 'metalloid' : "#33a02c", 'noble gas' : "#bbbb88", 'nonmetal' : "#baa2a6", 'transition metal' : "#e08e79", } # There are lots of things about each element we might want a hover tool # to be able to display, so put them all in a ColumnDataSource source = ColumnDataSource( data=dict( group=[str(x) for x in elements['group']], period=[str(y) for y in elements['period']], # these are "categorical coordinates" symx=[str(x)+":0.1" for x in elements['group']], numbery=[str(x)+":0.8" for x in elements['period']], massy=[str(x)+":0.15" for x in elements['period']], namey=[str(x)+":0.3" for x in elements['period']], sym=elements['symbol'], name=elements['name'], cpk=elements['CPK'], atomic_number=elements['atomic number'], electronic=elements['electronic configuration'], mass=elements['atomic mass'], type=elements['metal'], type_color=[colormap[x] for x in elements['metal']], ) ) # create a figure p = figure(title="Periodic Table", tools="resize,hover", x_range=group_range, y_range=period_range, plot_width=1200) # EXERCISE: add a `rect` renderer to display a rectangle at each group and column # Use group_range for x_range and period_range for y_range. # EXERCISE: we will be setting several of the same properties on the text renderers # below. Add to this dictionary to set the text alignment to 'left' and the text # baseline to 'middle' text_props = { } # Since text can be interpreted as a data source field name in general, and the # category names and locations are text, we have to specify the fields a little # more verbosely with a dictionary, as below p.text(x=dict(field="symx", units="data"), y=dict(field="period", units="data"), text=dict(field="sym", units="data"), text_font_style="bold", text_font_size="15pt", **text_props) # EXERCISE: add text that displays the atomic number in each square with 9pt font. # Use 'numbery' for the y position. # EXERCISE: add text that displays the full name in each square with 6pt font # Use 'namey' for the y position. # EXERCISE: add text that displays the atomic mass each square in 5pt font # Use 'massy' for the y position. # turn off the grid lines p.grid.grid_line_color = None # EXERCISE: configure a hover tool that displays the following: # * name # * atomic number # * type # * atomic mass # * CPK color # * electronic configuration # EXERCISE: show the plot
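
# One possible solution sketch for the exercises above (illustrative only;
# the tutorial intends these to be filled in by the reader):
#
#   p.rect("group", "period", 0.9, 0.9, source=source,
#          fill_alpha=0.6, color="type_color")
#
#   text_props = {
#       "source": source,
#       "angle": 0,
#       "color": "black",
#       "text_align": "left",
#       "text_baseline": "middle",
#   }
#
#   show(p)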
bsd-3-clause
wscullin/spack
var/spack/repos/builtin/packages/lua-luaposix/package.py
3
1714
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * import glob class LuaLuaposix(Package): """Lua posix bindings, including ncurses""" homepage = "https://github.com/luaposix/luaposix/" url = "https://github.com/luaposix/luaposix/archive/release-v33.4.0.tar.gz" version('33.4.0', 'b36ff049095f28752caeb0b46144516c') extends("lua") def install(self, spec, prefix): rockspec = glob.glob('luaposix-*.rockspec') luarocks('--tree=' + prefix, 'install', rockspec[0])
lgpl-2.1
patilsangram/erpnext
erpnext/demo/setup/manufacture.py
14
4071
from __future__ import unicode_literals import random, json import frappe from frappe.utils import nowdate, add_days from erpnext.demo.setup.setup_data import import_json from erpnext.demo.domains import data from six import iteritems def setup_data(): import_json("Asset Category") setup_item() setup_workstation() setup_asset() import_json('Operation') setup_item_price() show_item_groups_in_website() import_json('BOM', submit=True) frappe.db.commit() frappe.clear_cache() def setup_workstation(): workstations = [u'Drilling Machine 1', u'Lathe 1', u'Assembly Station 1', u'Assembly Station 2', u'Packing and Testing Station'] for w in workstations: frappe.get_doc({ "doctype": "Workstation", "workstation_name": w, "holiday_list": frappe.get_all("Holiday List")[0].name, "hour_rate_consumable": int(random.random() * 20), "hour_rate_electricity": int(random.random() * 10), "hour_rate_labour": int(random.random() * 40), "hour_rate_rent": int(random.random() * 10), "working_hours": [ { "enabled": 1, "start_time": "8:00:00", "end_time": "15:00:00" } ] }).insert() def show_item_groups_in_website(): """set show_in_website=1 for Item Groups""" products = frappe.get_doc("Item Group", "Products") products.show_in_website = 1 products.route = 'products' products.save() def setup_asset(): assets = json.loads(open(frappe.get_app_path('erpnext', 'demo', 'data', 'asset.json')).read()) for d in assets: asset = frappe.new_doc('Asset') asset.update(d) asset.purchase_date = add_days(nowdate(), -random.randint(20, 1500)) asset.next_depreciation_date = add_days(asset.purchase_date, 30) asset.warehouse = "Stores - WPL" asset.set_missing_values() asset.make_depreciation_schedule() asset.flags.ignore_validate = True asset.flags.ignore_mandatory = True asset.save() asset.submit() def setup_item(): items = json.loads(open(frappe.get_app_path('erpnext', 'demo', 'data', 'item.json')).read()) for i in items: item = frappe.new_doc('Item') item.update(i) if hasattr(item, 'item_defaults') and item.item_defaults[0].default_warehouse: item.item_defaults[0].company = data.get("Manufacturing").get('company_name') warehouse = frappe.get_all('Warehouse', filters={'warehouse_name': item.item_defaults[0].default_warehouse}, limit=1) if warehouse: item.item_defaults[0].default_warehouse = warehouse[0].name item.insert() def setup_product_bundle(): frappe.get_doc({ 'doctype': 'Product Bundle', 'new_item_code': 'Wind Mill A Series with Spare Bearing', 'items': [ {'item_code': 'Wind Mill A Series', 'qty': 1}, {'item_code': 'Bearing Collar', 'qty': 1}, {'item_code': 'Bearing Assembly', 'qty': 1}, ] }).insert() def setup_item_price(): frappe.db.sql("delete from `tabItem Price`") standard_selling = { "Base Bearing Plate": 28, "Base Plate": 21, "Bearing Assembly": 300, "Bearing Block": 14, "Bearing Collar": 103.6, "Bearing Pipe": 63, "Blade Rib": 46.2, "Disc Collars": 42, "External Disc": 56, "Internal Disc": 70, "Shaft": 340, "Stand": 400, "Upper Bearing Plate": 300, "Wind Mill A Series": 320, "Wind Mill A Series with Spare Bearing": 750, "Wind MIll C Series": 400, "Wind Turbine": 400, "Wing Sheet": 30.8 } standard_buying = { "Base Bearing Plate": 20, "Base Plate": 28, "Base Plate Un Painted": 16, "Bearing Block": 13, "Bearing Collar": 96.4, "Bearing Pipe": 55, "Blade Rib": 38, "Disc Collars": 34, "External Disc": 50, "Internal Disc": 60, "Shaft": 250, "Stand": 300, "Upper Bearing Plate": 200, "Wing Sheet": 25 } for price_list in ("standard_buying", "standard_selling"): for item, rate in iteritems(locals().get(price_list)): frappe.get_doc({ 
"doctype": "Item Price", "price_list": price_list.replace("_", " ").title(), "item_code": item, "selling": 1 if price_list=="standard_selling" else 0, "buying": 1 if price_list=="standard_buying" else 0, "price_list_rate": rate, "currency": "USD" }).insert()
gpl-3.0
Stavitsky/neutron
neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py
15
1731
# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """L3 extension distributed mode Revision ID: 3927f7f7c456 Revises: db_healing Create Date: 2014-04-02 23:26:19.303633 """ # revision identifiers, used by Alembic. revision = '3927f7f7c456' down_revision = 'db_healing' from alembic import op import sqlalchemy as sa def upgrade(): context = op.get_context() op.create_table( 'router_extra_attributes', sa.Column('router_id', sa.String(length=36), nullable=False), sa.Column('distributed', sa.Boolean(), nullable=False, server_default=sa.sql.false()), sa.ForeignKeyConstraint( ['router_id'], ['routers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('router_id') ) if context.bind.dialect.name == 'ibm_db_sa': # NOTE(mriedem): DB2 stores booleans as 0 and 1. op.execute("INSERT INTO router_extra_attributes " "SELECT id as router_id, " "0 as distributed from routers") else: op.execute("INSERT INTO router_extra_attributes " "SELECT id as router_id, " "False as distributed from routers")
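
def downgrade():
    # Sketch of the inverse operation (an assumption; not shown in the
    # excerpt above, and later Neutron branches dropped downgrade support):
    # removing the table discards the per-router flags copied in upgrade().
    op.drop_table('router_extra_attributes')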
apache-2.0
camptocamp/odoo
addons/account_anglo_saxon/invoice.py
61
13374
############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) # 2004-2010 Tiny SPRL (<http://tiny.be>). # 2009-2010 Veritos (http://veritos.nl). # All Rights Reserved # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv, fields class account_invoice_line(osv.osv): _inherit = "account.invoice.line" _columns = { 'move_id': fields.many2one('stock.move', string="Move line", help="If the invoice was generated from a stock.picking, reference to the related move line."), } def move_line_get(self, cr, uid, invoice_id, context=None): res = super(account_invoice_line,self).move_line_get(cr, uid, invoice_id, context=context) inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context) company_currency = inv.company_id.currency_id.id def get_price(cr, uid, inv, company_currency, i_line, price_unit): cur_obj = self.pool.get('res.currency') if inv.currency_id.id != company_currency: price = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, price_unit * i_line.quantity, context={'date': inv.date_invoice}) else: price = price_unit * i_line.quantity return price if inv.type in ('out_invoice','out_refund'): for i_line in inv.invoice_line: if i_line.product_id and i_line.product_id.valuation == 'real_time': if inv.type == 'out_invoice': # debit account dacc will be the output account # first check the product, if empty check the category dacc = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id if not dacc: dacc = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id else: # = out_refund # debit account dacc will be the input account # first check the product, if empty check the category dacc = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id if not dacc: dacc = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id # in both cases the credit account cacc will be the expense account # first check the product, if empty check the category cacc = i_line.product_id.property_account_expense and i_line.product_id.property_account_expense.id if not cacc: cacc = i_line.product_id.categ_id.property_account_expense_categ and i_line.product_id.categ_id.property_account_expense_categ.id if dacc and cacc: price_unit = i_line.move_id and i_line.move_id.price_unit or i_line.product_id.standard_price res.append({ 'type':'src', 'name': i_line.name[:64], 'price_unit':price_unit, 'quantity':i_line.quantity, 'price':get_price(cr, uid, inv, company_currency, i_line, price_unit), 'account_id':dacc, 'product_id':i_line.product_id.id, 'uos_id':i_line.uos_id.id, 
                            'account_analytic_id': False,
                            'taxes':i_line.invoice_line_tax_id,
                            })

                        res.append({
                            'type':'src',
                            'name': i_line.name[:64],
                            'price_unit':price_unit,
                            'quantity':i_line.quantity,
                            'price': -1 * get_price(cr, uid, inv, company_currency, i_line, price_unit),
                            'account_id':cacc,
                            'product_id':i_line.product_id.id,
                            'uos_id':i_line.uos_id.id,
                            'account_analytic_id': False,
                            'taxes':i_line.invoice_line_tax_id,
                            })
        elif inv.type in ('in_invoice','in_refund'):
            for i_line in inv.invoice_line:
                if i_line.product_id and i_line.product_id.valuation == 'real_time':
                    if i_line.product_id.type != 'service':
                        # get the price difference account at the product
                        acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
                        if not acc:
                            # if not found on the product get the price difference account at the category
                            acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
                        a = None

                        if inv.type == 'in_invoice':
                            # oa will be the stock input account
                            # first check the product, if empty check the category
                            oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
                            if not oa:
                                oa = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
                        else:
                            # = in_refund
                            # oa will be the stock output account
                            # first check the product, if empty check the category
                            oa = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
                            if not oa:
                                oa = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
                        if oa:
                            # get the fiscal position
                            fpos = i_line.invoice_id.fiscal_position or False
                            a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
                        diff_res = []
                        # calculate and write down the possible price difference between invoice price and product price
                        for line in res:
                            if a == line['account_id'] and i_line.product_id.id == line['product_id']:
                                uom = i_line.product_id.uos_id or i_line.product_id.uom_id
                                valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
                                if inv.currency_id.id != company_currency:
                                    # convert the valuation price, not the (undefined)
                                    # standard_price name, into the invoice currency
                                    valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})
                                if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
                                    #for average/fifo/lifo costing method, fetch real cost price from incoming moves
                                    stock_move_obj = self.pool.get('stock.move')
                                    valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
                                    if valuation_stock_move:
                                        valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
                                if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
                                    price_diff = i_line.price_unit - valuation_price_unit
                                    line.update({'price': valuation_price_unit * line['quantity']})
                                    diff_res.append({
                                        'type': 'src',
                                        'name': i_line.name[:64],
                                        'price_unit': price_diff,
                                        'quantity': line['quantity'],
                                        'price': price_diff * line['quantity'],
                                        'account_id': acc,
                                        'product_id': line['product_id'],
                                        'uos_id': line['uos_id'],
                                        'account_analytic_id': line['account_analytic_id'],
                                        'taxes': line.get('taxes', []),
                                        })
                        res +=
diff_res return res def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None): fiscal_pool = self.pool.get('account.fiscal.position') res = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, context, company_id) if not product: return res if type in ('in_invoice','in_refund'): product_obj = self.pool.get('product.product').browse(cr, uid, product, context=context) if type == 'in_invoice': oa = product_obj.property_stock_account_input and product_obj.property_stock_account_input.id if not oa: oa = product_obj.categ_id.property_stock_account_input_categ and product_obj.categ_id.property_stock_account_input_categ.id else: oa = product_obj.property_stock_account_output and product_obj.property_stock_account_output.id if not oa: oa = product_obj.categ_id.property_stock_account_output_categ and product_obj.categ_id.property_stock_account_output_categ.id if oa: fpos = fposition_id and fiscal_pool.browse(cr, uid, fposition_id, context=context) or False a = fiscal_pool.map_account(cr, uid, fpos, oa) res['value'].update({'account_id':a}) return res class account_invoice(osv.osv): _inherit = "account.invoice" def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None): invoice_data = super(account_invoice, self)._prepare_refund(cr, uid, invoice, date, period_id, description, journal_id, context=context) if invoice.type == 'in_invoice': fiscal_position = self.pool.get('account.fiscal.position') for _, _, line_dict in invoice_data['invoice_line']: if line_dict.get('product_id'): product = self.pool.get('product.product').browse(cr, uid, line_dict['product_id'], context=context) counterpart_acct_id = product.property_stock_account_output and \ product.property_stock_account_output.id if not counterpart_acct_id: counterpart_acct_id = product.categ_id.property_stock_account_output_categ and \ product.categ_id.property_stock_account_output_categ.id if counterpart_acct_id: fpos = invoice.fiscal_position or False line_dict['account_id'] = fiscal_position.map_account(cr, uid, fpos, counterpart_acct_id) return invoice_data # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
shahbazn/neutron
neutron/agent/l3/dvr_snat_ns.py
44
1778
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.common import constants LOG = logging.getLogger(__name__) SNAT_NS_PREFIX = 'snat-' SNAT_INT_DEV_PREFIX = constants.SNAT_INT_DEV_PREFIX class SnatNamespace(namespaces.Namespace): def __init__(self, router_id, agent_conf, driver, use_ipv6): self.router_id = router_id name = self.get_snat_ns_name(router_id) super(SnatNamespace, self).__init__( name, agent_conf, driver, use_ipv6) @classmethod def get_snat_ns_name(cls, router_id): return namespaces.build_ns_name(SNAT_NS_PREFIX, router_id) def delete(self): ns_ip = ip_lib.IPWrapper(namespace=self.name) for d in ns_ip.get_devices(exclude_loopback=True): if d.name.startswith(SNAT_INT_DEV_PREFIX): LOG.debug('Unplugging DVR device %s', d.name) self.driver.unplug(d.name, namespace=self.name, prefix=SNAT_INT_DEV_PREFIX) # TODO(mrsmith): delete ext-gw-port LOG.debug('DVR: destroy snat ns: %s', self.name) super(SnatNamespace, self).delete()
apache-2.0
ztultrebor/BARKEVIOUS
BARKEVIOUS.py
1
1924
# coding: utf-8 #read in libraries import cPickle as pickle from webcrawler import coredump from dataloader import get_trawled_data, introduce_weighting from ratings import PowerRater from history import historical, model_the_model from predict import predict from oddsmaker import read_odds from betting import wager csv_file = 'NBA_data_2015' # whence the data come weight_factor = 60 # number of days over which to decrease the weight by 40% # weight factor needs justification HCA = 1.8 sigma = 13.5 # ask user for guidence on reading in data from web and write to csv file user_input = raw_input("Do you want to trawl the web for the latest data? ") if user_input in ['y', 'Y', 'yes', 'Yes', 'YES']: website = 'http://www.basketball-reference.com/leagues/NBA_2016_games.html' coredump(website, csv_file) #load data from csv as a pandas DataFrame data = get_trawled_data(csv_file, ['Date', 'Away', 'Away Score', 'Home', 'Home Score']) # compile a list of historical predictions and actual outcomes history_file = 'Predictive_outcomes_2015' past_predictions = historical(data, weight_factor, history_file, HCA, sigma) # get the fit parameters needed to correct errors in the historical model beta_correct = model_the_model(past_predictions) print 'Checking on the model parameters: %s' % beta_correct # make predictions todays_schedule = pickle.load(open('Today', 'rb')) data = introduce_weighting(data, weight_factor,home_court_advantage=HCA) # add weights column PwrRt = PowerRater(data).power_ratings # generate latest ratings print PwrRt.sort_values(by='rating', ascending=False) prob = [] for i in xrange(todays_schedule.shape[0]): prob.append(predict(todays_schedule.iloc[i], PwrRt, HCA, sigma)) todays_schedule['Prob'] = prob # pull in odds odds = read_odds('Odds.csv', todays_schedule) # determine optimal betting strategy print wager(odds) # pull in 538 predictions # model the 538 model
mit
dzz007/photivo
scons-local-2.2.0/SCons/Tool/gettext.py
14
2077
"""gettext tool """ # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. __revision__ = "src/engine/SCons/Tool/gettext.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo" ############################################################################# def generate(env,**kw): import SCons.Tool from SCons.Tool.GettextCommon \ import _translate, tool_list for t in tool_list(env['PLATFORM'], env): env.Tool(t) env.AddMethod(_translate, 'Translate') ############################################################################# ############################################################################# def exists(env): from SCons.Tool.GettextCommon \ import _xgettext_exists, _msginit_exists, \ _msgmerge_exists, _msgfmt_exists return _xgettext_exists(env) and _msginit_exists(env) \ and _msgmerge_exists(env) and _msgfmt_exists(env) #############################################################################
gpl-3.0
cernanalysispreservation/cap-client
cap_client/cli/metadata_cli.py
1
2900
# -*- coding: utf-8 -*- # # This file is part of CERN Analysis Preservation Framework. # Copyright (C) 2020 CERN. # # CERN Analysis Preservation Framework is free software; you can redistribute # it and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # CERN Analysis Preservation Framework is distributed in the hope that it will # be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with CERN Analysis Preservation Framework; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """Metadata CAP Client CLI.""" import click from cap_client.api.metadata_api import MetadataAPI from cap_client.utils import (ColoredGroup, MutuallyExclusiveOption, json_dumps, load_json, load_json_from_file, logger, pid_option) pass_api = click.make_pass_decorator(MetadataAPI, ensure=True) @click.group(cls=ColoredGroup) def metadata(): """Manage analysis metadata.""" @metadata.command() @pid_option(required=True) @click.option( '--field', help="Specify an EXISTING field\n eg. object.nested_array.0", ) @click.option( '--json', cls=MutuallyExclusiveOption, not_required_if="jsonfile", callback=load_json, help='\nJSON data or text.', ) @click.option( '--jsonfile', type=click.File('r'), cls=MutuallyExclusiveOption, not_required_if="json", callback=load_json_from_file, help='\nJSON file.', ) @pass_api @logger def update(api, pid, json, jsonfile, field): """Update analysis metadata.""" res = api.set( pid=pid, value=jsonfile if json is None else json, field=field, ) click.echo(json_dumps(res)) @metadata.command() @pid_option(required=True) @click.option( '--field', required=True, help="Specify field, eg. object.nested_array.0", ) @pass_api @logger def remove(api, pid, field): """Remove from analysis metadata.""" res = api.remove( pid=pid, field=field, ) click.echo(json_dumps(res)) @metadata.command() @pid_option(required=True) @click.option( '--field', help="Specify field, eg. object.nested_array.0", ) @pass_api @logger def get(api, pid, field): """Get analysis metadata.""" res = api.get( pid=pid, field=field, ) click.echo(json_dumps(res))
gpl-2.0
hoangt/tpzsimul.gem5
src/arch/arm/ArmSystem.py
9
4808
# Copyright (c) 2009, 2012-2013 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # Authors: Ali Saidi from m5.params import * from System import System class ArmMachineType(Enum): map = {'RealView_EB' : 827, 'RealView_PBX' : 1901, 'VExpress_ELT' : 2272, 'VExpress_CA9' : 2272, 'VExpress_EMM' : 2272, 'VExpress_EMM64' : 2272} class ArmSystem(System): type = 'ArmSystem' cxx_header = "arch/arm/system.hh" load_addr_mask = 0xffffffff multi_proc = Param.Bool(True, "Multiprocessor system?") boot_loader = Param.String("", "File that contains the boot loader code if any") gic_cpu_addr = Param.Addr(0, "Addres of the GIC CPU interface") flags_addr = Param.Addr(0, "Address of the flags register for MP booting") have_security = Param.Bool(False, "True if Security Extensions are implemented") have_virtualization = Param.Bool(False, "True if Virtualization Extensions are implemented") have_lpae = Param.Bool(False, "True if LPAE is implemented") have_generic_timer = Param.Bool(False, "True if the Generic Timer extension is implemented") highest_el_is_64 = Param.Bool(False, "True if the register width of the highest implemented exception level " "is 64 bits (ARMv8)") reset_addr_64 = Param.Addr(0x0, "Reset address if the highest implemented exception level is 64 bits " "(ARMv8)") phys_addr_range_64 = Param.UInt8(40, "Supported physical address range in bits when using AArch64 (ARMv8)") have_large_asid_64 = Param.Bool(False, "True if ASID is 16 bits in AArch64 (ARMv8)") class LinuxArmSystem(ArmSystem): type = 'LinuxArmSystem' cxx_header = "arch/arm/linux/system.hh" load_addr_mask = 0x0fffffff machine_type = Param.ArmMachineType('RealView_PBX', "Machine id from http://www.arm.linux.org.uk/developer/machines/") atags_addr = Param.Addr("Address where default atags structure should " \ "be written") boot_release_addr = Param.Addr(0xfff8, "Address where secondary CPUs " \ "spin waiting boot in the loader") dtb_filename = Param.String("", "File that contains the Device Tree Blob. Don't use DTB if empty.") early_kernel_symbols = Param.Bool(False, "enable early kernel symbol tables before MMU") enable_context_switch_stats_dump = Param.Bool(False, "enable stats/task info dumping at context switch boundaries") panic_on_panic = Param.Bool(False, "Trigger a gem5 panic if the " \ "guest kernel panics") panic_on_oops = Param.Bool(False, "Trigger a gem5 panic if the " \ "guest kernel oopses")
bsd-3-clause
pratapvardhan/pandas
pandas/core/tools/numeric.py
1
6034
import numpy as np import pandas as pd from pandas.core.dtypes.common import ( is_scalar, is_numeric_dtype, is_decimal, is_datetime_or_timedelta_dtype, is_number, _ensure_object) from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas._libs import lib def to_numeric(arg, errors='raise', downcast=None): """ Convert argument to a numeric type. The default return dtype is `float64` or `int64` depending on the data supplied. Use the `downcast` parameter to obtain other dtypes. Parameters ---------- arg : list, tuple, 1-d array, or Series errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaN - If 'ignore', then invalid parsing will return the input downcast : {'integer', 'signed', 'unsigned', 'float'} , default None If not None, and if the data has been successfully cast to a numerical dtype (or if the data was numeric to begin with), downcast that resulting data to the smallest numerical dtype possible according to the following rules: - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) - 'unsigned': smallest unsigned int dtype (min.: np.uint8) - 'float': smallest float dtype (min.: np.float32) As this behaviour is separate from the core conversion to numeric values, any errors raised during the downcasting will be surfaced regardless of the value of the 'errors' input. In addition, downcasting will only occur if the size of the resulting data's dtype is strictly larger than the dtype it is to be cast to, so if none of the dtypes checked satisfy that specification, no downcasting will be performed on the data. .. versionadded:: 0.19.0 Returns ------- ret : numeric if parsing succeeded. Return type depends on input. Series if Series, otherwise ndarray Examples -------- Take separate series and convert to numeric, coercing when told to >>> s = pd.Series(['1.0', '2', -3]) >>> pd.to_numeric(s) 0 1.0 1 2.0 2 -3.0 dtype: float64 >>> pd.to_numeric(s, downcast='float') 0 1.0 1 2.0 2 -3.0 dtype: float32 >>> pd.to_numeric(s, downcast='signed') 0 1 1 2 2 -3 dtype: int8 >>> s = pd.Series(['apple', '1.0', '2', -3]) >>> pd.to_numeric(s, errors='ignore') 0 apple 1 1.0 2 2 3 -3 dtype: object >>> pd.to_numeric(s, errors='coerce') 0 NaN 1 1.0 2 2.0 3 -3.0 dtype: float64 See also -------- pandas.DataFrame.astype : Cast argument to a specified dtype. pandas.to_datetime : Convert argument to datetime. pandas.to_timedelta : Convert argument to timedelta. numpy.ndarray.astype : Cast a numpy array to a specified type. 
""" if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'): raise ValueError('invalid downcasting method provided') is_series = False is_index = False is_scalars = False if isinstance(arg, ABCSeries): is_series = True values = arg.values elif isinstance(arg, ABCIndexClass): is_index = True values = arg.asi8 if values is None: values = arg.values elif isinstance(arg, (list, tuple)): values = np.array(arg, dtype='O') elif is_scalar(arg): if is_decimal(arg): return float(arg) if is_number(arg): return arg is_scalars = True values = np.array([arg], dtype='O') elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a list, tuple, 1-d array, or Series') else: values = arg try: if is_numeric_dtype(values): pass elif is_datetime_or_timedelta_dtype(values): values = values.astype(np.int64) else: values = _ensure_object(values) coerce_numeric = False if errors in ('ignore', 'raise') else True values = lib.maybe_convert_numeric(values, set(), coerce_numeric=coerce_numeric) except Exception: if errors == 'raise': raise # attempt downcast only if the data has been successfully converted # to a numerical dtype and if a downcast method has been specified if downcast is not None and is_numeric_dtype(values): typecodes = None if downcast in ('integer', 'signed'): typecodes = np.typecodes['Integer'] elif downcast == 'unsigned' and np.min(values) >= 0: typecodes = np.typecodes['UnsignedInteger'] elif downcast == 'float': typecodes = np.typecodes['Float'] # pandas support goes only to np.float32, # as float dtypes smaller than that are # extremely rare and not well supported float_32_char = np.dtype(np.float32).char float_32_ind = typecodes.index(float_32_char) typecodes = typecodes[float_32_ind:] if typecodes is not None: # from smallest to largest for dtype in typecodes: if np.dtype(dtype).itemsize <= values.dtype.itemsize: values = maybe_downcast_to_dtype(values, dtype) # successful conversion if values.dtype == dtype: break if is_series: return pd.Series(values, index=arg.index, name=arg.name) elif is_index: # because we want to coerce to numeric if possible, # do not use _shallow_copy_with_infer return pd.Index(values, name=arg.name) elif is_scalars: return values[0] else: return values
bsd-3-clause
dmccue/ansible
lib/ansible/parsing/yaml/loader.py
234
1877
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type try: from _yaml import CParser, CEmitter HAVE_PYYAML_C = True except ImportError: HAVE_PYYAML_C = False from yaml.resolver import Resolver from ansible.parsing.yaml.constructor import AnsibleConstructor if HAVE_PYYAML_C: class AnsibleLoader(CParser, AnsibleConstructor, Resolver): def __init__(self, stream, file_name=None): CParser.__init__(self, stream) AnsibleConstructor.__init__(self, file_name=file_name) Resolver.__init__(self) else: from yaml.composer import Composer from yaml.reader import Reader from yaml.scanner import Scanner from yaml.parser import Parser class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver): def __init__(self, stream, file_name=None): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) Composer.__init__(self) AnsibleConstructor.__init__(self, file_name=file_name) Resolver.__init__(self)
gpl-3.0
DimensionDataCBUSydney/plumbery
tests/test_engine.py
2
31173
#!/usr/bin/env python """ Tests for `plumbery` module. """ # special construct to allow relative import # if __name__ == "__main__" and __package__ is None: __package__ = "tests" from tests import dummy import base64 import logging import mock import os import unittest import yaml from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives import serialization import six if six.PY2: b = bytes = ensure_string = str else: def ensure_string(s): if isinstance(s, str): return s elif isinstance(s, bytes): return s.decode('utf-8') else: raise TypeError("Invalid argument %r for ensure_string()" % (s,)) def b(s): if isinstance(s, str): return s.encode('utf-8') elif isinstance(s, bytes): return s elif isinstance(s, int): return bytes([s]) else: raise TypeError("Invalid argument %r for b()" % (s,)) from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver from plumbery.__main__ import parse_args, main from plumbery.action import PlumberyAction from plumbery.engine import PlumberyEngine from plumbery.plogging import plogging from plumbery.polisher import PlumberyPolisher from plumbery import __version__ import requests_mock from .mock_api import DimensionDataMockHttp DIMENSIONDATA_PARAMS = ('user', 'password') myParameters = { 'locationId': 'EU8', 'domainName': 'aDifferentDomain', 'networkName': 'aDifferentNetwork' } myPlan = """ --- safeMode: False information: - hello - world links: documentation: "http://www.acme.com/" defaults: domain: ipv4: auto cloud-config: disable_root: false ssh_pwauth: true ssh_keys: rsa_private: | {{ pair1.rsa_private }} rsa_public: "{{ pair1.ssh.rsa_public }}" write_files: runcmd: parameters: locationId: information: - "the target data centre for this deployment" type: locations.list default: EU6 domainName: information: - "the name of the network domain to be deployed" type: str default: myDC networkName: information: - "the name of the Ethernet VLAN to be deployed" type: str default: myVLAN buildPolisher: alien actions: - ansible: output: gigafox_ansible.yaml - inventory: output: gigafox_inventory.yaml - prepare: key: ~/.ssh/myproject_rsa.pub output: gigafox_prepares.yaml --- # Frankfurt in Europe locationId: "{{ parameter.locationId }}" regionId: dd-eu blueprints: - myBlueprint: domain: name: "{{ parameter.domainName }}" ethernet: name: "{{ parameter.networkName }}" subnet: 10.1.10.0 nodes: - myServer: """ myBadPlan1 = """ --- parameters: locationId: information: - "the target data centre for this deployment" type: locations.list default: EU6 domainName: information: - "the name of the network domain to be deployed" type: str default: myDC networkName: information: - "the name of the Ethernet VLAN to be deployed" type: str default: myVLAN parameterWithoutDefaultValue: information: - "this definition is partial, and missing a defautl value" type: str --- locationId: "{{ parameter.locationId }}" blueprints: - myBlueprint: domain: name: "{{ parameter.domainName }}" ethernet: name: "{{ parameter.networkName }}" subnet: 10.1.10.0 nodes: - myServer: """ myEuropeanPlan = """ --- safeMode: False information: - hello - world links: documentation: "http://www.acme.com/" defaults: domain: ipv4: auto cloud-config: disable_root: false ssh_pwauth: true ssh_keys: rsa_private: | {{ pair1.rsa_private }} rsa_public: "{{ 
pair1.ssh.rsa_public }}" hostname: "{{ parameter.nodeName }}" packages: - ntp write_files: - path: /root/hosts.awk content: | #!/usr/bin/awk -f /^{{ {{ parameter.nodeName }}.private }}/ {next} /^{{ {{ parameter.nodeName }}.ipv6 }}/ {next} {print} END { print "{{ {{ parameter.nodeName }}.private }} {{ parameter.nodeName }}" print "{{ {{ parameter.nodeName }}.ipv6 }} {{ parameter.nodeName }}" } parameters: locationId: information: - "the target data centre for this deployment" type: locations.list default: EU8 regionId: information: - "the target region for this deployment" type: regions.list default: dd-eu domainName: information: - "the name of the network domain to be deployed" type: str default: myDC networkName: information: - "the name of the Ethernet VLAN to be deployed" type: str default: myVLAN nodeName: information: - "the name of the node to be deployed" type: str default: myServer --- locationId: "{{ parameter.locationId }}" regionId: {{ parameter.regionId }} blueprints: - myBlueprint: domain: name: "{{ parameter.domainName }}" ethernet: name: "{{ parameter.networkName }}" subnet: 10.1.10.0 nodes: - {{ parameter.nodeName }}: """ myAmericanBinding = { 'locationId': 'NA9', 'regionId': 'dd-na', 'nodeName': 'toto' } myFacility = { 'regionId': 'dd-na', 'locationId': 'NA9', 'blueprints': [{ 'fake': { 'domain': { 'name': 'VDC1', 'service': 'ADVANCED', 'description': 'fake'}, 'ethernet': { 'name': 'vlan1', 'subnet': '10.0.10.0', 'description': 'fake'}, 'nodes': [{ 'stackstorm': { 'description': 'fake', 'appliance': 'RedHat 6 64-bit 4 CPU' } }] } }] } myPrivatePlan = """ --- safeMode: True apiHost: quasimoto.com locationId: NA9 blueprints: - myBlueprint: domain: name: myDC ethernet: accept: - NA19::remoteNetwork nodes: - myServer: default: bee information: - complementary information memory: 5 cloud-config: packages: - smtp runcmd: - echo "world" """ class FakeLocation: id = 'EU7' name = 'data centre in Amsterdam' country = 'Netherlands' class FakeAction(PlumberyAction): def __init__(self, settings): self.count = 3 self.label = 'fake' def begin(self, engine): self.count += 100 def enter(self, facility): self.count *= 2 def process(self, blueprint): self.count += 5 def quit(self): self.count -= 2 def end(self): self.count += 1 class TestPlumberyEngine(unittest.TestCase): def test_init(self): engine = PlumberyEngine() engine.set_fittings(myPlan) self.assertEqual(engine.buildPolisher, 'alien') domain = engine.get_default('domain') self.assertEqual(domain['ipv4'], 'auto') cloudConfig = engine.get_default('cloud-config', {}) self.assertEqual(len(cloudConfig.keys()), 5) self.assertEqual(len(engine.information), 2) self.assertEqual(len(engine.links), 1) parameters = engine.get_parameters() self.assertEqual(parameters['parameter.locationId'], 'EU6') self.assertEqual(parameters['parameter.domainName'], 'myDC') self.assertEqual(parameters['parameter.networkName'], 'myVLAN') parameter = engine.get_parameter('locationId') self.assertEqual(parameter, 'EU6') parameter = engine.get_parameter('domainName') self.assertEqual(parameter, 'myDC') parameter = engine.get_parameter('networkName') self.assertEqual(parameter, 'myVLAN') self.assertEqual(len(engine.polishers), 3) for polisher in engine.polishers: self.assertTrue(isinstance(polisher, PlumberyPolisher)) self.assertEqual(engine.safeMode, False) self.assertEqual(len(engine.facilities), 1) facility = engine.facilities[0] self.assertEqual(facility.settings['locationId'], 'EU6') self.assertEqual(facility.settings['regionId'], 'dd-eu') blueprint = 
facility.blueprints[0]['myBlueprint'] self.assertEqual(blueprint['domain']['name'], 'myDC') self.assertEqual(blueprint['ethernet']['name'], 'myVLAN') def test_parameters(self): engine = PlumberyEngine() parameters = engine.get_parameters() self.assertTrue('parameter.locationId' not in parameters) self.assertTrue('parameter.domainName' not in parameters) self.assertTrue('parameter.networkName' not in parameters) with self.assertRaises(KeyError): engine.get_parameter('locationId') with self.assertRaises(KeyError): engine.get_parameter('domainName') with self.assertRaises(KeyError): engine.get_parameter('perfectlyUnknownParameter') with self.assertRaises(KeyError): engine.lookup('parameter.locationId') with self.assertRaises(ValueError): engine.set_fittings(myBadPlan1) engine.set_fittings(myPlan) parameters = engine.get_parameters() self.assertEqual(parameters['parameter.locationId'], 'EU6') self.assertEqual(parameters['parameter.domainName'], 'myDC') self.assertEqual(parameters['parameter.networkName'], 'myVLAN') self.assertEqual(engine.get_parameter('locationId'), 'EU6') self.assertEqual(engine.get_parameter('parameter.locationId'), 'EU6') with self.assertRaises(KeyError): engine.get_parameter('perfectlyUnknownParameter') engine = PlumberyEngine() engine.set_parameters(myParameters) parameters = engine.get_parameters() self.assertEqual(parameters['parameter.locationId'], 'EU8') self.assertEqual(parameters['parameter.domainName'], 'aDifferentDomain') self.assertEqual(parameters['parameter.networkName'], 'aDifferentNetwork') engine.set_fittings(myPlan) parameters = engine.get_parameters() self.assertEqual(parameters['parameter.locationId'], 'EU8') self.assertEqual(parameters['parameter.domainName'], 'aDifferentDomain') self.assertEqual(parameters['parameter.networkName'], 'aDifferentNetwork') self.assertEqual(engine.safeMode, False) self.assertEqual(len(engine.information), 2) self.assertEqual(len(engine.links), 1) domain = engine.get_default('domain') self.assertEqual(domain['ipv4'], 'auto') cloudConfig = engine.get_default('cloud-config', {}) self.assertEqual(len(cloudConfig.keys()), 5) parameter = engine.get_parameter('locationId') self.assertEqual(parameter, 'EU8') parameter = engine.get_parameter('domainName') self.assertEqual(parameter, 'aDifferentDomain') parameter = engine.get_parameter('networkName') self.assertEqual(parameter, 'aDifferentNetwork') self.assertEqual(len(engine.facilities), 1) facility = engine.facilities[0] self.assertEqual(facility.settings['locationId'], 'EU8') self.assertEqual(facility.settings['regionId'], 'dd-eu') blueprint = facility.blueprints[0]['myBlueprint'] self.assertEqual(blueprint['domain']['name'], 'aDifferentDomain') self.assertEqual(blueprint['ethernet']['name'], 'aDifferentNetwork') def test_environment(self): engine = PlumberyEngine() self.assertTrue(len(engine.lookup('environment.PATH')) > 0) with self.assertRaises(KeyError): engine.lookup('environment.PERFECTLY_UNKNOWN_FROM_HERE') def test_set(self): engine = PlumberyEngine() DimensionDataNodeDriver.connectionCls.conn_classes = ( None, DimensionDataMockHttp) DimensionDataMockHttp.type = None self.region = DimensionDataNodeDriver(*DIMENSIONDATA_PARAMS) file = os.path.abspath( os.path.dirname(__file__))+'/fixtures/dummy_rsa.pub' settings = { 'keys': [ "*hello-there*" ], } with self.assertRaises(ValueError): engine.set_settings(settings) settings = { 'keys': [ file ], } engine.set_settings(settings) self.assertTrue(isinstance(engine.get_shared_key_files(), list)) self.assertTrue(file in 
engine.get_shared_key_files()) settings = { 'safeMode': False, 'polishers': [ {'ansible': {}}, {'configure': {}}, ], 'keys': [ file, file ], } engine.set_settings(settings) self.assertEqual(engine.safeMode, False) self.assertTrue(isinstance(engine.get_shared_key_files(), list)) self.assertTrue(file in engine.get_shared_key_files()) engine.add_facility(myFacility) self.assertEqual(len(engine.facilities), 1) self.assertEqual(engine.get_shared_user(), 'root') engine.set_shared_user('ubuntu') self.assertEqual(engine.get_shared_user(), 'ubuntu') engine.set_shared_secret('fake_secret') self.assertEqual(engine.get_shared_secret(), 'fake_secret') random = engine.get_secret('random') self.assertEqual(len(random), 9) self.assertEqual(engine.get_secret('random'), random) engine.set_user_name('fake_name') self.assertEqual(engine.get_user_name(), 'fake_name') engine.set_user_password('fake_password') self.assertEqual(engine.get_user_password(), 'fake_password') def test_settings_private(self): engine = PlumberyEngine() engine.set_shared_secret('fake_secret') engine.set_user_name('fake_name') engine.set_user_password('fake_password') engine.set_fittings(myPrivatePlan) facilities = engine.list_facility('quasimoto.com') self.assertEqual(len(facilities), 1) facilities[0].power_on() # self.assertEqual(facilities[0].region.connection.host, 'quasimoto.com') def test_lifecycle(self): engine = PlumberyEngine() DimensionDataNodeDriver.connectionCls.conn_classes = ( None, DimensionDataMockHttp) DimensionDataMockHttp.type = None self.region = DimensionDataNodeDriver(*DIMENSIONDATA_PARAMS) engine.set_shared_secret('fake_secret') engine.set_user_name('fake_name') engine.set_user_password('fake_password') engine.do('build') engine.build_all_blueprints() engine.do('build', 'myBlueprint') engine.build_blueprint('myBlueprint') engine.do('deploy') engine.do('deploy', 'myBlueprint') engine.do('destroy') engine.destroy_all_blueprints() engine.do('destroy', 'myBlueprint') engine.destroy_blueprint('myBlueprint') engine.do('dispose') engine.do('dispose', 'myBlueprint') engine.do('polish') engine.polish_all_blueprints() engine.do('polish', 'myBlueprint') engine.polish_blueprint('myBlueprint') engine.do('refresh') engine.do('refresh', 'myBlueprint') engine.do('secrets') engine.do('start') engine.start_all_blueprints() engine.do('start', 'myBlueprint') engine.start_blueprint('myBlueprint') engine.do('stop') engine.stop_all_blueprints() engine.do('stop', 'myBlueprint') engine.stop_blueprint('myBlueprint') engine.do('wipe') engine.wipe_all_blueprints() engine.do('wipe', 'myBlueprint') engine.wipe_blueprint('myBlueprint') banner = engine.document_elapsed() self.assertEqual('Worked for you' in banner, True) def test_process_all_blueprints(self): engine = PlumberyEngine() DimensionDataNodeDriver.connectionCls.conn_classes = ( None, DimensionDataMockHttp) DimensionDataMockHttp.type = None self.region = DimensionDataNodeDriver(*DIMENSIONDATA_PARAMS) engine.set_shared_secret('fake_secret') engine.set_user_name('fake_name') engine.set_user_password('fake_password') engine.set_fittings(myPrivatePlan) engine.process_all_blueprints(action='noop') action = FakeAction({}) engine.process_all_blueprints(action) self.assertEqual(action.count, 210) def test_process_blueprint(self): engine = PlumberyEngine() DimensionDataNodeDriver.connectionCls.conn_classes = ( None, DimensionDataMockHttp) DimensionDataMockHttp.type = None self.region = DimensionDataNodeDriver(*DIMENSIONDATA_PARAMS) engine.set_shared_secret('fake_secret') 
engine.set_user_name('fake_name') engine.set_user_password('fake_password') engine.set_fittings(myPrivatePlan) engine.process_blueprint(action='noop', names='fake') action = FakeAction({}) engine.process_blueprint(action, names='fake') self.assertEqual(action.count, 205) def test_as_library(self): engine = PlumberyEngine(myEuropeanPlan, myAmericanBinding) DimensionDataNodeDriver.connectionCls.conn_classes = ( None, DimensionDataMockHttp) DimensionDataMockHttp.type = None self.region = DimensionDataNodeDriver(*DIMENSIONDATA_PARAMS) engine.set_shared_secret('fake_secret') engine.set_user_name('fake_name') engine.set_user_password('fake_password') facilities = engine.list_facility('NA9') self.assertEqual(len(facilities), 1) facility = facilities[0] self.assertEqual(facility.get_setting('regionId'), 'dd-na') self.assertEqual(facility.get_setting('locationId'), 'NA9') self.assertTrue(facility.get_blueprint('fake') is None) blueprint = facility.get_blueprint('myBlueprint') node = blueprint['nodes'][0] self.assertEqual(list(node)[0], 'toto') config = node['toto']['cloud-config'] self.assertEqual(config['hostname'], 'toto') self.assertEqual(config['write_files'][0]['content'].count('toto'), 6) engine.do('deploy') engine.do('dispose') def test_lookup(self): engine = PlumberyEngine() self.assertEqual(engine.lookup('plumbery.version'), __version__) engine.secrets = {} random = engine.lookup('secret.random') self.assertEqual(len(random), 9) self.assertEqual(engine.lookup('secret.random'), random) md5 = engine.lookup('secret.random.md5') self.assertEqual(len(md5), 32) self.assertNotEqual(md5, random) sha = engine.lookup('secret.random.sha1') self.assertEqual(len(sha), 40) self.assertNotEqual(sha, random) sha = engine.lookup('secret.random.sha256') self.assertEqual(len(sha), 64) self.assertNotEqual(sha, random) id1 = engine.lookup('id1.uuid') self.assertEqual(len(id1), 36) self.assertEqual(engine.lookup('id1.uuid'), id1) id2 = engine.lookup('id2.uuid') self.assertEqual(len(id2), 36) self.assertNotEqual(id1, id2) engine.lookup('application.secret') engine.lookup('database.secret') engine.lookup('master.secret') engine.lookup('slave.secret') original = b'hello world' print('original: {}'.format(original)) text = ensure_string(engine.lookup('rsa_public.pair1')) print('rsa_public.pair1: {}'.format(text)) self.assertTrue(text.startswith('ssh-rsa ')) text = b(text) key = serialization.load_ssh_public_key( data=text, backend=default_backend()) encrypted = key.encrypt( original, padding.OAEP( mgf=padding.MGF1(algorithm=hashes.SHA1()), algorithm=hashes.SHA1(), label=None ) ) encrypted = base64.b64encode(encrypted) print('encrypted: {}'.format(encrypted)) privateKey = engine.lookup('rsa_private.pair1') print('rsa_private.pair1: {}'.format(privateKey)) self.assertTrue(ensure_string(privateKey).startswith( '-----BEGIN RSA PRIVATE KEY-----')) privateKey = serialization.load_pem_private_key( b(privateKey), password=None, backend=default_backend()) decrypted = privateKey.decrypt( base64.b64decode(encrypted), padding.OAEP( mgf=padding.MGF1(algorithm=hashes.SHA1()), algorithm=hashes.SHA1(), label=None ) ) print('decrypted: {}'.format(decrypted)) self.assertEqual(decrypted, original) token = engine.lookup('https://discovery.etcd.io/new') self.assertEqual(token.startswith( 'https://discovery.etcd.io/'), True) self.assertEqual(len(token), 58) self.assertEqual(len(engine.secrets), 13) with self.assertRaises(LookupError): localKey = engine.lookup('rsa_private.local') localKey = engine.lookup('rsa_public.local') if 
len(localKey) > 0: path = engine.get_shared_key_files()[0] with open(os.path.expanduser(path)) as stream: text = stream.read() stream.close() self.assertEqual(localKey.strip(), text.strip()) plogging.info("Successful lookup of local public key") def test_secrets(self): engine = PlumberyEngine() engine.secrets = {'hello': 'world'} engine.save_secrets(plan='test_engine.yaml') engine.secrets = {} engine.load_secrets(plan='test_engine.yaml') self.assertEqual(engine.secrets['hello'], 'world') engine.forget_secrets(plan='test_engine.yaml') self.assertEqual(os.path.isfile('.test_engine.secrets'), False) def test_keys(self): engine = PlumberyEngine() self.assertEqual(engine._sharedKeyFiles, []) with self.assertRaises(ValueError): engine.set_shared_key_files('this_does_not_exist') self.assertTrue(isinstance(engine.get_shared_key_files(), list)) self.assertEqual(engine._sharedKeyFiles, engine.get_shared_key_files()) with mock.patch.object(engine, 'get_shared_key_files') as patched: patched.return_value = [] files = ['*unknown-file*', '**me-too*'] with self.assertRaises(ValueError): engine.set_shared_key_files(files) file = os.path.abspath( os.path.dirname(__file__))+'/fixtures/dummy_rsa.pub' engine.set_shared_key_files(file) self.assertTrue(isinstance(engine.get_shared_key_files(), list)) self.assertEqual(engine.get_shared_key_files()[0], file) self.assertEqual(engine._sharedKeyFiles, engine.get_shared_key_files()) with mock.patch.object(engine, 'get_shared_key_files') as patched: patched.return_value = [] files = [file, file, file] engine.set_shared_key_files(files) self.assertEqual(engine._sharedKeyFiles, [file]) if 'SHARED_KEY' in os.environ: memory = os.environ["SHARED_KEY"] else: memory = None engine._sharedKeyFiles = [] os.environ["SHARED_KEY"] = 'this_does_not_exist' with self.assertRaises(ValueError): engine.set_shared_key_files() with self.assertRaises(ValueError): engine.get_shared_key_files() self.assertTrue(isinstance(engine._sharedKeyFiles, list)) engine._sharedKeyFiles = [] os.environ["SHARED_KEY"] = file engine.set_shared_key_files() self.assertTrue(isinstance(engine.get_shared_key_files(), list)) self.assertEqual(engine.get_shared_key_files()[0], file) self.assertEqual(engine._sharedKeyFiles, engine.get_shared_key_files()) if memory is None: os.environ.pop("SHARED_KEY") else: os.environ["SHARED_KEY"] = memory with mock.patch.object(engine, 'get_shared_key_files') as patched: patched.return_value = [] self.assertTrue(isinstance(engine.get_shared_key_files(), list)) engine.set_shared_key_files() self.assertTrue(plogging.foundErrors()) with self.assertRaises(ValueError): engine.set_shared_key_files('this_does_not_exist') file = os.path.abspath( os.path.dirname(__file__))+'/fixtures/dummy_rsa.pub' engine.set_shared_key_files(file) self.assertEqual(engine._sharedKeyFiles, [file]) def test_parser(self): args = parse_args(['fittings.yaml', 'build', 'web']) self.assertEqual(args.fittings, 'fittings.yaml') self.assertEqual(args.action, 'build') self.assertEqual(args.blueprints, ['web']) self.assertEqual(args.facilities, None) args = parse_args( ['fittings.yaml', 'build', 'web', '-p', 'parameters.yaml']) self.assertEqual(args.parameters, ['parameters.yaml']) args = parse_args( ['fittings.yaml', 'build', 'web', '-p', 'parameters.yaml', '-s']) self.assertEqual(args.parameters, ['parameters.yaml']) self.assertEqual(args.safe, True) args = parse_args( ['fittings.yaml', 'build', 'web', '-p', 'parameters.yaml', '-d']) self.assertEqual(args.parameters, ['parameters.yaml']) 
self.assertEqual(args.debug, True) args = parse_args(['fittings.yaml', 'build', 'web', '-s']) self.assertEqual(args.safe, True) args = parse_args(['fittings.yaml', 'build', 'web', '-d']) self.assertEqual(args.debug, True) self.assertEqual( plogging.getEffectiveLevel(), logging.DEBUG) args = parse_args(['fittings.yaml', 'build', 'web', '-q']) self.assertEqual(args.quiet, True) self.assertEqual( plogging.getEffectiveLevel(), logging.WARNING) args = parse_args(['fittings.yaml', 'start', '@NA12']) self.assertEqual(args.fittings, 'fittings.yaml') self.assertEqual(args.action, 'start') self.assertEqual(args.blueprints, None) self.assertEqual(args.facilities, ['NA12']) args = parse_args([ 'fittings.yaml', 'prepare', 'web', 'sql', '@NA9', '@NA12']) self.assertEqual(args.fittings, 'fittings.yaml') self.assertEqual(args.action, 'prepare') self.assertEqual(args.blueprints, ['web', 'sql']) self.assertEqual(args.facilities, ['NA9', 'NA12']) args = parse_args([ 'fittings.yaml', 'prepare', 'web', '@NA9', 'sql', '@NA12']) self.assertEqual(args.fittings, 'fittings.yaml') self.assertEqual(args.action, 'prepare') self.assertEqual(args.blueprints, ['web', 'sql']) self.assertEqual(args.facilities, ['NA9', 'NA12']) args = parse_args(['fittings.yaml', 'polish']) self.assertEqual(args.fittings, 'fittings.yaml') self.assertEqual(args.action, 'polish') self.assertEqual(args.blueprints, None) self.assertEqual(args.facilities, None) def test_main(self): with self.assertRaises(SystemExit): main(['fittings.yaml', 'build', 'web', '@EU6']) engine = PlumberyEngine() engine.set_fittings(myPlan) engine.set_user_name('fake_name') engine.set_user_password('fake_password') with self.assertRaises(SystemExit): main(['-v'], engine) with self.assertRaises(SystemExit): main(['fittings.yaml', 'build', 'web'], engine) with self.assertRaises(SystemExit): main(['fittings.yaml', 'build', 'web', '-v'], engine) with self.assertRaises(SystemExit): main(['fittings.yaml', 'build', 'web', '@EU6'], engine) def test_bad_args(self): engine = PlumberyEngine() engine.set_fittings(myPlan) with self.assertRaises(SystemExit): main(['bad args'], engine) with self.assertRaises(SystemExit): main(['fittings.yaml'], engine) with self.assertRaises(SystemExit): main(['fittings.yaml', 'xyz123', 'web'], engine) with self.assertRaises(SystemExit): main(['fittings.yaml', 'build', 'web', '@'], engine) def test_param_http(self): engine = PlumberyEngine() with self.assertRaises(TypeError): engine.set_parameters(('http://smee.com/params.yml')) def test_remote_params(self): engine = PlumberyEngine() with requests_mock.mock() as m: m.get('http://smee.com/params.yml', text=yaml.dump(myParameters)) engine.set_parameters('http://smee.com/params.yml') if __name__ == '__main__': import sys sys.exit(unittest.main())
apache-2.0
CoolProp/CoolProp
wrappers/Python/CoolProp/Plots/PsychScript.py
2
2020
# This file was auto-generated by the PsychChart.py script in wrappers/Python/CoolProp/Plots if __name__ == '__main__': import numpy, matplotlib from CoolProp.HumidAirProp import HAPropsSI from CoolProp.Plots.Plots import InlineLabel p = 101325 Tdb = numpy.linspace(-10, 60, 100) + 273.15 # Make the figure and the axes fig = matplotlib.pyplot.figure(figsize=(10, 8)) ax = fig.add_axes((0.1, 0.1, 0.85, 0.85)) # Saturation line w = [HAPropsSI('W', 'T', T, 'P', p, 'R', 1.0) for T in Tdb] ax.plot(Tdb - 273.15, w, lw=2) # Humidity lines RHValues = [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] for RH in RHValues: w = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb] ax.plot(Tdb - 273.15, w, 'r', lw=1) # Humidity lines for H in [-20000, -10000, 0, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000]: # Line goes from saturation to zero humidity ratio for this enthalpy T1 = HAPropsSI('T', 'H', H, 'P', p, 'R', 1.0) - 273.15 T0 = HAPropsSI('T', 'H', H, 'P', p, 'R', 0.0) - 273.15 w1 = HAPropsSI('W', 'H', H, 'P', p, 'R', 1.0) w0 = HAPropsSI('W', 'H', H, 'P', p, 'R', 0.0) ax.plot(numpy.r_[T1, T0], numpy.r_[w1, w0], 'r', lw=1) ax.set_xlim(Tdb[0] - 273.15, Tdb[-1] - 273.15) ax.set_ylim(0, 0.03) ax.set_xlabel(r"$T_{db}$ [$^{\circ}$C]") ax.set_ylabel(r"$W$ ($m_{w}/m_{da}$) [-]") xv = Tdb # [K] for RH in [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]: yv = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb] y = HAPropsSI('W', 'P', p, 'H', 65000.000000, 'R', RH) T_K, w, rot = InlineLabel(xv, yv, y=y, axis=ax) string = r'$\phi$=' + '{s:0.0f}'.format(s=RH * 100) + '%' bbox_opts = dict(boxstyle='square,pad=0.0', fc='white', ec='None', alpha=0.5) ax.text(T_K - 273.15, w, string, rotation=rot, ha='center', va='center', bbox=bbox_opts) matplotlib.pyplot.show()
mit
CitoEngine/cito_engine
app/cito_engine/views/teams.py
1
2915
"""Copyright 2014 Cyrus Dasadia Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django.shortcuts import get_object_or_404, redirect, render from django.contrib.auth.decorators import login_required from django.template import RequestContext from django.db.models.query_utils import Q from cito_engine.models import Team from cito_engine.forms import teams @login_required(login_url='/login/') def view_all_teams(request): if request.user.perms.access_level > 4: return render(request, 'unauthorized.html') page_title = 'Teams' box_title = page_title try: teams = Team.objects.all().order_by('name') except Team.DoesNotExist: teams = None return render(request, 'view_teams.html', locals()) @login_required(login_url='/login/') def edit_team(request, team_id): if request.user.perms.access_level > 2: return render(request, 'unauthorized.html') page_title = 'Editing team' box_title = page_title team = get_object_or_404(Team, pk=team_id) if request.method == 'POST': form = teams.TeamForm(request.POST) if form.is_valid(): team_name = form.cleaned_data.get('name') if Team.objects.filter(~Q(pk=team_id), name__iexact=team_name).count() > 0: errors = ['Team with name \"%s\" already exists.' % team_name] else: team.name = team_name team.description = form.cleaned_data.get('description') team.members = form.cleaned_data.get('members') team.save() return redirect('/teams/') else: form = teams.TeamForm(instance=team) return render(request, 'generic_form.html', locals()) @login_required(login_url='/login/') def add_team(request): if request.user.perms.access_level > 1: return render(request, 'unauthorized.html') page_title = 'Add a new team' box_title = page_title if request.method == 'POST': form = teams.TeamForm(request.POST) if form.is_valid(): team_name = form.cleaned_data['name'] if Team.objects.filter(name__iexact=team_name).count() > 0: errors = ['Team with name \"%s\" already exists.' % team_name] else: form.save() return redirect('/teams/') else: form = teams.TeamForm() return render(request, 'generic_form.html', locals())
apache-2.0
tylercal/dragonfly
dragonfly/engines/backend_natlink/__init__.py
1
2141
# # This file is part of Dragonfly. # (c) Copyright 2007, 2008 by Christo Butcher # Licensed under the LGPL. # # Dragonfly is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Dragonfly is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with Dragonfly. If not, see # <http://www.gnu.org/licenses/>. # """ SR back-end package for DNS and Natlink ============================================================================ """ import logging _log = logging.getLogger("engine.natlink") #--------------------------------------------------------------------------- # Module level singleton instance of this engine implementation. _engine = None def is_engine_available(): """ Check whether Natlink is available. """ global _engine if _engine: return True # Attempt to import natlink. try: import natlink except ImportError as e: _log.info("Failed to import natlink package: %s" % (e,)) return False except Exception as e: _log.exception("Exception during import of natlink package: %s" % (e,)) return False try: if natlink.isNatSpeakRunning(): return True else: _log.info("Natlink is available but NaturallySpeaking is not" " running.") return False except Exception as e: _log.exception("Exception during natlink.isNatSpeakRunning(): %s" % (e,)) return False def get_engine(): """ Retrieve the Natlink back-end engine object. """ global _engine if not _engine: from .engine import NatlinkEngine _engine = NatlinkEngine() return _engine
lgpl-3.0
Eigenlabs/EigenD
tools/packages/SCons/compat/_scons_sets.py
28
19733
"""Classes to represent arbitrary sets (including sets of sets). This module implements sets using dictionaries whose values are ignored. The usual operations (union, intersection, deletion, etc.) are provided as both methods and operators. Important: sets are not sequences! While they support 'x in s', 'len(s)', and 'for x in s', none of those operations are unique for sequences; for example, mappings support all three as well. The characteristic operation for sequences is subscripting with small integers: s[i], for i in range(len(s)). Sets don't support subscripting at all. Also, sequences allow multiple occurrences and their elements have a definite order; sets on the other hand don't record multiple occurrences and don't remember the order of element insertion (which is why they don't support s[i]). The following classes are provided: BaseSet -- All the operations common to both mutable and immutable sets. This is an abstract class, not meant to be directly instantiated. Set -- Mutable sets, subclass of BaseSet; not hashable. ImmutableSet -- Immutable sets, subclass of BaseSet; hashable. An iterable argument is mandatory to create an ImmutableSet. _TemporarilyImmutableSet -- A wrapper around a Set, hashable, giving the same hash value as the immutable set equivalent would have. Do not use this class directly. Only hashable objects can be added to a Set. In particular, you cannot really add a Set as an element to another Set; if you try, what is actually added is an ImmutableSet built from it (it compares equal to the one you tried adding). When you ask if `x in y' where x is a Set and y is a Set or ImmutableSet, x is wrapped into a _TemporarilyImmutableSet z, and what's tested is actually `z in y'. """ # Code history: # # - Greg V. Wilson wrote the first version, using a different approach # to the mutable/immutable problem, and inheriting from dict. # # - Alex Martelli modified Greg's version to implement the current # Set/ImmutableSet approach, and make the data an attribute. # # - Guido van Rossum rewrote much of the code, made some API changes, # and cleaned up the docstrings. # # - Raymond Hettinger added a number of speedups and other # improvements. from __future__ import generators try: from itertools import ifilter, ifilterfalse except ImportError: # Code to make the module run under Py2.2 def ifilter(predicate, iterable): if predicate is None: def predicate(x): return x for x in iterable: if predicate(x): yield x def ifilterfalse(predicate, iterable): if predicate is None: def predicate(x): return x for x in iterable: if not predicate(x): yield x try: True, False except NameError: True, False = (0==0, 0!=0) __all__ = ['BaseSet', 'Set', 'ImmutableSet'] class BaseSet(object): """Common base class for mutable and immutable sets.""" __slots__ = ['_data'] # Constructor def __init__(self): """This is an abstract class.""" # Don't call this from a concrete subclass! if self.__class__ is BaseSet: raise TypeError, ("BaseSet is an abstract class. " "Use Set or ImmutableSet.") # Standard protocols: __len__, __repr__, __str__, __iter__ def __len__(self): """Return the number of elements of a set.""" return len(self._data) def __repr__(self): """Return string representation of a set. This looks like 'Set([<list of elements>])'. 
""" return self._repr() # __str__ is the same as __repr__ __str__ = __repr__ def _repr(self, sorted=False): elements = self._data.keys() if sorted: elements.sort() return '%s(%r)' % (self.__class__.__name__, elements) def __iter__(self): """Return an iterator over the elements or a set. This is the keys iterator for the underlying dict. """ return self._data.iterkeys() # Three-way comparison is not supported. However, because __eq__ is # tried before __cmp__, if Set x == Set y, x.__eq__(y) returns True and # then cmp(x, y) returns 0 (Python doesn't actually call __cmp__ in this # case). def __cmp__(self, other): raise TypeError, "can't compare sets using cmp()" # Equality comparisons using the underlying dicts. Mixed-type comparisons # are allowed here, where Set == z for non-Set z always returns False, # and Set != z always True. This allows expressions like "x in y" to # give the expected result when y is a sequence of mixed types, not # raising a pointless TypeError just because y contains a Set, or x is # a Set and y contain's a non-set ("in" invokes only __eq__). # Subtle: it would be nicer if __eq__ and __ne__ could return # NotImplemented instead of True or False. Then the other comparand # would get a chance to determine the result, and if the other comparand # also returned NotImplemented then it would fall back to object address # comparison (which would always return False for __eq__ and always # True for __ne__). However, that doesn't work, because this type # *also* implements __cmp__: if, e.g., __eq__ returns NotImplemented, # Python tries __cmp__ next, and the __cmp__ here then raises TypeError. def __eq__(self, other): if isinstance(other, BaseSet): return self._data == other._data else: return False def __ne__(self, other): if isinstance(other, BaseSet): return self._data != other._data else: return True # Copying operations def copy(self): """Return a shallow copy of a set.""" result = self.__class__() result._data.update(self._data) return result __copy__ = copy # For the copy module def __deepcopy__(self, memo): """Return a deep copy of a set; used by copy module.""" # This pre-creates the result and inserts it in the memo # early, in case the deep copy recurses into another reference # to this same set. A set can't be an element of itself, but # it can certainly contain an object that has a reference to # itself. from copy import deepcopy result = self.__class__() memo[id(self)] = result data = result._data value = True for elt in self: data[deepcopy(elt, memo)] = value return result # Standard set operations: union, intersection, both differences. # Each has an operator version (e.g. __or__, invoked with |) and a # method version (e.g. union). # Subtle: Each pair requires distinct code so that the outcome is # correct when the type of other isn't suitable. For example, if # we did "union = __or__" instead, then Set().union(3) would return # NotImplemented instead of raising TypeError (albeit that *why* it # raises TypeError as-is is also a bit subtle). def __or__(self, other): """Return the union of two sets as a new set. (I.e. all elements that are in either set.) """ if not isinstance(other, BaseSet): return NotImplemented return self.union(other) def union(self, other): """Return the union of two sets as a new set. (I.e. all elements that are in either set.) """ result = self.__class__(self) result._update(other) return result def __and__(self, other): """Return the intersection of two sets as a new set. (I.e. all elements that are in both sets.) 
""" if not isinstance(other, BaseSet): return NotImplemented return self.intersection(other) def intersection(self, other): """Return the intersection of two sets as a new set. (I.e. all elements that are in both sets.) """ if not isinstance(other, BaseSet): other = Set(other) if len(self) <= len(other): little, big = self, other else: little, big = other, self common = ifilter(big._data.has_key, little) return self.__class__(common) def __xor__(self, other): """Return the symmetric difference of two sets as a new set. (I.e. all elements that are in exactly one of the sets.) """ if not isinstance(other, BaseSet): return NotImplemented return self.symmetric_difference(other) def symmetric_difference(self, other): """Return the symmetric difference of two sets as a new set. (I.e. all elements that are in exactly one of the sets.) """ result = self.__class__() data = result._data value = True selfdata = self._data try: otherdata = other._data except AttributeError: otherdata = Set(other)._data for elt in ifilterfalse(otherdata.has_key, selfdata): data[elt] = value for elt in ifilterfalse(selfdata.has_key, otherdata): data[elt] = value return result def __sub__(self, other): """Return the difference of two sets as a new Set. (I.e. all elements that are in this set and not in the other.) """ if not isinstance(other, BaseSet): return NotImplemented return self.difference(other) def difference(self, other): """Return the difference of two sets as a new Set. (I.e. all elements that are in this set and not in the other.) """ result = self.__class__() data = result._data try: otherdata = other._data except AttributeError: otherdata = Set(other)._data value = True for elt in ifilterfalse(otherdata.has_key, self): data[elt] = value return result # Membership test def __contains__(self, element): """Report whether an element is a member of a set. (Called in response to the expression `element in self'.) """ try: return element in self._data except TypeError: transform = getattr(element, "__as_temporarily_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught return transform() in self._data # Subset and superset test def issubset(self, other): """Report whether another set contains this set.""" self._binary_sanity_check(other) if len(self) > len(other): # Fast check for obvious cases return False for elt in ifilterfalse(other._data.has_key, self): return False return True def issuperset(self, other): """Report whether this set contains another set.""" self._binary_sanity_check(other) if len(self) < len(other): # Fast check for obvious cases return False for elt in ifilterfalse(self._data.has_key, other): return False return True # Inequality comparisons using the is-subset relation. __le__ = issubset __ge__ = issuperset def __lt__(self, other): self._binary_sanity_check(other) return len(self) < len(other) and self.issubset(other) def __gt__(self, other): self._binary_sanity_check(other) return len(self) > len(other) and self.issuperset(other) # Assorted helpers def _binary_sanity_check(self, other): # Check that the other argument to a binary operation is also # a set, raising a TypeError otherwise. if not isinstance(other, BaseSet): raise TypeError, "Binary operation only permitted between sets" def _compute_hash(self): # Calculate hash code for a set by xor'ing the hash codes of # the elements. This ensures that the hash code does not depend # on the order in which elements are added to the set. 
This is # not called __hash__ because a BaseSet should not be hashable; # only an ImmutableSet is hashable. result = 0 for elt in self: result ^= hash(elt) return result def _update(self, iterable): # The main loop for update() and the subclass __init__() methods. data = self._data # Use the fast update() method when a dictionary is available. if isinstance(iterable, BaseSet): data.update(iterable._data) return value = True if type(iterable) in (list, tuple, xrange): # Optimized: we know that __iter__() and next() can't # raise TypeError, so we can move 'try:' out of the loop. it = iter(iterable) while True: try: for element in it: data[element] = value return except TypeError: transform = getattr(element, "__as_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught data[transform()] = value else: # Safe: only catch TypeError where intended for element in iterable: try: data[element] = value except TypeError: transform = getattr(element, "__as_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught data[transform()] = value class ImmutableSet(BaseSet): """Immutable set class.""" __slots__ = ['_hashcode'] # BaseSet + hashing def __init__(self, iterable=None): """Construct an immutable set from an optional iterable.""" self._hashcode = None self._data = {} if iterable is not None: self._update(iterable) def __hash__(self): if self._hashcode is None: self._hashcode = self._compute_hash() return self._hashcode def __getstate__(self): return self._data, self._hashcode def __setstate__(self, state): self._data, self._hashcode = state class Set(BaseSet): """ Mutable set class.""" __slots__ = [] # BaseSet + operations requiring mutability; no hashing def __init__(self, iterable=None): """Construct a set from an optional iterable.""" self._data = {} if iterable is not None: self._update(iterable) def __getstate__(self): # getstate's results are ignored if it is not return self._data, def __setstate__(self, data): self._data, = data def __hash__(self): """A Set cannot be hashed.""" # We inherit object.__hash__, so we must deny this explicitly raise TypeError, "Can't hash a Set, only an ImmutableSet." # In-place union, intersection, differences. # Subtle: The xyz_update() functions deliberately return None, # as do all mutating operations on built-in container types. # The __xyz__ spellings have to return self, though. 
def __ior__(self, other): """Update a set with the union of itself and another.""" self._binary_sanity_check(other) self._data.update(other._data) return self def union_update(self, other): """Update a set with the union of itself and another.""" self._update(other) def __iand__(self, other): """Update a set with the intersection of itself and another.""" self._binary_sanity_check(other) self._data = (self & other)._data return self def intersection_update(self, other): """Update a set with the intersection of itself and another.""" if isinstance(other, BaseSet): self &= other else: self._data = (self.intersection(other))._data def __ixor__(self, other): """Update a set with the symmetric difference of itself and another.""" self._binary_sanity_check(other) self.symmetric_difference_update(other) return self def symmetric_difference_update(self, other): """Update a set with the symmetric difference of itself and another.""" data = self._data value = True if not isinstance(other, BaseSet): other = Set(other) if self is other: self.clear() for elt in other: if elt in data: del data[elt] else: data[elt] = value def __isub__(self, other): """Remove all elements of another set from this set.""" self._binary_sanity_check(other) self.difference_update(other) return self def difference_update(self, other): """Remove all elements of another set from this set.""" data = self._data if not isinstance(other, BaseSet): other = Set(other) if self is other: self.clear() for elt in ifilter(data.has_key, other): del data[elt] # Python dict-like mass mutations: update, clear def update(self, iterable): """Add all values from an iterable (such as a list or file).""" self._update(iterable) def clear(self): """Remove all elements from this set.""" self._data.clear() # Single-element mutations: add, remove, discard def add(self, element): """Add an element to a set. This has no effect if the element is already present. """ try: self._data[element] = True except TypeError: transform = getattr(element, "__as_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught self._data[transform()] = True def remove(self, element): """Remove an element from a set; it must be a member. If the element is not a member, raise a KeyError. """ try: del self._data[element] except TypeError: transform = getattr(element, "__as_temporarily_immutable__", None) if transform is None: raise # re-raise the TypeError exception we caught del self._data[transform()] def discard(self, element): """Remove an element from a set if it is a member. If the element is not a member, do nothing. """ try: self.remove(element) except KeyError: pass def pop(self): """Remove and return an arbitrary set element.""" return self._data.popitem()[0] def __as_immutable__(self): # Return a copy of self as an immutable set return ImmutableSet(self) def __as_temporarily_immutable__(self): # Return self wrapped in a temporarily immutable set return _TemporarilyImmutableSet(self) class _TemporarilyImmutableSet(BaseSet): # Wrap a mutable set as if it was temporarily immutable. # This only supplies hashing and equality comparisons. def __init__(self, set): self._set = set self._data = set._data # Needed by ImmutableSet.__eq__() def __hash__(self): return self._set._compute_hash() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
gpl-3.0
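The record above carries only the back half of this sets module, so here is a minimal usage sketch of the Set/ImmutableSet API it implements (Python 2 only; it assumes the file is importable as `sets` and that the equality and union operators defined in the earlier, unshown part of the file behave as in the classic module; values are illustrative):

# Minimal sketch, Python 2; assumes the module above is saved as sets.py.
from sets import Set, ImmutableSet

a = Set([1, 2, 3])
b = Set([2, 3, 4])
assert a & b == Set([2, 3])        # intersection()
assert a ^ b == Set([1, 4])        # symmetric_difference()
assert a - b == Set([1])           # difference()
assert Set([1, 2]).issubset(a)     # also spelled <=

a.add(5)                           # single-element mutations
a.discard(99)                      # missing element: no error, unlike remove()

frozen = ImmutableSet(a)           # hashable, so usable as a dict key
cache = {frozen: 'result'}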
0jpq0/kbengine
kbe/src/lib/python/Lib/test/test_importlib/extension/test_finder.py
81
1317
from .. import abc
from .. import util as test_util
from . import util

machinery = test_util.import_importlib('importlib.machinery')

import unittest
import warnings


# XXX find_spec tests

class FinderTests(abc.FinderTests):

    """Test the finder for extension modules."""

    def find_module(self, fullname):
        importer = self.machinery.FileFinder(util.PATH,
                                             (self.machinery.ExtensionFileLoader,
                                              self.machinery.EXTENSION_SUFFIXES))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            return importer.find_module(fullname)

    def test_module(self):
        self.assertTrue(self.find_module(util.NAME))

    # No extension module as an __init__ available for testing.
    test_package = test_package_in_package = None

    # No extension module in a package available for testing.
    test_module_in_package = None

    # Extension modules cannot be an __init__ for a package.
    test_package_over_module = None

    def test_failure(self):
        self.assertIsNone(self.find_module('asdfjkl;'))


Frozen_FinderTests, Source_FinderTests = test_util.test_both(
        FinderTests, machinery=machinery)


if __name__ == '__main__':
    unittest.main()
lgpl-3.0
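Outside the test harness, the FileFinder pattern this test exercises looks roughly like the sketch below (Python 3; the directory and module name are placeholders, not values from the suite):

# Sketch only; '/usr/lib/ext' and 'some_ext' are hypothetical.
import importlib.machinery as machinery

loader_details = (machinery.ExtensionFileLoader,
                  machinery.EXTENSION_SUFFIXES)
finder = machinery.FileFinder('/usr/lib/ext', loader_details)

spec = finder.find_spec('some_ext')     # ModuleSpec, or None if no match
print(spec.origin if spec else 'not found')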
villaverde/iredadmin
tools/ira_tool_lib.py
1
1823
"""Library used by other scripts under tools/ directory.""" # Author: Zhang Huangbin <[email protected]> import os import sys import logging import web debug = False # Set True to print SQL queries. web.config.debug = debug os.environ['LC_ALL'] = 'C' rootdir = os.path.abspath(os.path.dirname(__file__)) + '/../' sys.path.insert(0, rootdir) import settings from libs import iredutils backend = settings.backend if backend in ['ldap', 'mysql']: sql_dbn = 'mysql' elif backend in ['pgsql']: sql_dbn = 'postgres' else: sys.exit('Error: Unsupported backend (%s).' % backend) # Config logging logging.basicConfig(level=logging.INFO, format='* [%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logger = logging.getLogger('iRedAdmin-Pro') def print_error(msg): print '< ERROR > ' + msg def get_db_conn(db): try: conn = web.database(dbn=sql_dbn, host=settings.__dict__[db + '_db_host'], port=int(settings.__dict__[db + '_db_port']), db=settings.__dict__[db + '_db_name'], user=settings.__dict__[db + '_db_user'], pw=settings.__dict__[db + '_db_password']) conn.supports_multiple_insert = True return conn except Exception, e: print_error(e) # Log in `iredadmin.log` def log_to_iredadmin(msg, event, admin='', loglevel='info'): conn = get_db_conn('iredadmin') try: conn.insert('log', admin=admin, event=event, loglevel=loglevel, msg=str(msg), ip='127.0.0.1', timestamp=iredutils.get_gmttime()) except: pass return None
gpl-2.0
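A hedged sketch of how a script under tools/ might consume this library (Python 2, like the module; it assumes the iredadmin_db_* settings exist and uses only the standard web.py select() call; the log columns mirror what log_to_iredadmin() inserts):

# Sketch, Python 2; names of settings and columns are assumptions.
from ira_tool_lib import get_db_conn, log_to_iredadmin, logger

conn = get_db_conn('iredadmin')           # reads iredadmin_db_* from settings
for row in conn.select('log', limit=5):   # standard web.py database API
    logger.info('event=%s msg=%s' % (row.event, row.msg))

log_to_iredadmin('cleanup finished', event='cleanup', admin='postmaster')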
ddurieux/alignak
alignak/objects/escalation.py
1
11934
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors # # This file is part of Alignak. # # Alignak is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Alignak is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Alignak. If not, see <http://www.gnu.org/licenses/>. # # # This file incorporates work covered by the following copyright and # permission notice: # # Copyright (C) 2009-2014: # Hartmut Goebel, [email protected] # Sebastien Coavoux, [email protected] # Guillaume Bour, [email protected] # aviau, [email protected] # Nicolas Dupeux, [email protected] # Grégory Starck, [email protected] # Gerhard Lausser, [email protected] # Andrew McGilvray, [email protected] # Christophe Simon, [email protected] # Jean Gabes, [email protected] # Romain Forlot, [email protected] # This file is part of Shinken. # # Shinken is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Shinken is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Shinken. If not, see <http://www.gnu.org/licenses/>. from item import Item, Items from alignak.util import strip_and_uniq from alignak.property import BoolProp, IntegerProp, StringProp, ListProp from alignak.log import logger _special_properties = ('contacts', 'contact_groups', 'first_notification_time', 'last_notification_time') _special_properties_time_based = ('contacts', 'contact_groups', 'first_notification', 'last_notification') class Escalation(Item): id = 1 # zero is always special in database, so we do not take risk here my_type = 'escalation' properties = Item.properties.copy() properties.update({ 'escalation_name': StringProp(), 'first_notification': IntegerProp(), 'last_notification': IntegerProp(), 'first_notification_time': IntegerProp(), 'last_notification_time': IntegerProp(), # by default don't use the notification_interval defined in # the escalation, but the one defined by the object 'notification_interval': IntegerProp(default=-1), 'escalation_period': StringProp(default=''), 'escalation_options': ListProp(default=['d', 'u', 'r', 'w', 'c'], split_on_coma=True), 'contacts': ListProp(default=[], split_on_coma=True), 'contact_groups': ListProp(default=[], split_on_coma=True), }) running_properties = Item.running_properties.copy() running_properties.update({ 'time_based': BoolProp(default=False), }) # For debugging purpose only (nice name) def get_name(self): return self.escalation_name # Return True if: # *time in in escalation_period or we do not have escalation_period # *status is in escalation_options # *the notification number is in our interval [[first_notification .. 
last_notification]] # if we are a classic escalation. # *If we are time based, we check if the time that we were in notification # is in our time interval def is_eligible(self, t, status, notif_number, in_notif_time, interval): small_states = { 'WARNING': 'w', 'UNKNOWN': 'u', 'CRITICAL': 'c', 'RECOVERY': 'r', 'FLAPPING': 'f', 'DOWNTIME': 's', 'DOWN': 'd', 'UNREACHABLE': 'u', 'OK': 'o', 'UP': 'o' } # If we are not time based, we check notification numbers: if not self.time_based: # Begin with the easy cases if notif_number < self.first_notification: return False # self.last_notification = 0 mean no end if self.last_notification != 0 and notif_number > self.last_notification: return False # Else we are time based, we must check for the good value else: # Begin with the easy cases if in_notif_time < self.first_notification_time * interval: return False # self.last_notification = 0 mean no end if self.last_notification_time != 0 and \ in_notif_time > self.last_notification_time * interval: return False # If our status is not good, we bail out too if status in small_states and small_states[status] not in self.escalation_options: return False # Maybe the time is not in our escalation_period if self.escalation_period is not None and not self.escalation_period.is_time_valid(t): return False # Ok, I do not see why not escalade. So it's True :) return True # t = the reference time def get_next_notif_time(self, t_wished, status, creation_time, interval): small_states = {'WARNING': 'w', 'UNKNOWN': 'u', 'CRITICAL': 'c', 'RECOVERY': 'r', 'FLAPPING': 'f', 'DOWNTIME': 's', 'DOWN': 'd', 'UNREACHABLE': 'u', 'OK': 'o', 'UP': 'o'} # If we are not time based, we bail out! if not self.time_based: return None # Check if we are valid if status in small_states and small_states[status] not in self.escalation_options: return None # Look for the min of our future validity start = self.first_notification_time * interval + creation_time # If we are after the classic next time, we are not asking for a smaller interval if start > t_wished: return None # Maybe the time we found is not a valid one.... if self.escalation_period is not None and not self.escalation_period.is_time_valid(start): return None # Ok so I ask for my start as a possibility for the next notification time return start # Check is required prop are set: # template are always correct # contacts OR contactgroups is need def is_correct(self): state = True cls = self.__class__ # If we got the _time parameters, we are time based. Unless, we are not :) if hasattr(self, 'first_notification_time') or hasattr(self, 'last_notification_time'): self.time_based = True special_properties = _special_properties_time_based else: # classic ones special_properties = _special_properties for prop, entry in cls.properties.items(): if prop not in special_properties: if not hasattr(self, prop) and entry.required: logger.info('%s: I do not have %s', self.get_name(), prop) state = False # Bad boy... # Raised all previously saw errors like unknown contacts and co if self.configuration_errors != []: state = False for err in self.configuration_errors: logger.info(err) # Ok now we manage special cases... 
if not hasattr(self, 'contacts') and not hasattr(self, 'contact_groups'): logger.info('%s: I do not have contacts nor contact_groups', self.get_name()) state = False # If time_based or not, we do not check all properties if self.time_based: if not hasattr(self, 'first_notification_time'): logger.info('%s: I do not have first_notification_time', self.get_name()) state = False if not hasattr(self, 'last_notification_time'): logger.info('%s: I do not have last_notification_time', self.get_name()) state = False else: # we check classical properties if not hasattr(self, 'first_notification'): logger.info('%s: I do not have first_notification', self.get_name()) state = False if not hasattr(self, 'last_notification'): logger.info('%s: I do not have last_notification', self.get_name()) state = False return state class Escalations(Items): name_property = "escalation_name" inner_class = Escalation def linkify(self, timeperiods, contacts, services, hosts): self.linkify_with_timeperiods(timeperiods, 'escalation_period') self.linkify_with_contacts(contacts) self.linkify_es_by_s(services) self.linkify_es_by_h(hosts) def add_escalation(self, es): self.add_item(es) # Will register escalations into service.escalations def linkify_es_by_s(self, services): for es in self: # If no host, no hope of having a service if not (hasattr(es, 'host_name') and hasattr(es, 'service_description')): continue es_hname, sdesc = es.host_name, es.service_description if '' in (es_hname.strip(), sdesc.strip()): continue for hname in strip_and_uniq(es_hname.split(',')): if sdesc.strip() == '*': slist = services.find_srvs_by_hostname(hname) if slist is not None: for s in slist: s.escalations.append(es) else: for sname in strip_and_uniq(sdesc.split(',')): s = services.find_srv_by_name_and_hostname(hname, sname) if s is not None: # print "Linking service", s.get_name(), 'with me', es.get_name() s.escalations.append(es) # print "Now service", s.get_name(), 'have', s.escalations # Will register escalations into host.escalations def linkify_es_by_h(self, hosts): for es in self: # If no host, no hope of having a service if (not hasattr(es, 'host_name') or es.host_name.strip() == '' or (hasattr(es, 'service_description') and es.service_description.strip() != '')): continue # I must be NOT a escalation on for service for hname in strip_and_uniq(es.host_name.split(',')): h = hosts.find_by_name(hname) if h is not None: # print "Linking host", h.get_name(), 'with me', es.get_name() h.escalations.append(es) # print "Now host", h.get_name(), 'have', h.escalations # We look for contacts property in contacts and def explode(self, hosts, hostgroups, contactgroups): for i in self: # items::explode_host_groups_into_hosts # take all hosts from our hostgroup_name into our host_name property self.explode_host_groups_into_hosts(i, hosts, hostgroups) # items::explode_contact_groups_into_contacts # take all contacts from our contact_groups into our contact property self.explode_contact_groups_into_contacts(i, contactgroups)
agpl-3.0
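The notification-number branch of is_eligible() above is easy to miss inside the larger method; here is a hedged, standalone restatement of just that rule, with invented numbers for illustration:

# Standalone restatement of the classic (non time-based) eligibility window.
def classic_escalation_window(notif_number, first_notification, last_notification):
    if notif_number < first_notification:
        return False      # too early to escalate
    if last_notification != 0 and notif_number > last_notification:
        return False      # past the window; last_notification == 0 means no end
    return True

assert not classic_escalation_window(1, first_notification=2, last_notification=5)
assert classic_escalation_window(3, first_notification=2, last_notification=5)
assert classic_escalation_window(9, first_notification=2, last_notification=0)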
masamichi/bite-project
deps/gdata-python-client/src/gdata/tlslite/utils/xmltools.py
101
7380
"""Helper functions for XML. This module has misc. helper functions for working with XML DOM nodes.""" from compat import * import os import re if os.name == "java": # Only for Jython from javax.xml.parsers import * import java builder = DocumentBuilderFactory.newInstance().newDocumentBuilder() def parseDocument(s): stream = java.io.ByteArrayInputStream(java.lang.String(s).getBytes()) return builder.parse(stream) else: from xml.dom import minidom from xml.sax import saxutils def parseDocument(s): return minidom.parseString(s) def parseAndStripWhitespace(s): try: element = parseDocument(s).documentElement except BaseException, e: raise SyntaxError(str(e)) stripWhitespace(element) return element #Goes through a DOM tree and removes whitespace besides child elements, #as long as this whitespace is correctly tab-ified def stripWhitespace(element, tab=0): element.normalize() lastSpacer = "\n" + ("\t"*tab) spacer = lastSpacer + "\t" #Zero children aren't allowed (i.e. <empty/>) #This makes writing output simpler, and matches Canonical XML if element.childNodes.length==0: #DON'T DO len(element.childNodes) - doesn't work in Jython raise SyntaxError("Empty XML elements not allowed") #If there's a single child, it must be text context if element.childNodes.length==1: if element.firstChild.nodeType == element.firstChild.TEXT_NODE: #If it's an empty element, remove if element.firstChild.data == lastSpacer: element.removeChild(element.firstChild) return #If not text content, give an error elif element.firstChild.nodeType == element.firstChild.ELEMENT_NODE: raise SyntaxError("Bad whitespace under '%s'" % element.tagName) else: raise SyntaxError("Unexpected node type in XML document") #Otherwise there's multiple child element child = element.firstChild while child: if child.nodeType == child.ELEMENT_NODE: stripWhitespace(child, tab+1) child = child.nextSibling elif child.nodeType == child.TEXT_NODE: if child == element.lastChild: if child.data != lastSpacer: raise SyntaxError("Bad whitespace under '%s'" % element.tagName) elif child.data != spacer: raise SyntaxError("Bad whitespace under '%s'" % element.tagName) next = child.nextSibling element.removeChild(child) child = next else: raise SyntaxError("Unexpected node type in XML document") def checkName(element, name): if element.nodeType != element.ELEMENT_NODE: raise SyntaxError("Missing element: '%s'" % name) if name == None: return if element.tagName != name: raise SyntaxError("Wrong element name: should be '%s', is '%s'" % (name, element.tagName)) def getChild(element, index, name=None): if element.nodeType != element.ELEMENT_NODE: raise SyntaxError("Wrong node type in getChild()") child = element.childNodes.item(index) if child == None: raise SyntaxError("Missing child: '%s'" % name) checkName(child, name) return child def getChildIter(element, index): class ChildIter: def __init__(self, element, index): self.element = element self.index = index def next(self): if self.index < len(self.element.childNodes): retVal = self.element.childNodes.item(self.index) self.index += 1 else: retVal = None return retVal def checkEnd(self): if self.index != len(self.element.childNodes): raise SyntaxError("Too many elements under: '%s'" % self.element.tagName) return ChildIter(element, index) def getChildOrNone(element, index): if element.nodeType != element.ELEMENT_NODE: raise SyntaxError("Wrong node type in getChild()") child = element.childNodes.item(index) return child def getLastChild(element, index, name=None): if element.nodeType != element.ELEMENT_NODE: raise 
SyntaxError("Wrong node type in getLastChild()") child = element.childNodes.item(index) if child == None: raise SyntaxError("Missing child: '%s'" % name) if child != element.lastChild: raise SyntaxError("Too many elements under: '%s'" % element.tagName) checkName(child, name) return child #Regular expressions for syntax-checking attribute and element content nsRegEx = "http://trevp.net/cryptoID\Z" cryptoIDRegEx = "([a-km-z3-9]{5}\.){3}[a-km-z3-9]{5}\Z" urlRegEx = "http(s)?://.{1,100}\Z" sha1Base64RegEx = "[A-Za-z0-9+/]{27}=\Z" base64RegEx = "[A-Za-z0-9+/]+={0,4}\Z" certsListRegEx = "(0)?(1)?(2)?(3)?(4)?(5)?(6)?(7)?(8)?(9)?\Z" keyRegEx = "[A-Z]\Z" keysListRegEx = "(A)?(B)?(C)?(D)?(E)?(F)?(G)?(H)?(I)?(J)?(K)?(L)?(M)?(N)?(O)?(P)?(Q)?(R)?(S)?(T)?(U)?(V)?(W)?(X)?(Y)?(Z)?\Z" dateTimeRegEx = "\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ\Z" shortStringRegEx = ".{1,100}\Z" exprRegEx = "[a-zA-Z0-9 ,()]{1,200}\Z" notAfterDeltaRegEx = "0|([1-9][0-9]{0,8})\Z" #A number from 0 to (1 billion)-1 booleanRegEx = "(true)|(false)" def getReqAttribute(element, attrName, regEx=""): if element.nodeType != element.ELEMENT_NODE: raise SyntaxError("Wrong node type in getReqAttribute()") value = element.getAttribute(attrName) if not value: raise SyntaxError("Missing Attribute: " + attrName) if not re.match(regEx, value): raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) element.removeAttribute(attrName) return str(value) #de-unicode it; this is needed for bsddb, for example def getAttribute(element, attrName, regEx=""): if element.nodeType != element.ELEMENT_NODE: raise SyntaxError("Wrong node type in getAttribute()") value = element.getAttribute(attrName) if value: if not re.match(regEx, value): raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) element.removeAttribute(attrName) return str(value) #de-unicode it; this is needed for bsddb, for example def checkNoMoreAttributes(element): if element.nodeType != element.ELEMENT_NODE: raise SyntaxError("Wrong node type in checkNoMoreAttributes()") if element.attributes.length!=0: raise SyntaxError("Extra attributes on '%s'" % element.tagName) def getText(element, regEx=""): textNode = element.firstChild if textNode == None: raise SyntaxError("Empty element '%s'" % element.tagName) if textNode.nodeType != textNode.TEXT_NODE: raise SyntaxError("Non-text node: '%s'" % element.tagName) if not re.match(regEx, textNode.data): raise SyntaxError("Bad Text Value for '%s': '%s' " % (element.tagName, textNode.data)) return str(textNode.data) #de-unicode it; this is needed for bsddb, for example #Function for adding tabs to a string def indent(s, steps, ch="\t"): tabs = ch*steps if s[-1] != "\n": s = tabs + s.replace("\n", "\n"+tabs) else: s = tabs + s.replace("\n", "\n"+tabs) s = s[ : -len(tabs)] return s def escape(s): return saxutils.escape(s)
apache-2.0
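A hedged usage sketch for the helpers above (Python 2, matching the module; it assumes the file is importable as xmltools, and the XML snippet is invented with tab-ified whitespace on purpose, since stripWhitespace() rejects anything else):

# Sketch, Python 2; module name and XML snippet are assumptions.
from xmltools import (parseAndStripWhitespace, checkName, getChild,
                      getReqAttribute, getText, shortStringRegEx)

doc = '<root version="1">\n\t<child>hello</child>\n</root>'
root = parseAndStripWhitespace(doc)                  # raises SyntaxError on bad input
checkName(root, "root")
ver = getReqAttribute(root, "version", "[0-9]+\\Z")  # also removes the attribute
child = getChild(root, 0, "child")
print ver, getText(child, shortStringRegEx)          # -> 1 hello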
frederick-masterton/django
django/contrib/gis/tests/layermap/tests.py
22
14357
# coding: utf-8 from __future__ import unicode_literals from copy import copy from decimal import Decimal import os import unittest from unittest import skipUnless from django.contrib.gis.gdal import HAS_GDAL from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, mysql from django.db import router from django.conf import settings from django.test import TestCase from django.utils._os import upath if HAS_GDAL: from django.contrib.gis.utils.layermapping import (LayerMapping, LayerMapError, InvalidDecimal, MissingForeignKey) from django.contrib.gis.gdal import DataSource from .models import ( City, County, CountyFeat, Interstate, ICity1, ICity2, Invalid, State, city_mapping, co_mapping, cofeat_mapping, inter_mapping) shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), os.pardir, 'data')) city_shp = os.path.join(shp_path, 'cities', 'cities.shp') co_shp = os.path.join(shp_path, 'counties', 'counties.shp') inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp') invalid_shp = os.path.join(shp_path, 'invalid', 'emptypoints.shp') # Dictionaries to hold what's expected in the county shapefile. NAMES = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo'] NUMS = [1, 2, 1, 19, 1] # Number of polygons for each. STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado'] @skipUnless(HAS_GDAL and HAS_SPATIAL_DB, "GDAL and spatial db are required.") class LayerMapTest(TestCase): def test_init(self): "Testing LayerMapping initialization." # Model field that does not exist. bad1 = copy(city_mapping) bad1['foobar'] = 'FooField' # Shapefile field that does not exist. bad2 = copy(city_mapping) bad2['name'] = 'Nombre' # Nonexistent geographic field type. bad3 = copy(city_mapping) bad3['point'] = 'CURVE' # Incrementing through the bad mapping dictionaries and # ensuring that a LayerMapError is raised. for bad_map in (bad1, bad2, bad3): with self.assertRaises(LayerMapError): LayerMapping(City, city_shp, bad_map) # A LookupError should be thrown for bogus encodings. with self.assertRaises(LookupError): LayerMapping(City, city_shp, city_mapping, encoding='foobar') def test_simple_layermap(self): "Test LayerMapping import of a simple point shapefile." # Setting up for the LayerMapping. lm = LayerMapping(City, city_shp, city_mapping) lm.save() # There should be three cities in the shape file. self.assertEqual(3, City.objects.count()) # Opening up the shapefile, and verifying the values in each # of the features made it to the model. ds = DataSource(city_shp) layer = ds[0] for feat in layer: city = City.objects.get(name=feat['Name'].value) self.assertEqual(feat['Population'].value, city.population) self.assertEqual(Decimal(str(feat['Density'])), city.density) self.assertEqual(feat['Created'].value, city.dt) # Comparing the geometries. pnt1, pnt2 = feat.geom, city.point self.assertAlmostEqual(pnt1.x, pnt2.x, 5) self.assertAlmostEqual(pnt1.y, pnt2.y, 5) def test_layermap_strict(self): "Testing the `strict` keyword, and import of a LineString shapefile." # When the `strict` keyword is set an error encountered will force # the importation to stop. with self.assertRaises(InvalidDecimal): lm = LayerMapping(Interstate, inter_shp, inter_mapping) lm.save(silent=True, strict=True) Interstate.objects.all().delete() # This LayerMapping should work b/c `strict` is not set. lm = LayerMapping(Interstate, inter_shp, inter_mapping) lm.save(silent=True) # Two interstate should have imported correctly. self.assertEqual(2, Interstate.objects.count()) # Verifying the values in the layer w/the model. 
ds = DataSource(inter_shp) # Only the first two features of this shapefile are valid. valid_feats = ds[0][:2] for feat in valid_feats: istate = Interstate.objects.get(name=feat['Name'].value) if feat.fid == 0: self.assertEqual(Decimal(str(feat['Length'])), istate.length) elif feat.fid == 1: # Everything but the first two decimal digits were truncated, # because the Interstate model's `length` field has decimal_places=2. self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2) for p1, p2 in zip(feat.geom, istate.path): self.assertAlmostEqual(p1[0], p2[0], 6) self.assertAlmostEqual(p1[1], p2[1], 6) def county_helper(self, county_feat=True): "Helper function for ensuring the integrity of the mapped County models." for name, n, st in zip(NAMES, NUMS, STATES): # Should only be one record b/c of `unique` keyword. c = County.objects.get(name=name) self.assertEqual(n, len(c.mpoly)) self.assertEqual(st, c.state.name) # Checking ForeignKey mapping. # Multiple records because `unique` was not set. if county_feat: qs = CountyFeat.objects.filter(name=name) self.assertEqual(n, qs.count()) def test_layermap_unique_multigeometry_fk(self): "Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings." # All the following should work. try: # Telling LayerMapping that we want no transformations performed on the data. lm = LayerMapping(County, co_shp, co_mapping, transform=False) # Specifying the source spatial reference system via the `source_srs` keyword. lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269) lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83') # Unique may take tuple or string parameters. for arg in ('name', ('name', 'mpoly')): lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg) except: self.fail('No exception should be raised for proper use of keywords.') # Testing invalid params for the `unique` keyword. for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))): self.assertRaises(e, LayerMapping, County, co_shp, co_mapping, transform=False, unique=arg) # No source reference system defined in the shapefile, should raise an error. if not mysql: self.assertRaises(LayerMapError, LayerMapping, County, co_shp, co_mapping) # Passing in invalid ForeignKey mapping parameters -- must be a dictionary # mapping for the model the ForeignKey points to. bad_fk_map1 = copy(co_mapping) bad_fk_map1['state'] = 'name' bad_fk_map2 = copy(co_mapping) bad_fk_map2['state'] = {'nombre': 'State'} self.assertRaises(TypeError, LayerMapping, County, co_shp, bad_fk_map1, transform=False) self.assertRaises(LayerMapError, LayerMapping, County, co_shp, bad_fk_map2, transform=False) # There exist no State models for the ForeignKey mapping to work -- should raise # a MissingForeignKey exception (this error would be ignored if the `strict` # keyword is not set). lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name') self.assertRaises(MissingForeignKey, lm.save, silent=True, strict=True) # Now creating the state models so the ForeignKey mapping may work. State.objects.bulk_create([ State(name='Colorado'), State(name='Hawaii'), State(name='Texas') ]) # If a mapping is specified as a collection, all OGR fields that # are not collections will be converted into them. For example, # a Point column would be converted to MultiPoint. 
Other things being done # w/the keyword args: # `transform=False`: Specifies that no transform is to be done; this # has the effect of ignoring the spatial reference check (because the # county shapefile does not have implicit spatial reference info). # # `unique='name'`: Creates models on the condition that they have # unique county names; geometries from each feature however will be # appended to the geometry collection of the unique model. Thus, # all of the various islands in Honolulu county will be in in one # database record with a MULTIPOLYGON type. lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name') lm.save(silent=True, strict=True) # A reference that doesn't use the unique keyword; a new database record will # created for each polygon. lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False) lm.save(silent=True, strict=True) # The county helper is called to ensure integrity of County models. self.county_helper() def test_test_fid_range_step(self): "Tests the `fid_range` keyword and the `step` keyword of .save()." # Function for clearing out all the counties before testing. def clear_counties(): County.objects.all().delete() State.objects.bulk_create([ State(name='Colorado'), State(name='Hawaii'), State(name='Texas') ]) # Initializing the LayerMapping object to use in these tests. lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name') # Bad feature id ranges should raise a type error. bad_ranges = (5.0, 'foo', co_shp) for bad in bad_ranges: self.assertRaises(TypeError, lm.save, fid_range=bad) # Step keyword should not be allowed w/`fid_range`. fr = (3, 5) # layer[3:5] self.assertRaises(LayerMapError, lm.save, fid_range=fr, step=10) lm.save(fid_range=fr) # Features IDs 3 & 4 are for Galveston County, Texas -- only # one model is returned because the `unique` keyword was set. qs = County.objects.all() self.assertEqual(1, qs.count()) self.assertEqual('Galveston', qs[0].name) # Features IDs 5 and beyond for Honolulu County, Hawaii, and # FID 0 is for Pueblo County, Colorado. clear_counties() lm.save(fid_range=slice(5, None), silent=True, strict=True) # layer[5:] lm.save(fid_range=slice(None, 1), silent=True, strict=True) # layer[:1] # Only Pueblo & Honolulu counties should be present because of # the `unique` keyword. Have to set `order_by` on this QuerySet # or else MySQL will return a different ordering than the other dbs. qs = County.objects.order_by('name') self.assertEqual(2, qs.count()) hi, co = tuple(qs) hi_idx, co_idx = tuple(map(NAMES.index, ('Honolulu', 'Pueblo'))) self.assertEqual('Pueblo', co.name) self.assertEqual(NUMS[co_idx], len(co.mpoly)) self.assertEqual('Honolulu', hi.name) self.assertEqual(NUMS[hi_idx], len(hi.mpoly)) # Testing the `step` keyword -- should get the same counties # regardless of we use a step that divides equally, that is odd, # or that is larger than the dataset. for st in (4, 7, 1000): clear_counties() lm.save(step=st, strict=True) self.county_helper(county_feat=False) def test_model_inheritance(self): "Tests LayerMapping on inherited models. See #12093." icity_mapping = {'name': 'Name', 'population': 'Population', 'density': 'Density', 'point': 'POINT', 'dt': 'Created', } # Parent model has geometry field. lm1 = LayerMapping(ICity1, city_shp, icity_mapping) lm1.save() # Grandparent has geometry field. 
lm2 = LayerMapping(ICity2, city_shp, icity_mapping) lm2.save() self.assertEqual(6, ICity1.objects.count()) self.assertEqual(3, ICity2.objects.count()) def test_invalid_layer(self): "Tests LayerMapping on invalid geometries. See #15378." invalid_mapping = {'point': 'POINT'} lm = LayerMapping(Invalid, invalid_shp, invalid_mapping, source_srs=4326) lm.save(silent=True) def test_textfield(self): "Tests that String content fits also in a TextField" mapping = copy(city_mapping) mapping['name_txt'] = 'Name' lm = LayerMapping(City, city_shp, mapping) lm.save(silent=True, strict=True) self.assertEqual(City.objects.count(), 3) self.assertEqual(City.objects.all().order_by('name_txt')[0].name_txt, "Houston") def test_encoded_name(self): """ Test a layer containing utf-8-encoded name """ city_shp = os.path.join(shp_path, 'ch-city', 'ch-city.shp') lm = LayerMapping(City, city_shp, city_mapping) lm.save(silent=True, strict=True) self.assertEqual(City.objects.count(), 1) self.assertEqual(City.objects.all()[0].name, "Zürich") class OtherRouter(object): def db_for_read(self, model, **hints): return 'other' def db_for_write(self, model, **hints): return self.db_for_read(model, **hints) def allow_relation(self, obj1, obj2, **hints): return None def allow_migrate(self, db, model): return True @skipUnless(HAS_GDAL and HAS_SPATIAL_DB, "GDAL and spatial db are required.") class LayerMapRouterTest(TestCase): def setUp(self): self.old_routers = router.routers router.routers = [OtherRouter()] def tearDown(self): router.routers = self.old_routers @unittest.skipUnless(len(settings.DATABASES) > 1, 'multiple databases required') def test_layermapping_default_db(self): lm = LayerMapping(City, city_shp, city_mapping) self.assertEqual(lm.using, 'other')
bsd-3-clause
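Stripped of the test harness, the LayerMapping pattern these tests exercise looks roughly like the sketch below; the app, model, shapefile path, and field names are placeholders, and the import path is the one the tests themselves use:

# Sketch; 'myapp', the model, and the shapefile path are hypothetical.
from django.contrib.gis.utils.layermapping import LayerMapping
from myapp.models import City   # hypothetical model with name/population/point

city_mapping = {
    'name': 'Name',             # model field -> shapefile attribute
    'population': 'Population',
    'point': 'POINT',           # geometry field -> OGR geometry type
}

lm = LayerMapping(City, '/data/cities/cities.shp', city_mapping)
lm.save(strict=True, silent=True)   # strict: raise on the first bad feature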
black-perl/ptop
ptop/core/plugin.py
1
1653
'''
ptop.core.plugin

This module defines the base class for plugins. It defines the basic
rules that are to be followed by a new plugin.
'''


class Plugin(object):
    ''' Base Plugin class '''

    def __init__(self, name, sensorType, interval):
        '''Initialize an instance of the Plugin class.

        :param name: Name of the plugin
        :param sensorType: How to render the plugin on the screen
        :param interval: The interval after which to update the stats
        :type sensorType: Chart or Table
        :rtype: Instance of Plugin class
        '''
        self.name = name
        self.type = sensorType
        self.interval = interval
        self.currentValue = {}

    def update(self):
        '''Update the plugin currentValue.

        :rtype: dict
        '''
        # to be overridden by the child class

    @property
    def text_info(self):
        '''Return the text part of the currentValue.

        :rtype: dict
        '''
        return self.currentValue['text']

    @property
    def graph_info(self):
        '''Return the graph part of the currentValue.

        :rtype: dict
        '''
        try:
            return self.currentValue['graph']
        except KeyError:
            raise Exception('The plugin does not have any graphical information')

    @property
    def table_info(self):
        '''Return the table part of the currentValue.

        :rtype: list
        '''
        try:
            return self.currentValue['table']
        except KeyError:
            raise Exception('The plugin does not have any tabular information')
mit
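A hedged example of subclassing Plugin; the CPU sensor, the 'Table' sensor-type string, and the psutil dependency are illustrative assumptions, not taken from ptop's real sensors:

# Sketch; psutil and the 'Table' type string are assumptions for illustration.
import psutil
from ptop.core.plugin import Plugin

class CPUPercentPlugin(Plugin):
    def __init__(self):
        super(CPUPercentPlugin, self).__init__('CPU', 'Table', interval=1)

    def update(self):
        pct = psutil.cpu_percent(interval=None)
        self.currentValue = {
            'text': {'cpu_percent': pct},
            'table': [{'CPU %': pct}],
        }

sensor = CPUPercentPlugin()
sensor.update()
print(sensor.text_info)       # e.g. {'cpu_percent': 7.3}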
Jgarcia-IAS/SAT
openerp/addons/marketing_campaign/report/__init__.py
441
1071
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import campaign_analysis

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ghchinoy/tensorflow
tensorflow/contrib/resampler/python/ops/resampler_ops.py
18
2918
# pylint: disable=g-bad-file-header
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Tensorflow op performing differentiable resampling."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.resampler.ops import gen_resampler_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader

_resampler_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_resampler_ops.so"))


def resampler(data, warp, name="resampler"):
  """Resamples input data at user defined coordinates.

  The resampler currently only supports bilinear interpolation of 2D data.

  Args:
    data: Tensor of shape `[batch_size, data_height, data_width,
      data_num_channels]` containing 2D data that will be resampled.
    warp: Tensor of minimum rank 2 containing the coordinates at which
      resampling will be performed. Since only bilinear interpolation is
      currently supported, the last dimension of the `warp` tensor must be 2,
      representing the (x, y) coordinate where x is the index for width and y
      is the index for height.
    name: Optional name of the op.

  Returns:
    Tensor of resampled values from `data`. The output tensor shape is
    determined by the shape of the warp tensor. For example, if `data` is of
    shape `[batch_size, data_height, data_width, data_num_channels]` and warp
    of shape `[batch_size, dim_0, ... , dim_n, 2]` the output will be of shape
    `[batch_size, dim_0, ... , dim_n, data_num_channels]`.

  Raises:
    ImportError: if the wrapper generated during compilation is not present
      when the function is called.
  """
  with ops.name_scope(name, "resampler", [data, warp]):
    data_tensor = ops.convert_to_tensor(data, name="data")
    warp_tensor = ops.convert_to_tensor(warp, name="warp")
    return gen_resampler_ops.resampler(data_tensor, warp_tensor)


@ops.RegisterGradient("Resampler")
def _resampler_grad(op, grad_output):
  data, warp = op.inputs
  grad_output_tensor = ops.convert_to_tensor(grad_output, name="grad_output")
  return gen_resampler_ops.resampler_grad(data, warp, grad_output_tensor)


ops.NotDifferentiable("ResamplerGrad")
apache-2.0
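A hedged usage sketch for the op above (TensorFlow 1.x graph mode; shapes and values are illustrative):

# Sketch; shapes and values are invented for illustration.
import numpy as np
import tensorflow as tf
from tensorflow.contrib import resampler

data = tf.constant(np.random.rand(1, 4, 4, 3), dtype=tf.float32)  # NHWC image
warp = tf.constant([[[1.5, 2.0]]], dtype=tf.float32)              # one (x, y) point
out = resampler.resampler(data, warp)                             # shape [1, 1, 3]

with tf.Session() as sess:
    print(sess.run(out))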
solomonvimal/pyras
pyras/controllers/hecras/hecrascontroller/ras41.py
1
49672
""" """ import os import os.path as osp class Controller(object): """HECRAS Controller vesrsion RAS41""" # %% Compute def Compute_Cancel(self): """ """ rc = self._rc rc.Compute_Cancel() def Compute_CurrentPlan(self): """ Computes the current plan. Parameters ---------- nmsg : int The number of returned messages. msg : str Messages returned from HECRASController during computations """ rc = self._rc nmsg = None msg = None res = rc.Compute_CurrentPlan(nmsg, msg) print(res, nmsg, msg) return res def Compute_HideComputationWindow(self): """ Set the computation window to be hidden during computations. Notes ----- This should be called before Compute_CurrentPlan. """ rc = self._rc rc.Compute_HideComputationWindow() def Compute_IsStillComputing(self): """ Returns True if a computation is still in execution. Returns ------- bool """ rc = self._rc res = rc.Compute_IsStillComputing() return res def Compute_ShowComputationWindow(self): """ Sets the computation window to be visible during computations. Notes ----- This hould be called before Compute_CurrentPlan. Because by default the RAS Controller shows the Computation Window, this is not necessary unless the Computation Window was already hidden in a previous line of code. """ rc = self._rc rc.Compute_ShowComputationWindow() def Compute_WATPlan(self): """ Computes a WAT plan. Returns ------- bool Notes ----- For WAT only. """ rc = self._rc res = rc.Compute_WATPlan() return res # %% Create def Create_WATPlanName(self, HECRASBasePlanName, SimulationName): """ Returns a WAT Plan Name, based i the RAS base plan name and the simulation. Parameters ---------- HECRASBasePlanName : str SimulationName : str Notes ----- For WT only. """ rc = self._rc res = rc.Create_WATPlanName(HECRASBasePlanName, SimulationName) return res # %% Current def CurrentGeomFile(self): """ Indicates the current HEC-RAS geometry file and its path. Returns ------- str """ rc = self._rc res = rc.CurrentGeomFile() return res def CurrentPlanFile(self): """ Indicates the current HEC-RAS plan file and its path. Returns ------- str """ rc = self._rc res = rc.CurrentPlanFile() return res def CurrentProjectFile(self): """ Indicates the current HEC-RAS project file and its path. Returns ------- str """ rc = self._rc res = rc.CurrentProjectFile() return res def CurrentProjectTitle(self): """ Indicates the current HEC-RAS project title. Returns ------- str """ rc = self._rc res = rc.CurrentProjectTitle() return res def CurrentSteadyFile(self): """ Indicates the current HEC-RAS steady flow file and its path. Returns ------- str """ rc = self._rc res = rc.CurrentSteadyFile() return res def CurrentUnSteadyFile(self): """ Indicates the current HEC-RAS unstead flow file and its path. Returns ------- str """ rc = self._rc res = rc.CurrentUnSteadyFile() return res # %% Edit Add def Edit_AddBC(self, river, reach, rs): """ Add a bridge/culvert. Parameters ---------- river : str The river name to add the bridge/culvert section to. reach : str The reach name to add the bridge/culvert to. rs : str The river station of the new bridge/culvert . Notes ----- The Edit_BC method must be included in the code after Edit_AddBC, and must cal the newly bridge/culvertin order for it to be saved to the geometry file. Edit_BC brings up the Bridge/Culvert editor. No edits are necessary to save the new bridge/culvert, the editor has to just open and close. Without Edit_BC, once the code has been completed and the HECRASController closes, HEC-RAS will close and the newly added cross section will be lost. 
""" rc = self._rc errmsg = '' res = rc.Edit_AddBC(river, reach, rs, errmsg) river, reach, rs, errmsg = res def Edit_AddIW(self, river, reach, rs): """ Add a inline structure section. Parameters ---------- river : str The river name to add the inline structure to. reach : str The reach name to add the inline structure to. rs : str The river station of the new inline structure. Notes ----- The Edit_IW method must be included in the code after Edit_AddIW, and must cal the newly added inline structure in order for it to be saved to the geometry file. Edit_IW brings up the Inline Structure Editor. No edits are necessary to save the new inline structure, the editor has to just open and close. Without Edit_IW, once the code has been completed and the HECRASController closes, HEC-RAS will close and the newly added cross section will be lost. """ rc = self._rc errmsg = '' res = rc.Edit_AddIW(river, reach, rs, errmsg) river, reach, rs, errmsg = res def Edit_AddLW(self, river, reach, rs): """ Add a lateral structure. Parameters ---------- river : str The river name to add the lateral structure to. reach : str The reach name to add the lateral structure to. rs : str The river station of the new lateral structure. Notes ----- The Edit_LW method must be included in the code after Edit_AddLW, and must cal the newly added lateral structure in order for it to be saved to the geometry file. Edit_LW brings up the Lateral Structure Editor. No edits are necessary to save the new lateral structure, the editor has to just open and close. Without Edit_LW, once the code has been completed and the HECRASController closes, HEC-RAS will close and the newly added cross section will be lost. """ rc = self._rc errmsg = '' res = rc.Edit_AddLW(river, reach, rs, errmsg) river, reach, rs, errmsg = res def Edit_AddXS(self, river, reach, rs): """ Add a cross section. Parameters ---------- river : str The river name to add the cross section to. reach : str The reach name to add the cross section to. rs : str The river station of the new cross setion. Notes ----- The Edit_XS method must be included in the code after Edit_AddXS, and must cal the newly added cross section in order for it to be saved to the geometry file. Edit_XS brings up the cross section editor. No edits are necessary to save the new cross section, the editor has to just open and close. Without Edit_XS, once the code has been completed and the HECRASController closes, HEC-RAS will close and the newly added cross section will be lost. """ rc = self._rc errmsg = '' res = rc.Edit_AddXS(river, reach, rs, errmsg) river, reach, rs, errmsg = res # %% Edit def Edit_BC(self, river, reach, rs): """ Opens the Bridge/Culvert Editor and displays the selected river station. Parameters ---------- river : str The river name of the bridge/culvert structure to edit. reach : str The reach name of the bridge/culvert to edit. rs : str The river station of the bridge/culvert to edit. Notes ----- Run-time is paused while edits are made in the Bridge/Culvert Editor. editor. Once the Bridge/Culvert Editor is closed, run-time resumes. """ rc = self._rc rc.Edit_BC(river, reach, rs) self._runtime.pause_bc() def Edit_GeometricData(self): """ Opens the Geometry Data window. Notes ----- Run-time is paused while edits are made in the Geometry Data Window. Once the Geometry Data Window is closed, run-time resumes. 
""" rc = self._rc rc.Edit_GeometricData() self._runtime.pause_geo() def Edit_IW(self, river, reach, rs): """ Opens the Inline Structure Editor and displays the selected river station. Parameters ---------- river : str The river name of the inline structure to edit. reach : str The reach name of the inline structure to edit. rs : str The river station of the inline structure to edit. Notes ----- Run-time is paused while edits are made in the inline structure editor. Once the Lateral Structure Editor is closed, run-time resumes. """ rc = self._rc rc.Edit_LW(river, reach, rs) self._runtime.pause_iw() def Edit_LW(self, river, reach, rs): """ Opens the Lateral Structure Editor and displays the selected river station. Parameters ---------- river : str The river name of the lateral structure to edit. reach : str The reach name of the lateral structure to edit. rs : str The river station of the lateral structure to edit. Notes ----- Run-time is paused while edits are made in the lateral structure editor. Once the Lateral Structure Editor is closed, run-time resumes. """ rc = self._rc rc.Edit_LW(river, reach, rs) self._runtime.pause_lw() def Edit_MultipleRun(self): """ Opens the Run Multiple Plans Dialog. Notes ----- Run-time does not pause while the Run Multiple Plans Dialog is open, so it is suggested that a message box be added after Edit_MultipleRun, so that the method does not end and close the window before the user can check plans. """ rc = self._rc rc.Edit_MultipleRun() self._runtime.pause_multiple() def Edit_PlanData(self): """ Opens the Steady or Unsteady Flow Analysis windows for edits (whichever is current). Notes ----- Run-time does not pause while the Unsteady Flow Editor is open, so it is suggested that a message box be added after Edit_UnsteadyFlowData, so that the method does not end and close the window before the user can make edits. """ rc = self._rc rc.Edit_PlanData() self._runtime.pause_plan() def Edit_QuasiUnsteadyFlowData(self): """ Opens the Unsteady Flow Editor Notes ----- Run-time does not pause while the Quasi-Unsteady Flow Editor is open, so it is suggested that a message box be added after Edit_QuasiUnsteadyFlowData, so that the method does not end and close the window before the user can make edits. """ rc = self._rc rc.Edit_QuasiUnsteadyFlowData() self._runtime.pause_quasi() def Edit_SedimentData(self): """ Opens the Sediment Data Editor. Notes ----- Run-time does not pause while the Sediment Data Editor is open, so it is suggested that a message box be added after Edit_SedimentData, so that the method does not end and close the window before the user can make edits. """ rc = self._rc rc.Edit_SedimentData() self._runtime.pause_sediment() def Edit_SteadyFlowData(self): """ Opens the Steady Flow Editor. Notes ----- Run-time does not pause while the Steady Flow Editor is open, so it is suggested that a message box be added after Edit_SteadyFlowData, so that the method does not end and close the window before the user can make edits. """ rc = self._rc rc.Edit_SteadyFlowData() self._runtime.pause_steady() def Edit_UnsteadyFlowData(self): """ Opens the Unsteady Flow Editor. Notes ----- Run-time does not pause while the Unsteady Flow Editor is open, so it is suggested that a message box be added after Edit_UnsteadyFlowData, so that the method does not end and close the window before the user can make edits. """ rc = self._rc rc.Edit_UnsteadyFlowData() self._runtime.pause_unsteady() def Edit_WaterQualityData(self): """ Opens the UWater Quality Data Editor. 
Notes ----- Run-time does not pause while the Water Quality Data Editor is open, so it is suggested that a message box be added after Edit_WaterQualityData so that the method does not end and close the window before the user can make edits. """ rc = self._rc rc.Edit_WaterQualityData() self._runtime.pause_quality() def Edit_XS(self, river, reach, rs): """ Opens the Cross Section Editor and displays the selected cross section. Parameters ---------- river : str The river name of the cross section. reach : str The reach name of the cross section. rs : str The river station of the cross section. Notes ----- Run-time is paused while edits are made in the Cross Section Editor. Once the Cross Section Editor is closed, run-time resumes. """ rc = self._rc res = rc.Edit_XS(river, reach, rs) self._runtime.pause_xs() # %% Export def ExportGIS(self): """ Export HEC-RAS results to an *.sdf export file that can be read into GIS using HEC-GeoRAS. Notes ----- The Export GIS Editos does NOT open when this subroutine is called. HECRASController uses whatever user inputs (i.e. profiles to export, results to export, types of geometric data to export, etc.) have already been set in the Editor and only wirtes the *.sdf export file. """ rc = self._rc rc.ExportGIS() # %% Geometry def Geometry(self): """ Returs the HECRASGeometry instance. Notes ----- See HECRASGeometry class for specific methods. """ return self._geometry def Geometery_GISImport(self, title, Filename): """ Imports geometry data from a *.sdf import file. Parameters ---------- title : str The title of the new geometry file to import. Filename : str The path and filename of the sdf file. Notes ----- The Import Geometry Data from GIS Editor does NOT open when this method is called. HECRASController uses default settings for importing. A new geometry file is created with this subroutine and al streams and nodes are imported. Note the misspelling "Geometerey"in the name of this method. """ rc = self._rc rc.Geometery_GISImport(title, Filename) def Geometry_GetGateNames(self, river, reach, station): """Returns a list of gates names. Parameters ---------- river : str The river name of the inline structure. reach : str The reach name of the inline structure. station : str The river station of the inline structure. """ rc = self._rc res = rc.Geometry_GetGateNames(river, reach, station) river, reach, station, ngate, GateNames, errmsg = res # Return an empty list or return None? if GateNames is None: GateNames = [] result = (ngate, list(GateNames)) if errmsg != '': raise Exception(errmsg) return result def Geometry_GetGML(self, geomfilename): """Returns the GML file txt for the current geometry file. Parameters ---------- geomfilename : str The name of the geometry file. """ # FIXME: Not working rc = self._rc res = rc.Geometry_GetGML(geomfilename) return def Geometry_GetNode(self, riv, rch, rs): """Returns the node ID of a selected node. Parameters ---------- riv : int The river ID of the node. rch : int The reach ID of the node. rs : str The river station of the node. Notes ----- Node can be any geometric component with a River Station (i.e. cross section, bridge/culvert, inline structure, lateral structure, multiple opening). 
""" # Input check if not isinstance(riv, int) or riv <= 0: raise Exception if not isinstance(rch, int) or rch <= 0: raise Exception if not isinstance(rs, str): raise Exception rc = self._rc res = rc.Geometry_GetNode(riv, rch, rs) node_id, riv, rch, rs = res if node_id == 0: node_id = None return node_id def Geometry_GetNodes(self, riv, rch): """ Returns a tuple of nodes and node types in a specified river and reach. Parameters ---------- riv : int The river ID. rch : int The reach ID. Returns ------- rs : tuple of str The tuple of river stations representing nodes on the selected river/reach. NodeType : tuple str The tuple of node types on the selected river/reach. """ rc = self._rc geo = self.Geometry() nRS = geo.nNode(riv, rch) rs = (float('nan'),)*(nRS + 1) NodeType = (float('nan'),)*(nRS + 1) res = rc.Geometry_GetNodes(riv, rch, nRS, rs, NodeType) riv, rch, nRS, rs, NodeType = res return rs, NodeType def Geometry_GetReaches(self, riv): """ Returns a list of the reach names in a given river id. Parameters ---------- riv : int The river ID. Returns ------- nReach : int The number of reaches in tge selected river. reach : str The names of the reaches on the selected river """ # Input check if not isinstance(riv, int): raise Exception nReach, reach = 0, tuple() rc = self._rc res = rc.Geometry_GetReaches(riv, nReach, reach) riv, nReach, reach = res # Return an empty list or return None? if reach is None: reach = [] result = (nReach, list(reach)) return result def Geometry_GetRivers(self): """ Returns a list of rivers names. Returns ---------- nRiver : int The number of rivers. river : list of str The list of the names of the rivers. """ rc = self._rc nRiver, river = 0, tuple() res = rc.Geometry_GetRivers(nRiver, river) nRiver, river = res if river is not None: result = nRiver, list(river) else: result = 0, [] return result def Geometry_SetMann(self, river, reach, rs, nMann, Mann_n, Station): """ Set the Manning's Values, by stationing, for a cross section. Parameters ---------- river : str The river to set Manning's n Values. reach : str The reach to set Manning's n Values. rs : str The river station of the cross section to set Manning's n values. nMann : int The number of Manning's n values to add. Mann_n : list/tuple of float A list of the Manning's n values to add. Station : list/tuple of float A list of the stationing values of the Manning's n breakpoints. Notes ----- If station values don't exist in the station elevation table, HECRASCntroller will use the closest station to apply the n value to. Python: This method takes care of 0-based indexing. """ rc = self._rc errmsg = '' # Adjust to 0-based indexing and force the use of tuples Mann_n = tuple([0] + list(Mann_n)) Station = tuple([0] + list(Station)) res = rc.Geometry_SetMann(river, reach, rs, nMann, Mann_n, Station, errmsg) flag, river, reach, rs, nMann, Mann_n, Station, errmsg = res if errmsg != '': raise Exception(errmsg) return flag def Geometry_SetMann_LChR(self, river, reach, rs, MannLOB, MannChan, MannROB): """ Sets the Manning's n Values, by left verbank, main channel, and right overbank, for a cross section. Parameters ---------- river : str The river to set Manning's n Values. reach : str The reach to set Manning's n Values. rs : str The river station of the cross section to set Manning's n Values. MannLOB : float Manning's n Value for the Left Overbank, MannChan : float Manning's n Value for the Main Channel. MannROB : float Manning's n Value for the Right Overbank. 
""" rc = self._rc errmsg = '' res = rc.Geometry_SetMann_LChR(river, reach, rs, MannLOB, MannChan, MannROB, errmsg) return res def Geometry_SetSAArea(self, SAName, Area): """ Set the Area of a Storage Area. Parameters ---------- SAName : str The name of the Storage Area. Area : float The area to set the Storage Area with. Notes ----- The Geometry_SetSAArea method works in runtime, sets the area, and returns a True value. But, you must ShowRAS and then save the geometry. Otherwise changes to SA area are not saved. Also, make sure to NOT close RAS during run time. The area ust already exists! Python: this method handles the save automatically. """ # TODO: using the parsed filed, look for valid areas and check # against this rc = self._rc errmsg = '' res = rc.Geometry_SetSAArea(SAName, Area, errmsg) geo = self.Geometry() geo.Save() return res # %% Get def GetDataLocations_Input(self, PlanTitle): """ Parameters ---------- PlanTitle : str The name of the plan. Returns ------- LocationDesciptions : list of str DSSFiles : list of str DSSPathnames : list of str """ rc = self._rc errmsg = '' LocationDesciptions = [] DSSFiles = [] DSSPathnames = [] res = rc.GetDataLocations_Input(PlanTitle, LocationDesciptions, DSSFiles, DSSPathnames, errmsg) return res def GetDataLocations_Input_count(self, PlanTitle): """ Parameters ---------- PlanTitle : str The name of the plan. Returns ------- int """ rc = self._rc errmsg = '' res = rc.GetDataLocations_Input_count(PlanTitle, errmsg) return res def GetDataLocations_Output(self, planTitle): """ Gets all stage and flow hydrograh output locations, including their dss file names and dss paths. Parameters ---------- planTitle : str The name of the plan. Returns ------- DSSFiles : list of str The list of DSS filenames. DSSPathnames : list of str The list of DSS Pathnames. """ rc = self._rc errmsg = '' DSSFiles = [] DSSPathnames = [] res = rc.Geometry_SetSAArea(planTitle, DSSFiles, DSSPathnames, errmsg) return res def GetDataLocations_Output_count(self, PlanTitle): """ Parameters ---------- PlanTitle : str The name of the plan. Returns ------- int """ rc = self._rc errmsg = '' res = rc.GetDataLocations_Output_count(PlanTitle, errmsg) return res # %% Versions def GetRASVersion(self): """ Returns the version number and date of HEC-RAS. Notes ----- Works the same as HECRASVersion. """ rc = self._rc version = rc.GetRASVersion() return version def HECRASVersion(self): """ Returns the version number and date of HEC-RAS. Notes ----- Works the same as GetRASVersion. """ rc = self._rc version = rc.HECRASVersion() return version # %% Map def Map_Add(self, Filename): """ Adds a map to the Geometry Schematic. Parameters ---------- Filename : str The path and filename of the image to add. Notes ----- This adds a map, but does not turn it on. """ rc = self._rc rc.Map_Add(Filename) # %% Output def Output_ComputationLevel_Export(self): """ """ def Output_GetNode(self, riv, reach, rs): """ Returns the Node ID, for a given River Station. Parameters ---------- riv : int The river ID number. rch : int Tge reach ID number. rs : str The river station of the desired node ID. Returns ------- int Node ID. Notes ----- Works like the Geometry_GetNode method, only this function read from the output file, so a *.O## file is requires (i.e. run computations first). 
""" rc = self._rc res = rc.Output_GetNode(riv, reach, rs) return res def Output_GetNodes(self): """ """ def Output_GetProfiles(self): """ """ def Output_GetReach(self): """ """ def Output_GetReaches(self): """ """ def Output_GetRiver(self): """ """ def Output_GetRivers(self): """ """ def Output_Initialize(self): """ """ def Output_NodeOutput(self): """ """ def Output_ReachOutput(self): """ """ def Output_Variables(self): """ """ def Output_VelDist(self): """ """ def OutputDSS_GetStageFlow(self): """ """ def OutputDSS_GetStageFlowSA(self): """ """ # %% Plan def Plan_GetFilename(self, planName): """ Given a plan name, returns the plan file, including path. Parameters ---------- planName : str The name of the plan. Returns ------- str Plan file and path """ rc = self._rc res = rc.Plan_GetFilename(planName) return res def Plan_Names(self, PlanCount, PlanNames, IncludeOnlyPlansInBaseDirectory): """ Gets a list of all the Plan Names in the active HEC-RAS project. Parameters ---------- IncludeOnlyPlansInBaseDirectory : bool Returns ------- PlanCount : int The number of plans. PlanNames : list of str The list of plan names. """ rc = self._rc res = rc.Plan_GetFilename(PlanCount, PlanNames, IncludeOnlyPlansInBaseDirectory) return res def Plan_Reports(self): """ List out the output plan "reports". Returns ------- ReportCount : int The number of plan reports. ReportNames : list of str The list of plan reports. """ rc = self._rc ReportCount, ReportNames = [], [] res = rc.Plan_GetFilename(ReportCount, ReportNames) return res def Plan_SetCurrent(self, PlanTitleToSet): """ Changes the current plan in the HEC-RAS project to the supplied Plan Name. Parameters ---------- PlanTitleToSet : str The name of the plan to set. Return ------ bool """ rc = self._rc res = rc.Plan_SetCurrent(PlanTitleToSet) return res def PlanOutput_IsCurrent(self, PlanTitleToCheck, ShowMessageList): """ Checks to see if a plan has an output file associated with it. Parameters ---------- PlanTitleToCheck : str The name of the plan to check. ShowMessageList : bool Whether or not to display a message box showing the plans. Returns ------- bool errmsg : str Notes ----- Displays a RAS window that shows a list of all the current plans in the RAS Project, and indicates the name and index number of the plan to check if it has been computed. If it does not have an output file (i.e. hasn't been computed), a message box will ask if you want to run the plan. A message box pops up that requires the user to cick OK to continue with run-time. Otherwise the RAS "Current Plan"window opens and closes quickly. """ rc = self._rc errmsg = '' res = rc.Plan_SetCurrent(PlanTitleToCheck, ShowMessageList, errmsg) return res def PlanOutput_SetCurrent(self, PlanTitleToSet): """ Sets the plan output to the selected plan. Parameters ---------- PlanTitleToSet : str The plan whose output to set as active. Returns ------- bool Notes ----- This only works if an output file exists for the selected plan. Does not change the current plan, nly changes the output file that is displayed in the output tables and plots. """ rc = self._rc res = rc.Plan_SetCurrent(PlanTitleToSet) return res def PlanOutput_SetMultiple(self, nPlanTitleToSet, PlanTitleToSet_0, ShowMessageList): """ Sets which pan to set to view in putput plots and tables. Parameters ---------- nPlanTitleToSet : int PlanTitleToSet_0 : str 0-based array of plan titles to set. ShowMessageList : bool Whether to have RAS display a message box showing the plan names. 
Returns ------- int Number of plans to set Notes ----- Sets the multiple plan outputs. Only works if output files exist for the selected plans. Does not change the current plan, only changes the output files that are displayed in the output table and plots. PlanOutput_SetMultiple requires a 0-based array for PlanTitleToSet_0. The method Plan_Names returns a 1-based array so it must be converted to 0-based prior to calling PlanOutput_SetMultiple. """ rc = self._rc # Mirrors the other wrapped PlanOutput_* calls above; the COM call's # out-parameters are assumed to follow the same pattern. res = rc.PlanOutput_SetMultiple(nPlanTitleToSet, PlanTitleToSet_0, ShowMessageList) return res # %% Plot def PlotHydraulicTables(self, river, reach, rs): """ Displays the Hydraulic Property Plot for a given River, Reach, and River Station. Parameters ---------- river : str The river name. reach : str The reach name. rs : str The river station. """ rc = self._rc rc.PlotHydraulicTables(river, reach, rs) def PlotPF(self, river, reach): """ Displays the Water Surface Profile Plot for a given River and Reach. Parameters ---------- river : str The river name. reach : str The reach name. Notes ----- Must have an output file for this to work. """ rc = self._rc rc.PlotPF(river, reach) def PlotPFGeneral(self, river, reach): """ Displays the General Profile Plot for a given River and Reach. Parameters ---------- river : str The river name. reach : str The reach name. Notes ----- Must have an output file for this to work. """ rc = self._rc rc.PlotPFGeneral(river, reach) def PlotRatingCurve(self, river, reach): """ Displays the Rating Curve for a given River and Reach. Parameters ---------- river : str The river name. reach : str The reach name. Notes ----- Must have an output file for this to work. """ rc = self._rc rc.PlotRatingCurve(river, reach) def PlotStageFlow(self, river, reach, rs): """ Displays the Stage and Flow Hydrograph for a given River, Reach and River Station. Parameters ---------- river : str The river name. reach : str The reach name. rs : str The river station. Notes ----- For unsteady plans only. Must have an output file for this to work. """ rc = self._rc rc.PlotStageFlow(river, reach, rs) def PlotStageFlow_SA(self, SAName): """ Displays the Stage and Flow Hydrograph for a given Storage Area. Notes ----- For unsteady flow only. Must have an output file for this to work. Names for storage areas cannot be read using the HECRASController, therefore the storage area name has to be hard coded, read from a file, or retrieved interactively during run-time. """ rc = self._rc rc.PlotStageFlow_SA(SAName) def PlotXS(self, river, reach, rs): """ Displays the Cross Section Plot for a given River, Reach and River Station. Parameters ---------- river : str The river name. reach : str The reach name. rs : str The river station. """ rc = self._rc rc.PlotXS(river, reach, rs) def PlotXYZ(self, river, reach): """ Displays the XYZ Plot for given River and Reach. Parameters ---------- river : str The river name. reach : str The reach name. """ rc = self._rc rc.PlotXYZ(river, reach) # %% Project def Project_Current(self): """Returns the file name and path of the current HEC-RAS project.""" rc = self._rc res = rc.Project_Current() return res def Project_New(self, title, Filename): """Starts a new HEC-RAS project with a given project fullpath and sets the title. Parameters ---------- title : str The title of the new HEC-RAS project. Filename : str Full path of the new HEC-RAS project. 
""" rc = self._rc # Check relative path to script dirpath = osp.dirname(osp.abspath(Filename)) if osp.isdir(dirpath): # Create directory recursively os.makedirs(dirpath) else: fullpath = osp.abspath(Filename) rc.Project_Open(title, fullpath) def Project_Open(self, ProjectFileName): """ Open a HEC-RAS project with a given project path. Parameters ---------- ProjectFileName : str Full path of the given HEC-RAS project to open. """ rc = self._rc # Check relative path to script if osp.isfile(ProjectFileName): fullpath = osp.abspath(ProjectFileName) else: error = 'File "{}" not found'.format(fullpath) raise IOError(error) rc.Project_Open(fullpath) def Project_Save(self): """ Save the current HEC-RAS project. """ rc = self._rc rc.Project_Save() def Project_SaveAs(self, newProjectName): """ Saves as a new project with a given project file name and path. Parameters ---------- newProjectName : str Path and file name of the HEC-RAS project to save as. """ rc = self._rc fullpath = osp.abspath(newProjectName) rc.Project_SaveAs(fullpath) # %% Schematic def Schematic_ReachCount(self): """ Returns the number of reaches in the current HEC-RAS project's active geometry. Returns ------- int Number of Reaches. """ rc = self._rc res = rc.Schematic_ReachCount() return res def Schematic_ReachPointCount(self): """ Returns the total number of reach vertex points that make up all of the schematic reach lines in the active geometry. Returns ------- int Number of reach vertex points. """ rc = self._rc res = rc.Schematic_ReachPointCount() return res def Schematic_ReachPoints(self): """ Returns rivers, reaches and x-y coordinates for each reach. Returns ------- RiverName_0 : list of str The list of river names. ReachName_0 : list of str The list of reach names. ReachStartIndex_0 : list of int The list of starting index numbers for coordinate points. ReachPointCount_0 : list of int The list of the number of reach points for each reach ReachPointX_0 : list of float The list of x coordinate points. ReachPointY_0 : list of float The list of y coordinate points. Notes ----- All array parameters are 0-based for this method and must be redimensioned. Python: 0-based is handled, so the user does not need to account for. """ rc = self._rc geo = self.Geometry() # FIXME: n_rivers = geo.nRiver() n_reaches = self.Schematic_ReachCount() n_points = self.Schematic_ReachPointCount() RiverName_0 = ('',)*(n_rivers) ReachName_0 = ('',)*(n_reaches) ReachStartIndex_0 = (0,)*(n_reaches) ReachPointCount_0 = (0,)*(n_reaches) ReachPointX_0 = (0.0,)*(n_points) ReachPointY_0 = (0.0,)*(n_points) res = rc.Schematic_ReachPoints(RiverName_0, ReachName_0, ReachStartIndex_0, ReachPointCount_0, ReachPointX_0, ReachPointY_0) (RiverName_0, ReachName_0, ReachStartIndex_0, ReachPointCount_0, ReachPointX_0, ReachPointY_0) = res return res def Schematic_XSCount(self): """ Returns the number of cross sections in the current HEC-RAS project's active geometry. Returns ------- int Number of Cross Sections """ rc = self._rc res = rc.Schematic_XSCount() return res def Schematic_XSPointCount(self): """ Returns the total number of cross secton vertex points that make up all of the cross sections in the active geometry. Returns ------- int Number of cross section points. """ rc = self._rc res = rc.Schematic_XSPointCount() return res def Schematic_XSPoints(self): """ Returns river stations, their reaches and x-y coordinates for each river station. Returns ------- RSName_0 : list of str The list of river stations. ReachIdex_0 : list of int The list of reach IDs. 
XSStartIndex_0 : list of int The list of starting index numbers for coordinate points. XSPointCount_0 : list of int The list of the number of cross section points for each reach cross section. XSPointX_0 : list of float The list of cross section x coordinate points. XSPointY_0 : list of float The list of cross section y coordinate points. Notes ----- All array parameters are 0-based for this method and must be redimensioned. """ rc = self._rc n_xs = self.Schematic_XSCount() n_points = self.Schematic_XSPointCount() RSName_0 = ('',)*(n_xs) ReachIndex_0 = (-1,)*(n_xs) XSStartIndex_0 = (-1,)*(n_xs) XSPointCount_0 = (-1,)*(n_xs) XSPointX_0 = (float('nan'),)*(n_points) XSPointY_0 = (float('nan'),)*(n_points) res = rc.Schematic_XSPoints(RSName_0, ReachIndex_0, XSStartIndex_0, XSPointCount_0, XSPointX_0, XSPointY_0) (RSName_0, ReachIndex_0, XSStartIndex_0, XSPointCount_0, XSPointX_0, XSPointY_0) = res return res # %% Set TODO: def SetDataLocations(self, PlanTitle, count, LocationDesciptions, DSSFiles, DSSPathnames): """ PlanTitle : str count : int LocationDesciptions : str DSSFiles : str DSSPathnames : str """ rc = self._rc errmsg = '' res = rc.SetDataLocations(PlanTitle, count, LocationDesciptions, DSSFiles, DSSPathnames, errmsg) # %% Show def ShowRas(self): """ Displays the main HEC-RAS window. Notes ----- Once a RAS project has been opened, ShowRAS will display it. Just opening a RAS project only opens it as a process running in the background. You have to ShowRAS to see it on your monitor. Run-time must be paused in some way to be able to see HEC-RAS though. If the RAS Controller is called within a function, as soon as that function has been executed and completed, the instance of HECRASController will close (thus closing the HEC-RAS application). To keep HEC-RAS open, throw out a message box that requires user interaction to close, which effectively pauses the run-time. """ rc = self._rc rc.ShowRas() # %% Steady def SteadyFlow_ClearFlowData(self): """ Clears the flow data in the current plan's steady flow file. Notes ----- For steady flow plans only. """ rc = self._rc rc.SteadyFlow_ClearFlowData() def SteadyFlow_FixedWSBoundary(self, river, reach, Downstream, WSElev): """ Sets fixed water surface boundary conditions. Parameters ---------- river : str The River name. reach : str The Reach name. Downstream : bool True if this is a downstream boundary. Otherwise False. WSElev : list of float The list of water surface elevations to set as fixed water surface boundary conditions. Notes ----- For steady flow plans only. The WSElev list contains fixed water surface elevations for each profile in the active plan's flow file. """ rc = self._rc rc.SteadyFlow_FixedWSBoundary(river, reach, Downstream, WSElev) def SteadyFlow_nProfile(self): """ Returns the number of steady flow profiles in the current plan's active steady flow file. Notes ----- For steady flow plans only. """ rc = self._rc res = rc.SteadyFlow_nProfile() return res def SteadyFlow_SetFlow(self, river, reach, rs, Flow): """ For a given River Station, sets the flows for each profile in the active plan's steady flow file. Parameters ---------- river : str The river name. reach : str The reach name. rs : str The river station. Flow : list/tuple of float The list/tuple of flow values to add. Notes ----- For steady flow plans only. If the River Station currently is not in the flow table, it will be added. Need to first determine the number of profiles to set up the item count in the Flow array. 
""" rc = self._rc res = rc.SteadyFlow_SetFlow(river, reach, rs, Flow) # %% Table def TablePF(self, river, reach): """ Displays the Profile Output Table for a given river, reach. Parameters ---------- river : str The river name. reach : str The reach name. """ rc = self._rc res = rc.TablePF(river, reach) def TableXS(self, river, reach, rs): """ Displays the Cross Section Output Table for a given river, reach and river station. Parameters ---------- river : str The river name. reach : str The reach name. rs : str The river station. """ rc = self._rc res = rc.TableXS(river, reach, rs) # %% Unsteady def UnsteadyFlow_SetGateOpening_Constant(self, river, reach, rs, GateName, OpenHeight): """ Sets the gate opening for a specified gate group to a constant value in the Time Series Gate Opening boundary condition. Parameters ---------- river : str The river name. reach : str The reach name. rs : str The river station. GateName : str The gate group name to set a new gate opening height. OpenHeight : float The gate opening height to set. Notes ----- The time interval in the TS Gate Opening boundary condition is set to 1 year. """ rc = self._rc errmsg = '' res = rc.UnsteadyFlow_SetGateOpening_Constant(river, reach, rs, GateName, OpenHeight) river, reach, rs, GateName, OpenHeight, errmsg = res return errmsg class ControllerDeprecated(object): """ """ pass class RASEvents: """Not working""" def HECRASController_ComputeProgressBar(self, Progress): """ Repeatedly returns a single value between 0 and 1, indicating the progress of the computations. Parameters ---------- Progress : float Progress of computations [0, 1] Notes ----- Must instantiate the HECRASController "With Events". Then the event rc.ComputeProgressBar becomes available for code. rc being the variable name for the instanciated HECRASController. rc_ComputeProgressBar is called repeatedly once Compute_CurrentPlan is called and thorugh the duration of the HEC-RAS Computations. Python: this events do not work with win32com """ print(Progress) return Progress def ComputeProgressMessage(self, msg): """ Repeatedly returns computations messages during computations. Parameters ---------- Msg : str Computation message. Notes ----- Must instantiate the HECRASController "With Events". Then the method rc_ComputeProgressBar becomes available for code. rc being the variable name for the instanciated HECRASController. rc_ComputeProgressMessage is called repeatedly once Compute_CurrentPlan is called and thorugh the duration of the HEC-RAS Computations. Python: this events do not work with win32com """ print(msg) return msg
mit
chenc10/Spark-PAF
dist/examples/src/main/python/ml/cross_validator.py
8
4328
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function from pyspark import SparkContext from pyspark.ml import Pipeline from pyspark.ml.classification import LogisticRegression from pyspark.ml.evaluation import BinaryClassificationEvaluator from pyspark.ml.feature import HashingTF, Tokenizer from pyspark.ml.tuning import CrossValidator, ParamGridBuilder from pyspark.sql import Row, SQLContext """ A simple example demonstrating model selection using CrossValidator. This example also demonstrates how Pipelines are Estimators. Run with: bin/spark-submit examples/src/main/python/ml/cross_validator.py """ if __name__ == "__main__": sc = SparkContext(appName="CrossValidatorExample") sqlContext = SQLContext(sc) # Prepare training documents, which are labeled. LabeledDocument = Row("id", "text", "label") training = sc.parallelize([(0, "a b c d e spark", 1.0), (1, "b d", 0.0), (2, "spark f g h", 1.0), (3, "hadoop mapreduce", 0.0), (4, "b spark who", 1.0), (5, "g d a y", 0.0), (6, "spark fly", 1.0), (7, "was mapreduce", 0.0), (8, "e spark program", 1.0), (9, "a e c l", 0.0), (10, "spark compile", 1.0), (11, "hadoop software", 0.0) ]) \ .map(lambda x: LabeledDocument(*x)).toDF() # Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr. tokenizer = Tokenizer(inputCol="text", outputCol="words") hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features") lr = LogisticRegression(maxIter=10) pipeline = Pipeline(stages=[tokenizer, hashingTF, lr]) # We now treat the Pipeline as an Estimator, wrapping it in a CrossValidator instance. # This will allow us to jointly choose parameters for all Pipeline stages. # A CrossValidator requires an Estimator, a set of Estimator ParamMaps, and an Evaluator. # We use a ParamGridBuilder to construct a grid of parameters to search over. # With 3 values for hashingTF.numFeatures and 2 values for lr.regParam, # this grid will have 3 x 2 = 6 parameter settings for CrossValidator to choose from. paramGrid = ParamGridBuilder() \ .addGrid(hashingTF.numFeatures, [10, 100, 1000]) \ .addGrid(lr.regParam, [0.1, 0.01]) \ .build() crossval = CrossValidator(estimator=pipeline, estimatorParamMaps=paramGrid, evaluator=BinaryClassificationEvaluator(), numFolds=2) # use 3+ folds in practice # Run cross-validation, and choose the best set of parameters. cvModel = crossval.fit(training) # Prepare test documents, which are unlabeled. Document = Row("id", "text") test = sc.parallelize([(4L, "spark i j k"), (5L, "l m n"), (6L, "mapreduce spark"), (7L, "apache hadoop")]) \ .map(lambda x: Document(*x)).toDF() # Make predictions on test documents. cvModel uses the best model found (lrModel). 
prediction = cvModel.transform(test) selected = prediction.select("id", "text", "probability", "prediction") for row in selected.collect(): print(row) sc.stop()
apache-2.0
duyetdev/openerp-6.1.1
openerp/addons/stock/report/picking.py
9
1729
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from report import report_sxw from osv import osv import pooler class picking(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(picking, self).__init__(cr, uid, name, context=context) self.localcontext.update({ 'time': time, 'get_qtytotal':self._get_qtytotal }) def _get_qtytotal(self,move_lines): total = 0.0 uom = move_lines[0].product_uom.name for move in move_lines: total+=move.product_qty return {'quantity':total,'uom':uom} report_sxw.report_sxw('report.stock.picking.list','stock.picking','addons/stock/report/picking.rml',parser=picking) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
conjure-up/conjure-up
conjureup/controllers/juju/credentials/tui.py
3
1332
from conjureup import events, utils from conjureup.app_config import app from conjureup.consts import cloud_types from . import common class CredentialsController(common.BaseCredentialsController): def render(self): self.load_credentials() if app.provider.cloud_type == cloud_types.LOCAL: # no credentials required for localhost self.finish() elif not self.credentials: utils.warning("You attempted to do an install against a cloud " "that requires credentials that could not be " "found. If you wish to supply those " "credentials please run " "`juju add-credential " "{}`.".format(app.provider.cloud)) events.Shutdown.set(1) elif not app.provider.credential: utils.warning("You attempted to install against a cloud with " "multiple credentials and no default credentials " "set. Please set a default credential with:\n" "\n" " juju set-default-credential {} " "<credential>".format(app.provider.cloud)) events.Shutdown.set(1) else: self.finish() _controller_class = CredentialsController
mit
nirmeshk/oh-mainline
mysite/profile/migrations/0033_ditch_favoriting_for_projectexps.py
17
5557
# This file is part of OpenHatch. # Copyright (C) 2009 OpenHatch, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from south.db import db from django.db import models from mysite.profile.models import * class Migration: def forwards(self, orm): # Deleting field 'ProjectExp.favorite' db.delete_column('profile_projectexp', 'favorite') def backwards(self, orm): # Adding field 'ProjectExp.favorite' db.add_column('profile_projectexp', 'favorite', models.BooleanField(default=0)) models = { 'profile.person': { 'gotten_name_from_ohloh': ('models.BooleanField', [], {'default': 'False'}), 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'interested_in_working_on': ('models.CharField', [], {'default': "''", 'max_length': '1024'}), 'last_polled': ('models.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}), 'ohloh_grab_completed': ('models.BooleanField', [], {'default': 'False'}), 'poll_on_next_web_view': ('models.BooleanField', [], {'default': 'True'}), 'user': ('models.ForeignKey', ["orm['auth.User']"], {'unique': 'True'}) }, 'profile.link_person_tag': { 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'person': ('models.ForeignKey', ["orm['profile.Person']"], {}), 'source': ('models.CharField', [], {'max_length': '200'}), 'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}) }, 'profile.tag': { 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'tag_type': ('models.ForeignKey', ["orm['profile.TagType']"], {}), 'text': ('models.CharField', [], {'max_length': '50'}) }, 'profile.link_projectexp_tag': { 'Meta': {'unique_together': "[('tag','project_exp','source'),]"}, 'favorite': ('models.BooleanField', [], {'default': 'False'}), 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'project_exp': ('models.ForeignKey', ["orm['profile.ProjectExp']"], {}), 'source': ('models.CharField', [], {'max_length': '200'}), 'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}) }, 'profile.sourceforgeperson': { 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'username': ('models.CharField', [], {'max_length': '200'}) }, 'profile.link_project_tag': { 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'project': ('models.ForeignKey', ["orm['search.Project']"], {}), 'source': ('models.CharField', [], {'max_length': '200'}), 'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}) }, 'profile.sourceforgeproject': { 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'unixname': ('models.CharField', [], {'max_length': '200'}) }, 'search.project': { '_stub': True, 'id': ('models.AutoField', [], {'primary_key': 'True'}) }, 'auth.user': { '_stub': True, 'id': ('models.AutoField', [], {'primary_key': 'True'}) }, 'profile.link_sf_proj_dude_fm': { 'Meta': {'unique_together': "[('person','project'),]"}, 'date_collected': ('models.DateTimeField', [], {}), 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'is_admin': ('models.BooleanField', [], 
{'default': 'False'}), 'person': ('models.ForeignKey', ["orm['profile.SourceForgePerson']"], {}), 'position': ('models.CharField', [], {'max_length': '200'}), 'project': ('models.ForeignKey', ["orm['profile.SourceForgeProject']"], {}) }, 'profile.tagtype': { 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'name': ('models.CharField', [], {'max_length': '100'}), 'prefix': ('models.CharField', [], {'max_length': '20'}) }, 'profile.projectexp': { 'description': ('models.TextField', [], {}), 'id': ('models.AutoField', [], {'primary_key': 'True'}), 'man_months': ('models.PositiveIntegerField', [], {'null': 'True'}), 'person': ('models.ForeignKey', ["orm['profile.Person']"], {}), 'person_role': ('models.CharField', [], {'max_length': '200'}), 'primary_language': ('models.CharField', [], {'max_length': '200', 'null': 'True'}), 'project': ('models.ForeignKey', ["orm['search.Project']"], {}), 'source': ('models.CharField', [], {'max_length': '100', 'null': 'True'}), 'url': ('models.URLField', [], {'max_length': '200', 'null': 'True'}) } } complete_apps = ['profile']
agpl-3.0
Sixshaman/networkx
networkx/algorithms/cluster.py
6
10815
# -*- coding: utf-8 -*- # # Copyright (C) 2004-2016 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> # All rights reserved. # BSD license. """Algorithms to characterize the number of triangles in a graph.""" from __future__ import division from itertools import combinations import networkx as nx from networkx import NetworkXError from networkx.utils import not_implemented_for __author__ = """\n""".join(['Aric Hagberg <[email protected]>', 'Dan Schult ([email protected])', 'Pieter Swart ([email protected])', 'Jordi Torrents <[email protected]>']) __all__= ['triangles', 'average_clustering', 'clustering', 'transitivity', 'square_clustering'] @not_implemented_for('directed') def triangles(G, nodes=None): """Compute the number of triangles. Finds the number of triangles that include a node as one vertex. Parameters ---------- G : graph A networkx graph nodes : container of nodes, optional (default= all nodes in G) Compute triangles for nodes in this container. Returns ------- out : dictionary Number of triangles keyed by node label. Examples -------- >>> G=nx.complete_graph(5) >>> print(nx.triangles(G,0)) 6 >>> print(nx.triangles(G)) {0: 6, 1: 6, 2: 6, 3: 6, 4: 6} >>> print(list(nx.triangles(G,(0,1)).values())) [6, 6] Notes ----- When computing triangles for the entire graph each triangle is counted three times, once at each node. Self loops are ignored. """ # If `nodes` represents a single node in the graph, return only its number # of triangles. if nodes in G: return next(_triangles_and_degree_iter(G,nodes))[2] // 2 # Otherwise, `nodes` represents an iterable of nodes, so return a # dictionary mapping node to number of triangles. return {v: t // 2 for v, d, t in _triangles_and_degree_iter(G, nodes)} @not_implemented_for('multigraph') def _triangles_and_degree_iter(G, nodes=None): """ Return an iterator of (node, degree, triangles). This double counts triangles so you may want to divide by 2. See degree() and triangles() for definitions and details. """ if nodes is None: nodes_nbrs = G.adj.items() else: nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes)) for v, v_nbrs in nodes_nbrs: vs = set(v_nbrs) - {v} ntriangles = sum(len(vs & (set(G[w]) - {w})) for w in vs) yield (v, len(vs), ntriangles) @not_implemented_for('multigraph') def _weighted_triangles_and_degree_iter(G, nodes=None, weight='weight'): """ Return an iterator of (node, degree, weighted_triangles). Used for weighted clustering. """ if weight is None or G.number_of_edges() == 0: max_weight = 1 else: max_weight= max(d.get(weight, 1) for u, v, d in G.edges(data=True)) if nodes is None: nodes_nbrs = G.adj.items() else: nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes)) def wt(u, v): return G[u][v].get(weight, 1) / max_weight for i, nbrs in nodes_nbrs: inbrs = set(nbrs) - {i} weighted_triangles = 0 seen = set() for j in inbrs: seen.add(j) # This prevents double counting. jnbrs = set(G[j]) - seen # Only compute the edge weight once, before the inner inner # loop. wij = wt(i, j) weighted_triangles += sum((wij * wt(j, k) * wt(k, i)) ** (1 / 3) for k in inbrs & jnbrs) yield (i, len(inbrs), 2 * weighted_triangles) def average_clustering(G, nodes=None, weight=None, count_zeros=True): r"""Compute the average clustering coefficient for the graph G. The clustering coefficient for the graph is the average, .. math:: C = \frac{1}{n}\sum_{v \in G} c_v, where `n` is the number of nodes in `G`. 
Parameters ---------- G : graph nodes : container of nodes, optional (default=all nodes in G) Compute average clustering for nodes in this container. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. count_zeros : bool If False include only the nodes with nonzero clustering in the average. Returns ------- avg : float Average clustering Examples -------- >>> G=nx.complete_graph(5) >>> print(nx.average_clustering(G)) 1.0 Notes ----- This is a space saving routine; it might be faster to use the clustering function to get a list and then take the average. Self loops are ignored. References ---------- .. [1] Generalizations of the clustering coefficient to weighted complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela, K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007). http://jponnela.com/web_documents/a9.pdf .. [2] Marcus Kaiser, Mean clustering coefficients: the role of isolated nodes and leafs on clustering measures for small-world networks. http://arxiv.org/abs/0802.2512 """ c = clustering(G, nodes, weight=weight).values() if not count_zeros: c = [v for v in c if v > 0] return sum(c) / len(c) @not_implemented_for('directed') def clustering(G, nodes=None, weight=None): r"""Compute the clustering coefficient for nodes. For unweighted graphs, the clustering of a node `u` is the fraction of possible triangles through that node that exist, .. math:: c_u = \frac{2 T(u)}{deg(u)(deg(u)-1)}, where `T(u)` is the number of triangles through node `u` and `deg(u)` is the degree of `u`. For weighted graphs, the clustering is defined as the geometric average of the subgraph edge weights [1]_, .. math:: c_u = \frac{1}{deg(u)(deg(u)-1))} \sum_{uv} (\hat{w}_{uv} \hat{w}_{uw} \hat{w}_{vw})^{1/3}. The edge weights `\hat{w}_{uv}` are normalized by the maximum weight in the network `\hat{w}_{uv} = w_{uv}/\max(w)`. The value of `c_u` is assigned to 0 if `deg(u) < 2`. Parameters ---------- G : graph nodes : container of nodes, optional (default=all nodes in G) Compute clustering for nodes in this container. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. Returns ------- out : float, or dictionary Clustering coefficient at specified nodes Examples -------- >>> G=nx.complete_graph(5) >>> print(nx.clustering(G,0)) 1.0 >>> print(nx.clustering(G)) {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0} Notes ----- Self loops are ignored. References ---------- .. [1] Generalizations of the clustering coefficient to weighted complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela, K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007). http://jponnela.com/web_documents/a9.pdf """ if weight is not None: td_iter = _weighted_triangles_and_degree_iter(G, nodes, weight) else: td_iter = _triangles_and_degree_iter(G, nodes) clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t in td_iter} if nodes in G: # Return the value of the sole entry in the dictionary. return clusterc[nodes] return clusterc def transitivity(G): r"""Compute graph transitivity, the fraction of all possible triangles present in G. Possible triangles are identified by the number of "triads" (two edges with a shared vertex). The transitivity is .. math:: T = 3\frac{\#triangles}{\#triads}. 
Parameters ---------- G : graph Returns ------- out : float Transitivity Examples -------- >>> G = nx.complete_graph(5) >>> print(nx.transitivity(G)) 1.0 """ triangles = sum(t for v, d, t in _triangles_and_degree_iter(G)) contri = sum(d * (d - 1) for v, d, t in _triangles_and_degree_iter(G)) return 0 if triangles == 0 else triangles / contri def square_clustering(G, nodes=None): r""" Compute the squares clustering coefficient for nodes. For each node return the fraction of possible squares that exist at the node [1]_ .. math:: C_4(v) = \frac{ \sum_{u=1}^{k_v} \sum_{w=u+1}^{k_v} q_v(u,w) }{ \sum_{u=1}^{k_v} \sum_{w=u+1}^{k_v} [a_v(u,w) + q_v(u,w)]}, where `q_v(u,w)` are the number of common neighbors of `u` and `w` other than `v` (ie squares), and `a_v(u,w) = (k_u - (1+q_v(u,w)+\theta_{uv}))(k_w - (1+q_v(u,w)+\theta_{uw}))`, where `\theta_{uw} = 1` if `u` and `w` are connected and 0 otherwise. Parameters ---------- G : graph nodes : container of nodes, optional (default=all nodes in G) Compute clustering for nodes in this container. Returns ------- c4 : dictionary A dictionary keyed by node with the square clustering coefficient value. Examples -------- >>> G=nx.complete_graph(5) >>> print(nx.square_clustering(G,0)) 1.0 >>> print(nx.square_clustering(G)) {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0} Notes ----- While `C_3(v)` (triangle clustering) gives the probability that two neighbors of node v are connected with each other, `C_4(v)` is the probability that two neighbors of node v share a common neighbor different from v. This algorithm can be applied to both bipartite and unipartite networks. References ---------- .. [1] Pedro G. Lind, Marta C. González, and Hans J. Herrmann. 2005 Cycles and clustering in bipartite networks. Physical Review E (72) 056127. """ if nodes is None: node_iter = G else: node_iter = G.nbunch_iter(nodes) clustering = {} for v in node_iter: clustering[v] = 0 potential = 0 for u, w in combinations(G[v], 2): squares = len((set(G[u]) & set(G[w])) - set([v])) clustering[v] += squares degm = squares + 1 if w in G[u]: degm += 1 potential += (len(G[u]) - degm) * (len(G[w]) - degm) + squares if potential > 0: clustering[v] /= potential if nodes in G: # Return the value of the sole entry in the dictionary. return clustering[nodes] return clustering
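# ---------------------------------------------------------------------
# Illustrative check (added sketch, not part of the NetworkX source):
# weighted clustering is the geometric mean of triangle edge weights
# normalized by the maximum weight in the graph. For a triangle with
# weights 8, 1, 1 the normalized weights are 1, 1/8, 1/8, so each node
# gets (1 * 1/8 * 1/8) ** (1/3) = 0.25.
if __name__ == '__main__':
    G = nx.Graph()
    G.add_edge(0, 1, weight=8)
    G.add_edge(1, 2, weight=1)
    G.add_edge(2, 0, weight=1)
    print(clustering(G, weight='weight'))  # {0: 0.25, 1: 0.25, 2: 0.25}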
bsd-3-clause
fxsjy/pybrain
pybrain/datasets/importance.py
5
1811
__author__ = 'Tom Schaul, [email protected]' from scipy import ones, dot from sequential import SequentialDataSet from pybrain.utilities import fListToString # CHECKME: does this provide for importance-datasets in the non-sequential case # maybe there should be a second class - or another structure! class ImportanceDataSet(SequentialDataSet): """ Allows setting an importance value for each of the targets of a sample. """ def __init__(self, indim, targetdim): SequentialDataSet.__init__(self, indim, targetdim) self.addField('importance', targetdim) self.link.append('importance') def addSample(self, inp, target, importance=None): """ adds a new sample consisting of input, target and importance. :arg inp: the input of the sample :arg target: the target of the sample :key importance: the importance of the sample. If left None, the importance will be set to 1.0 """ if importance is None: importance = ones(len(target)) self.appendLinked(inp, target, importance) def _evaluateSequence(self, f, seq, verbose=False): """ return the importance-ponderated MSE over one sequence. """ totalError = 0 ponderation = 0. for input, target, importance in seq: res = f(input) e = 0.5 * dot(importance.flatten(), ((target-res).flatten()**2)) totalError += e ponderation += sum(importance) if verbose: print 'out: ', fListToString(list(res)) print 'correct: ', fListToString(target) print 'importance:', fListToString(importance) print 'error: % .8f' % e return totalError, ponderation
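# ---------------------------------------------------------------------
# Brief usage sketch (added for illustration; not part of the original
# PyBrain source). Builds a 2-input/1-target dataset where the second
# sample's target is effectively ignored by giving it zero importance.
if __name__ == '__main__':
    ds = ImportanceDataSet(2, 1)
    ds.newSequence()
    ds.addSample([0.0, 1.0], [1.0])                    # default importance = 1.0
    ds.addSample([1.0, 0.0], [0.0], importance=[0.0])  # zero weight in the error
    print(len(ds))  # -> 2 samples stored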
bsd-3-clause
vincepandolfo/django
tests/gis_tests/geo3d/tests.py
26
17237
from __future__ import unicode_literals import os import re from unittest import skipUnless from django.contrib.gis.db.models import Extent3D, Union from django.contrib.gis.db.models.functions import ( AsGeoJSON, AsKML, Length, Perimeter, Scale, Translate, ) from django.contrib.gis.gdal import HAS_GDAL from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon from django.test import TestCase, ignore_warnings, skipUnlessDBFeature from django.utils._os import upath from django.utils.deprecation import RemovedInDjango20Warning from .models import ( City3D, Interstate2D, Interstate3D, InterstateProj2D, InterstateProj3D, MultiPoint3D, Point2D, Point3D, Polygon2D, Polygon3D, ) if HAS_GDAL: from django.contrib.gis.utils import LayerMapping, LayerMapError data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data')) city_file = os.path.join(data_path, 'cities', 'cities.shp') vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt') # The coordinates of each city, with Z values corresponding to their # altitude in meters. city_data = ( ('Houston', (-95.363151, 29.763374, 18)), ('Dallas', (-96.801611, 32.782057, 147)), ('Oklahoma City', (-97.521157, 34.464642, 380)), ('Wellington', (174.783117, -41.315268, 14)), ('Pueblo', (-104.609252, 38.255001, 1433)), ('Lawrence', (-95.235060, 38.971823, 251)), ('Chicago', (-87.650175, 41.850385, 181)), ('Victoria', (-123.305196, 48.462611, 15)), ) # Reference mapping of city name to its altitude (Z value). city_dict = {name: coords for name, coords in city_data} # 3D freeway data derived from the National Elevation Dataset: # http://seamless.usgs.gov/products/9arc.php interstate_data = ( ('I-45', 'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,' '-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,' '-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,' '-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,' '-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,' '-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,' '-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,' '-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,' '-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,' '-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,' '-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)', (11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858, 15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16, 15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857, 15.435), ), ) # Bounding box polygon for inner-loop of Houston (in projected coordinate # system 32140), with elevation values from the National Elevation Dataset # (see above). bbox_data = ( 'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,' '942051.75 4208366.38,941527.97 4225693.20))', (21.71, 13.21, 9.12, 16.40, 21.71) ) class Geo3DLoadingHelper(object): def _load_interstate_data(self): # Interstate (2D / 3D and Geographic/Projected variants) for name, line, exp_z in interstate_data: line_3d = GEOSGeometry(line, srid=4269) line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269) # Creating a geographic and projected version of the # interstate in both 2D and 3D. 
Interstate3D.objects.create(name=name, line=line_3d) InterstateProj3D.objects.create(name=name, line=line_3d) Interstate2D.objects.create(name=name, line=line_2d) InterstateProj2D.objects.create(name=name, line=line_2d) def _load_city_data(self): for name, pnt_data in city_data: City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326)) def _load_polygon_data(self): bbox_wkt, bbox_z = bbox_data bbox_2d = GEOSGeometry(bbox_wkt, srid=32140) bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140) Polygon2D.objects.create(name='2D BBox', poly=bbox_2d) Polygon3D.objects.create(name='3D BBox', poly=bbox_3d) @skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.") @skipUnlessDBFeature("gis_enabled", "supports_3d_storage") class Geo3DTest(Geo3DLoadingHelper, TestCase): """ Only a subset of the PostGIS routines are 3D-enabled, and this TestCase tries to test the features that can handle 3D and that are also available within GeoDjango. For more information, see the PostGIS docs on the routines that support 3D: http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions """ def test_3d_hasz(self): """ Make sure data is 3D and has expected Z values -- shouldn't change because of coordinate system. """ self._load_interstate_data() for name, line, exp_z in interstate_data: interstate = Interstate3D.objects.get(name=name) interstate_proj = InterstateProj3D.objects.get(name=name) for i in [interstate, interstate_proj]: self.assertTrue(i.line.hasz) self.assertEqual(exp_z, tuple(i.line.z)) self._load_city_data() for name, pnt_data in city_data: city = City3D.objects.get(name=name) z = pnt_data[2] self.assertTrue(city.point.hasz) self.assertEqual(z, city.point.z) def test_3d_polygons(self): """ Test the creation of polygon 3D models. """ self._load_polygon_data() p3d = Polygon3D.objects.get(name='3D BBox') self.assertTrue(p3d.poly.hasz) self.assertIsInstance(p3d.poly, Polygon) self.assertEqual(p3d.poly.srid, 32140) def test_3d_layermapping(self): """ Testing LayerMapping on 3D models. """ point_mapping = {'point': 'POINT'} mpoint_mapping = {'mpoint': 'MULTIPOINT'} # The VRT is 3D, but should still be able to map sans the Z. lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False) lm.save() self.assertEqual(3, Point2D.objects.count()) # The city shapefile is 2D, and won't be able to fill the coordinates # in the 3D model -- thus, a LayerMapError is raised. with self.assertRaises(LayerMapError): LayerMapping(Point3D, city_file, point_mapping, transform=False) # 3D model should take 3D data just fine. lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False) lm.save() self.assertEqual(3, Point3D.objects.count()) # Making sure LayerMapping.make_multi works right, by converting # a Point25D into a MultiPoint25D. lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False) lm.save() self.assertEqual(3, MultiPoint3D.objects.count()) @ignore_warnings(category=RemovedInDjango20Warning) def test_kml(self): """ Test GeoQuerySet.kml() with Z values. """ self._load_city_data() h = City3D.objects.kml(precision=6).get(name='Houston') # KML should be 3D. # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';` ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$') self.assertTrue(ref_kml_regex.match(h.kml)) @ignore_warnings(category=RemovedInDjango20Warning) def test_geojson(self): """ Test GeoQuerySet.geojson() with Z values. 
""" self._load_city_data() h = City3D.objects.geojson(precision=6).get(name='Houston') # GeoJSON should be 3D # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';` ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$') self.assertTrue(ref_json_regex.match(h.geojson)) @skipUnlessDBFeature("supports_3d_functions") def test_union(self): """ Testing the Union aggregate of 3D models. """ # PostGIS query that returned the reference EWKT for this test: # `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;` self._load_city_data() ref_ewkt = ( 'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,' '-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,' '-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)' ) ref_union = GEOSGeometry(ref_ewkt) union = City3D.objects.aggregate(Union('point'))['point__union'] self.assertTrue(union.hasz) # Ordering of points in the resulting geometry may vary between implementations self.assertSetEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union}) @skipUnlessDBFeature("supports_3d_functions") def test_extent(self): """ Testing the Extent3D aggregate for 3D models. """ self._load_city_data() # `SELECT ST_Extent3D(point) FROM geo3d_city3d;` ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433) extent = City3D.objects.aggregate(Extent3D('point'))['point__extent3d'] def check_extent3d(extent3d, tol=6): for ref_val, ext_val in zip(ref_extent3d, extent3d): self.assertAlmostEqual(ref_val, ext_val, tol) check_extent3d(extent) self.assertIsNone(City3D.objects.none().aggregate(Extent3D('point'))['point__extent3d']) @ignore_warnings(category=RemovedInDjango20Warning) @skipUnlessDBFeature("supports_3d_functions") def test_perimeter(self): """ Testing GeoQuerySet.perimeter() on 3D fields. """ self._load_polygon_data() # Reference query for values below: # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;` ref_perim_3d = 76859.2620451 ref_perim_2d = 76859.2577803 tol = 6 self.assertAlmostEqual(ref_perim_2d, Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m, tol) self.assertAlmostEqual(ref_perim_3d, Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m, tol) @ignore_warnings(category=RemovedInDjango20Warning) @skipUnlessDBFeature("supports_3d_functions") def test_length(self): """ Testing GeoQuerySet.length() on 3D fields. """ # ST_Length_Spheroid Z-aware, and thus does not need to use # a separate function internally. # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]') # FROM geo3d_interstate[2d|3d];` self._load_interstate_data() tol = 3 ref_length_2d = 4368.1721949481 ref_length_3d = 4368.62547052088 self.assertAlmostEqual(ref_length_2d, Interstate2D.objects.length().get(name='I-45').length.m, tol) self.assertAlmostEqual(ref_length_3d, Interstate3D.objects.length().get(name='I-45').length.m, tol) # Making sure `ST_Length3D` is used on for a projected # and 3D model rather than `ST_Length`. 
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;` ref_length_2d = 4367.71564892392 # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;` ref_length_3d = 4368.16897234101 self.assertAlmostEqual(ref_length_2d, InterstateProj2D.objects.length().get(name='I-45').length.m, tol) self.assertAlmostEqual(ref_length_3d, InterstateProj3D.objects.length().get(name='I-45').length.m, tol) @ignore_warnings(category=RemovedInDjango20Warning) @skipUnlessDBFeature("supports_3d_functions") def test_scale(self): """ Testing GeoQuerySet.scale() on Z values. """ self._load_city_data() # Mapping of City name to reference Z values. zscales = (-3, 4, 23) for zscale in zscales: for city in City3D.objects.scale(1.0, 1.0, zscale): self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z) @ignore_warnings(category=RemovedInDjango20Warning) @skipUnlessDBFeature("supports_3d_functions") def test_translate(self): """ Testing GeoQuerySet.translate() on Z values. """ self._load_city_data() ztranslations = (5.23, 23, -17) for ztrans in ztranslations: for city in City3D.objects.translate(0, 0, ztrans): self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z) @skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.") @skipUnlessDBFeature("gis_enabled", "supports_3d_functions") class Geo3DFunctionsTests(Geo3DLoadingHelper, TestCase): def test_kml(self): """ Test KML() function with Z values. """ self._load_city_data() h = City3D.objects.annotate(kml=AsKML('point', precision=6)).get(name='Houston') # KML should be 3D. # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';` ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$') self.assertTrue(ref_kml_regex.match(h.kml)) def test_geojson(self): """ Test GeoJSON() function with Z values. """ self._load_city_data() h = City3D.objects.annotate(geojson=AsGeoJSON('point', precision=6)).get(name='Houston') # GeoJSON should be 3D # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';` ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$') self.assertTrue(ref_json_regex.match(h.geojson)) def test_perimeter(self): """ Testing Perimeter() function on 3D fields. """ self._load_polygon_data() # Reference query for values below: # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;` ref_perim_3d = 76859.2620451 ref_perim_2d = 76859.2577803 tol = 6 poly2d = Polygon2D.objects.annotate(perimeter=Perimeter('poly')).get(name='2D BBox') self.assertAlmostEqual(ref_perim_2d, poly2d.perimeter.m, tol) poly3d = Polygon3D.objects.annotate(perimeter=Perimeter('poly')).get(name='3D BBox') self.assertAlmostEqual(ref_perim_3d, poly3d.perimeter.m, tol) def test_length(self): """ Testing Length() function on 3D fields. """ # ST_Length_Spheroid Z-aware, and thus does not need to use # a separate function internally. # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]') # FROM geo3d_interstate[2d|3d];` self._load_interstate_data() tol = 3 ref_length_2d = 4368.1721949481 ref_length_3d = 4368.62547052088 inter2d = Interstate2D.objects.annotate(length=Length('line')).get(name='I-45') self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol) inter3d = Interstate3D.objects.annotate(length=Length('line')).get(name='I-45') self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol) # Making sure `ST_Length3D` is used on for a projected # and 3D model rather than `ST_Length`. 
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;` ref_length_2d = 4367.71564892392 # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;` ref_length_3d = 4368.16897234101 inter2d = InterstateProj2D.objects.annotate(length=Length('line')).get(name='I-45') self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol) inter3d = InterstateProj3D.objects.annotate(length=Length('line')).get(name='I-45') self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol) def test_scale(self): """ Testing Scale() function on Z values. """ self._load_city_data() # Mapping of City name to reference Z values. zscales = (-3, 4, 23) for zscale in zscales: for city in City3D.objects.annotate(scale=Scale('point', 1.0, 1.0, zscale)): self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z) def test_translate(self): """ Testing Translate() function on Z values. """ self._load_city_data() ztranslations = (5.23, 23, -17) for ztrans in ztranslations: for city in City3D.objects.annotate(translate=Translate('point', 0, 0, ztrans)): self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
bsd-3-clause
Antiun/bank-payment
account_banking_sepa_direct_debit/models/account_banking_mandate.py
11
6872
# -*- encoding: utf-8 -*-
##############################################################################
#
#    SEPA Direct Debit module for OpenERP
#    Copyright (C) 2013 Akretion (http://www.akretion.com)
#    @author: Alexis de Lattre <[email protected]>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import models, fields, api, exceptions, _
from datetime import datetime
from dateutil.relativedelta import relativedelta
import logging

NUMBER_OF_UNUSED_MONTHS_BEFORE_EXPIRY = 36

logger = logging.getLogger(__name__)


class AccountBankingMandate(models.Model):
    """SEPA Direct Debit Mandate"""
    _inherit = 'account.banking.mandate'
    _track = {
        'recurrent_sequence_type': {
            'account_banking_sepa_direct_debit.recurrent_sequence_type_first':
            lambda self, cr, uid, obj, ctx=None:
            obj['recurrent_sequence_type'] == 'first',
            'account_banking_sepa_direct_debit.'
            'recurrent_sequence_type_recurring':
            lambda self, cr, uid, obj, ctx=None:
            obj['recurrent_sequence_type'] == 'recurring',
            'account_banking_sepa_direct_debit.recurrent_sequence_type_final':
            lambda self, cr, uid, obj, ctx=None:
            obj['recurrent_sequence_type'] == 'final',
        }
    }

    type = fields.Selection([('recurrent', 'Recurrent'),
                             ('oneoff', 'One-Off')],
                            string='Type of Mandate', required=True,
                            track_visibility='always')
    recurrent_sequence_type = fields.Selection(
        [('first', 'First'), ('recurring', 'Recurring'), ('final', 'Final')],
        string='Sequence Type for Next Debit', track_visibility='onchange',
        help="This field is only used for Recurrent mandates, not for "
             "One-Off mandates.",
        default="first")
    sepa_migrated = fields.Boolean(
        string='Migrated to SEPA', track_visibility='onchange',
        help="If this field is not active, the mandate section of the next "
             "direct debit file that includes this mandate will contain the "
             "'Original Mandate Identification' and the 'Original Creditor "
             "Scheme Identification'. This is required in a few countries "
             "(Belgium for instance), but not in all countries. If this is "
             "not required in your country, you should keep this field always "
             "active.",
        default=True)
    original_mandate_identification = fields.Char(
        string='Original Mandate Identification', track_visibility='onchange',
        size=35,
        help="When the field 'Migrated to SEPA' is not active, this field "
             "will be used as the Original Mandate Identification in the "
             "Direct Debit file.")
    scheme = fields.Selection([('CORE', 'Basic (CORE)'),
                               ('B2B', 'Enterprise (B2B)')],
                              string='Scheme', required=True, default="CORE")
    unique_mandate_reference = fields.Char(size=35)  # cf ISO 20022

    @api.one
    @api.constrains('type', 'recurrent_sequence_type')
    def _check_recurring_type(self):
        if (self.type == 'recurrent' and
                not self.recurrent_sequence_type):
            raise exceptions.Warning(
                _("The recurrent mandate '%s' must have a sequence type.")
                % self.unique_mandate_reference)

    @api.one
    @api.constrains('type', 'recurrent_sequence_type', 'sepa_migrated')
    def _check_migrated_to_sepa(self):
        if (self.type == 'recurrent' and not self.sepa_migrated and
                self.recurrent_sequence_type != 'first'):
            raise exceptions.Warning(
                _("The recurrent mandate '%s' which is not marked as "
                  "'Migrated to SEPA' must have its recurrent sequence type "
                  "set to 'First'.") % self.unique_mandate_reference)

    @api.one
    @api.constrains('type', 'original_mandate_identification', 'sepa_migrated')
    def _check_original_mandate_identification(self):
        if (self.type == 'recurrent' and not self.sepa_migrated and
                not self.original_mandate_identification):
            raise exceptions.Warning(
                _("You must set the 'Original Mandate Identification' on the "
                  "recurrent mandate '%s' which is not marked as 'Migrated to "
                  "SEPA'.") % self.unique_mandate_reference)

    @api.one
    @api.onchange('partner_bank_id')
    def mandate_partner_bank_change(self):
        super(AccountBankingMandate, self).mandate_partner_bank_change()
        res = {}
        if (self.state == 'valid' and self.partner_bank_id and
                self.type == 'recurrent' and
                self.recurrent_sequence_type != 'first'):
            self.recurrent_sequence_type = 'first'
            res['warning'] = {
                'title': _('Mandate update'),
                'message': _("As you changed the bank account attached to "
                             "this mandate, the 'Sequence Type' has been set "
                             "back to 'First'."),
            }
        return res

    @api.multi
    def _sdd_mandate_set_state_to_expired(self):
        logger.info('Searching for SDD Mandates that must be set to Expired')
        expire_limit_date = datetime.today() + \
            relativedelta(months=-NUMBER_OF_UNUSED_MONTHS_BEFORE_EXPIRY)
        expire_limit_date_str = expire_limit_date.strftime('%Y-%m-%d')
        expired_mandates = self.search(
            ['|',
             ('last_debit_date', '=', False),
             ('last_debit_date', '<=', expire_limit_date_str),
             ('state', '=', 'valid'),
             ('signature_date', '<=', expire_limit_date_str)])
        if expired_mandates:
            expired_mandates.write({'state': 'expired'})
            logger.info(
                'The following SDD Mandate IDs have been set to expired: %s'
                % expired_mandates.ids)
        else:
            logger.info('0 SDD Mandates must be set to Expired')
        return True
agpl-3.0
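Note: the expiry scheduler above hinges on a single date computation: a valid mandate lapses once NUMBER_OF_UNUSED_MONTHS_BEFORE_EXPIRY months pass without a debit. A standalone sketch of that cutoff calculation, using the same dateutil call outside any OpenERP model (the example date is arbitrary):

from datetime import datetime
from dateutil.relativedelta import relativedelta

NUMBER_OF_UNUSED_MONTHS_BEFORE_EXPIRY = 36

def expiry_cutoff(today):
    # Mandates last debited (or, failing that, signed) on or before
    # this date are moved to state 'expired'.
    limit = today + relativedelta(months=-NUMBER_OF_UNUSED_MONTHS_BEFORE_EXPIRY)
    return limit.strftime('%Y-%m-%d')

print(expiry_cutoff(datetime(2015, 6, 1)))  # -> 2012-06-01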
xgaia/askomics
askomics/test/GalaxyConnector_test.py
2
5304
""" This file contain all test for the GalaxyConnector class. This test need a Galaxy instance to be executed """ import os import time import unittest from shutil import copyfile from bioblend import galaxy from pyramid.paster import get_appsettings from pyramid import testing from askomics.libaskomics.ParamManager import ParamManager from askomics.libaskomics.GalaxyConnector import GalaxyConnector from SetupTests import SetupTests from interface_galaxy import InterfaceGalaxy from nose.plugins.attrib import attr @attr('galaxy') class GalaxyConnectorTests(unittest.TestCase): """ Set up settings and request before testing GalaxyConnector Also delete old testing history in galaxy, and create a new one (with 2 datasets) """ def setUp(self): """Set up the settings and the session""" self.settings = get_appsettings('configs/tests.ini', name='main') self.settings['askomics.upload_user_data_method'] = 'insert' self.request = testing.DummyRequest() self.request.session['username'] = 'jdoe' self.request.session['group'] = 'base' self.request.session['admin'] = False self.request.session['blocked'] = True SetupTests(self.settings, self.request.session) # Galaxy self.interface_galaxy = InterfaceGalaxy(self.settings, self.request) self.galaxy = self.interface_galaxy.get_galaxy_credentials() self.interface_galaxy.delete_testing_histories() self.history_id = self.interface_galaxy.create_testing_history() self.interface_galaxy.upload_file_into_history('people.tsv') self.interface_galaxy.upload_file_into_history('instruments.tsv') self.interface_galaxy.upload_string_into_history('hello_world.txt', 'hello world') self.interface_galaxy.wait_until_datasets_ready() self.datasets = self.interface_galaxy.get_datasets_id() def test_check_galaxy_instance(self): """Test the check_galaxy_instance method""" galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key']) assert galaxy_connector.check_galaxy_instance() is True #FIXME: Don't raise the ConnectionError # with self.assertRaises(ConnectionError): # GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], 'fake_api_key') def test_get_datasets_and_histories(self): """Test the get_datasets_and_histories method""" galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key']) # Test with history id result = galaxy_connector.get_datasets_and_histories(['tabular'], history_id=self.history_id) created_history = { 'name': 'askomics_test', 'id': self.history_id, 'selected': True } assert isinstance(result, dict) assert len(result) == 2 assert 'datasets' in result assert 'histories' in result assert created_history in result['histories'] # Test without history id result = galaxy_connector.get_datasets_and_histories(['tabular']) created_history = { 'name': 'askomics_test', 'id': self.history_id, 'selected': True } assert isinstance(result, dict) assert len(result) == 2 assert 'datasets' in result assert 'histories' in result assert created_history in result['histories'] def test_upload_files(self): """Test upload_files method""" galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key']) galaxy_connector.upload_files([self.datasets['hello']['dataset_id']]) assert self.interface_galaxy.check_uploaded_files(self.settings['askomics.files_dir'] + '/' + self.request.session['username'] + '/upload/') is True def test_get_file_content(self): """Test get_file_content method""" galaxy_connector = 
GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key']) content = galaxy_connector.get_file_content(self.datasets['hello']['dataset_id']) expected_content = 'hello world\n' assert content == expected_content def test_send_to_history(self): """Test the send_to_history method""" galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key']) param_manager = ParamManager(self.settings, self.request.session) src_file = param_manager.get_upload_directory() filepath = src_file + 'play_instrument.tsv' galaxy_connector.send_to_history(filepath, 'play_instrument.tsv', 'tabular') assert self.interface_galaxy.check_dataset_presence('play_instrument.tsv') is True def test_send_json_to_history(self): """Test the send_json_to_history method""" galaxy_connector = GalaxyConnector(self.settings, self.request.session, self.galaxy['url'], self.galaxy['key']) galaxy_connector.send_json_to_history('hello world') assert self.interface_galaxy.check_dataset_presence('askomics_query_', start_with=True) is True
agpl-3.0
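Note: these tests presuppose a reachable Galaxy instance, and the connectivity check they exercise boils down to a plain bioblend round-trip. A minimal sketch of such a check (this is not the module's actual implementation; the URL and key are placeholders):

from bioblend import galaxy

def galaxy_is_reachable(url, api_key):
    # Any cheap authenticated call will do; get_histories() fails fast
    # on a bad URL or an invalid API key.
    gi = galaxy.GalaxyInstance(url=url, key=api_key)
    try:
        gi.histories.get_histories()
    except Exception:
        return False
    return True

# galaxy_is_reachable('http://localhost:8080', 'fake_api_key')  # -> False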
tchellomello/home-assistant
homeassistant/components/dyson/vacuum.py
7
6607
"""Support for the Dyson 360 eye vacuum cleaner robot.""" import logging from libpurecool.const import Dyson360EyeMode, PowerMode from libpurecool.dyson_360_eye import Dyson360Eye from homeassistant.components.vacuum import ( SUPPORT_BATTERY, SUPPORT_FAN_SPEED, SUPPORT_PAUSE, SUPPORT_RETURN_HOME, SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, VacuumEntity, ) from homeassistant.helpers.icon import icon_for_battery_level from . import DYSON_DEVICES _LOGGER = logging.getLogger(__name__) ATTR_CLEAN_ID = "clean_id" ATTR_FULL_CLEAN_TYPE = "full_clean_type" ATTR_POSITION = "position" DYSON_360_EYE_DEVICES = "dyson_360_eye_devices" SUPPORT_DYSON = ( SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | SUPPORT_STATUS | SUPPORT_BATTERY | SUPPORT_STOP ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Dyson 360 Eye robot vacuum platform.""" _LOGGER.debug("Creating new Dyson 360 Eye robot vacuum") if DYSON_360_EYE_DEVICES not in hass.data: hass.data[DYSON_360_EYE_DEVICES] = [] # Get Dyson Devices from parent component for device in [d for d in hass.data[DYSON_DEVICES] if isinstance(d, Dyson360Eye)]: dyson_entity = Dyson360EyeDevice(device) hass.data[DYSON_360_EYE_DEVICES].append(dyson_entity) add_entities(hass.data[DYSON_360_EYE_DEVICES]) return True class Dyson360EyeDevice(VacuumEntity): """Dyson 360 Eye robot vacuum device.""" def __init__(self, device): """Dyson 360 Eye robot vacuum device.""" _LOGGER.debug("Creating device %s", device.name) self._device = device async def async_added_to_hass(self): """Call when entity is added to hass.""" self.hass.async_add_job(self._device.add_message_listener, self.on_message) def on_message(self, message): """Handle a new messages that was received from the vacuum.""" _LOGGER.debug("Message received for %s device: %s", self.name, message) self.schedule_update_ha_state() @property def should_poll(self) -> bool: """Return True if entity has to be polled for state. False if entity pushes its state to HA. 
""" return False @property def name(self): """Return the name of the device.""" return self._device.name @property def status(self): """Return the status of the vacuum cleaner.""" dyson_labels = { Dyson360EyeMode.INACTIVE_CHARGING: "Stopped - Charging", Dyson360EyeMode.INACTIVE_CHARGED: "Stopped - Charged", Dyson360EyeMode.FULL_CLEAN_PAUSED: "Paused", Dyson360EyeMode.FULL_CLEAN_RUNNING: "Cleaning", Dyson360EyeMode.FULL_CLEAN_ABORTED: "Returning home", Dyson360EyeMode.FULL_CLEAN_INITIATED: "Start cleaning", Dyson360EyeMode.FAULT_USER_RECOVERABLE: "Error - device blocked", Dyson360EyeMode.FAULT_REPLACE_ON_DOCK: "Error - Replace device on dock", Dyson360EyeMode.FULL_CLEAN_FINISHED: "Finished", Dyson360EyeMode.FULL_CLEAN_NEEDS_CHARGE: "Need charging", } return dyson_labels.get(self._device.state.state, self._device.state.state) @property def battery_level(self): """Return the battery level of the vacuum cleaner.""" return self._device.state.battery_level @property def fan_speed(self): """Return the fan speed of the vacuum cleaner.""" speed_labels = {PowerMode.MAX: "Max", PowerMode.QUIET: "Quiet"} return speed_labels[self._device.state.power_mode] @property def fan_speed_list(self): """Get the list of available fan speed steps of the vacuum cleaner.""" return ["Quiet", "Max"] @property def device_state_attributes(self): """Return the specific state attributes of this vacuum cleaner.""" return {ATTR_POSITION: str(self._device.state.position)} @property def is_on(self) -> bool: """Return True if entity is on.""" return self._device.state.state in [ Dyson360EyeMode.FULL_CLEAN_INITIATED, Dyson360EyeMode.FULL_CLEAN_ABORTED, Dyson360EyeMode.FULL_CLEAN_RUNNING, ] @property def available(self) -> bool: """Return True if entity is available.""" return True @property def supported_features(self): """Flag vacuum cleaner robot features that are supported.""" return SUPPORT_DYSON @property def battery_icon(self): """Return the battery icon for the vacuum cleaner.""" charging = self._device.state.state in [Dyson360EyeMode.INACTIVE_CHARGING] return icon_for_battery_level( battery_level=self.battery_level, charging=charging ) def turn_on(self, **kwargs): """Turn the vacuum on.""" _LOGGER.debug("Turn on device %s", self.name) if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]: self._device.resume() else: self._device.start() def turn_off(self, **kwargs): """Turn the vacuum off and return to home.""" _LOGGER.debug("Turn off device %s", self.name) self._device.pause() def stop(self, **kwargs): """Stop the vacuum cleaner.""" _LOGGER.debug("Stop device %s", self.name) self._device.pause() def set_fan_speed(self, fan_speed, **kwargs): """Set fan speed.""" _LOGGER.debug("Set fan speed %s on device %s", fan_speed, self.name) power_modes = {"Quiet": PowerMode.QUIET, "Max": PowerMode.MAX} self._device.set_power_mode(power_modes[fan_speed]) def start_pause(self, **kwargs): """Start, pause or resume the cleaning task.""" if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]: _LOGGER.debug("Resume device %s", self.name) self._device.resume() elif self._device.state.state in [ Dyson360EyeMode.INACTIVE_CHARGED, Dyson360EyeMode.INACTIVE_CHARGING, ]: _LOGGER.debug("Start device %s", self.name) self._device.start() else: _LOGGER.debug("Pause device %s", self.name) self._device.pause() def return_to_base(self, **kwargs): """Set the vacuum cleaner to return to the dock.""" _LOGGER.debug("Return to base device %s", self.name) self._device.abort()
apache-2.0
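Note: the status property above is a dictionary lookup from libpurecool enum states to display strings, where .get(state, state) lets unknown states pass through unlabelled instead of raising KeyError. The same pattern in isolation, with a stand-in enum (EyeMode and its members are illustrative, not the libpurecool names):

from enum import Enum

class EyeMode(Enum):  # stand-in for libpurecool's Dyson360EyeMode
    INACTIVE_CHARGING = "inactive_charging"
    FULL_CLEAN_RUNNING = "full_clean_running"
    MAPPING = "mapping"  # deliberately left out of the label table

LABELS = {
    EyeMode.INACTIVE_CHARGING: "Stopped - Charging",
    EyeMode.FULL_CLEAN_RUNNING: "Cleaning",
}

def status_label(state):
    # Mapped states get a friendly label; unmapped ones fall through raw.
    return LABELS.get(state, state)

print(status_label(EyeMode.FULL_CLEAN_RUNNING))  # Cleaning
print(status_label(EyeMode.MAPPING))             # EyeMode.MAPPING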
pratikmallya/hue
desktop/core/ext-py/Paste-2.0.1/tests/test_util/test_datetimeutil.py
47
6026
# (c) 2005 Clark C. Evans and contributors
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# Some of this code was funded by: http://prometheusresearch.com
from time import localtime
from datetime import date
from paste.util.datetimeutil import *

def test_timedelta():
    assert('' == normalize_timedelta(""))
    assert('0.10' == normalize_timedelta("6m"))
    assert('0.50' == normalize_timedelta("30m"))
    assert('0.75' == normalize_timedelta("45m"))
    assert('1.00' == normalize_timedelta("60 min"))
    assert('1.50' == normalize_timedelta("90min"))
    assert('1.50' == normalize_timedelta("1.50"))
    assert('4.50' == normalize_timedelta("4 : 30"))
    assert('1.50' == normalize_timedelta("1h 30m"))
    assert('1.00' == normalize_timedelta("1"))
    assert('1.00' == normalize_timedelta("1 hour"))
    assert('8.00' == normalize_timedelta("480 mins"))
    assert('8.00' == normalize_timedelta("8h"))
    assert('0.50' == normalize_timedelta("0.5"))
    assert('0.10' == normalize_timedelta(".1"))
    assert('0.50' == normalize_timedelta(".50"))
    assert('0.75' == normalize_timedelta("0.75"))

def test_time():
    assert('03:00 PM' == normalize_time("3p", ampm=True))
    assert('03:00 AM' == normalize_time("300", ampm=True))
    assert('03:22 AM' == normalize_time("322", ampm=True))
    assert('01:22 PM' == normalize_time("1322", ampm=True))
    assert('01:00 PM' == normalize_time("13", ampm=True))
    assert('12:00 PM' == normalize_time("noon", ampm=True))
    assert("06:00 PM" == normalize_time("6", ampm=True))
    assert("01:00 PM" == normalize_time("1", ampm=True))
    assert("07:00 AM" == normalize_time("7", ampm=True))
    assert("01:00 PM" == normalize_time("1 pm", ampm=True))
    assert("03:30 PM" == normalize_time("3:30 pm", ampm=True))
    assert("03:30 PM" == normalize_time("3 30 pm", ampm=True))
    assert("03:30 PM" == normalize_time("3 30 P.M.", ampm=True))
    assert("12:00 PM" == normalize_time("0", ampm=True))
    assert("12:00 AM" == normalize_time("1200 AM", ampm=True))

def test_date():
    tm = localtime()
    yr = tm[0]
    mo = tm[1]
    assert(date(yr,4,11) == parse_date("411"))
    assert(date(yr,4,11) == parse_date("APR11"))
    assert(date(yr,4,11) == parse_date("11APR"))
    assert(date(yr,4,11) == parse_date("4 11"))
    assert(date(yr,4,11) == parse_date("11 APR"))
    assert(date(yr,4,11) == parse_date("APR 11"))
    assert(date(yr,mo,11) == parse_date("11"))
    assert(date(yr,4,1) == parse_date("APR"))
    assert(date(yr,4,11) == parse_date("4/11"))
    assert(date.today() == parse_date("today"))
    assert(date.today() == parse_date("now"))
    assert(None == parse_date(""))
    assert('' == normalize_date(None))
    assert('2001-02-03' == normalize_date("20010203"))
    assert('1999-04-11' == normalize_date("1999 4 11"))
    assert('1999-04-11' == normalize_date("1999 APR 11"))
    assert('1999-04-11' == normalize_date("APR 11 1999"))
    assert('1999-04-11' == normalize_date("11 APR 1999"))
    assert('1999-04-11' == normalize_date("4 11 1999"))
    assert('1999-04-01' == normalize_date("1999 APR"))
    assert('1999-04-01' == normalize_date("1999 4"))
    assert('1999-04-01' == normalize_date("4 1999"))
    assert('1999-04-01' == normalize_date("APR 1999"))
    assert('1999-01-01' == normalize_date("1999"))
    assert('1999-04-01' == normalize_date("1APR1999"))
    assert('2001-04-01' == normalize_date("1APR2001"))
    assert('1999-04-18' == normalize_date("1999-04-11+7"))
    assert('1999-04-18' == normalize_date("1999-04-11 7"))
    assert('1999-04-01' == normalize_date("1 apr 1999"))
    assert('1999-04-11' == normalize_date("11 apr 1999"))
    assert('1999-04-11' == normalize_date("11 Apr 1999"))
    assert('1999-04-11' == normalize_date("11-apr-1999"))
    assert('1999-04-11' == normalize_date("11 April 1999"))
    assert('1999-04-11' == normalize_date("11 APRIL 1999"))
    assert('1999-04-11' == normalize_date("11 april 1999"))
    assert('1999-04-11' == normalize_date("11 aprick 1999"))
    assert('1999-04-11' == normalize_date("APR 11, 1999"))
    assert('1999-04-11' == normalize_date("4/11/1999"))
    assert('1999-04-11' == normalize_date("4-11-1999"))
    assert('1999-04-11' == normalize_date("1999-4-11"))
    assert('1999-04-11' == normalize_date("19990411"))
    assert('1999-01-01' == normalize_date("1 Jan 1999"))
    assert('1999-02-01' == normalize_date("1 Feb 1999"))
    assert('1999-03-01' == normalize_date("1 Mar 1999"))
    assert('1999-04-01' == normalize_date("1 Apr 1999"))
    assert('1999-05-01' == normalize_date("1 May 1999"))
    assert('1999-06-01' == normalize_date("1 Jun 1999"))
    assert('1999-07-01' == normalize_date("1 Jul 1999"))
    assert('1999-08-01' == normalize_date("1 Aug 1999"))
    assert('1999-09-01' == normalize_date("1 Sep 1999"))
    assert('1999-10-01' == normalize_date("1 Oct 1999"))
    assert('1999-11-01' == normalize_date("1 Nov 1999"))
    assert('1999-12-01' == normalize_date("1 Dec 1999"))
    assert('1999-04-30' == normalize_date("1999-4-30"))
    assert('2000-02-29' == normalize_date("29 FEB 2000"))
    assert('2001-02-28' == normalize_date("28 FEB 2001"))
    assert('2004-02-29' == normalize_date("29 FEB 2004"))
    assert('2100-02-28' == normalize_date("28 FEB 2100"))
    assert('1900-02-28' == normalize_date("28 FEB 1900"))

    def assertError(val):
        try:
            normalize_date(val)
        except (TypeError,ValueError):
            return
        raise ValueError("type error expected", val)
    assertError("2000-13-11")
    assertError("APR 99")
    assertError("29 FEB 1900")
    assertError("29 FEB 2100")
    assertError("29 FEB 2001")
    assertError("1999-4-31")
    assertError("APR 99")
    assertError("20301")
    assertError("020301")
    assertError("1APR99")
    assertError("1APR01")
    assertError("1 APR 99")
    assertError("1 APR 01")
    assertError("11/5/01")
apache-2.0
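Note: the hand-rolled assertError helper above predates pytest's context managers; the same negative cases could be expressed today as follows (a sketch, assuming pytest and an installed paste):

import pytest
from paste.util.datetimeutil import normalize_date

@pytest.mark.parametrize("bad", [
    "2000-13-11",   # month out of range
    "29 FEB 1900",  # 1900 was not a leap year
    "1999-4-31",    # April has only 30 days
])
def test_bad_dates_raise(bad):
    # normalize_date signals malformed input with TypeError or ValueError.
    with pytest.raises((TypeError, ValueError)):
        normalize_date(bad)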
stu314159/pyNFC
pyNFC_Util.py
1
2945
# pyNFC_Util.py
"""
definition module for some pyNFC utility classes
"""

import numpy as np


class NFC_Halo_Data_Organizer(object):
    """
    collect and organize how halo data will be organized prior to communication
    """
    def __init__(self, ngb_rank):
        """
        constructor
        """
        self.ngb_rank = ngb_rank
        self.halo_data = {}

    def get_ngb_rank(self):
        return self.ngb_rank

    def insert(self, gnn, spd):
        """
        data insertion function:
        gnn - global node number of outgoing/incoming halo data
        spd - speed of outgoing/incoming halo data
        """
        self.halo_data.setdefault(gnn, []).append(spd)  # more pythonic

    def make_lists(self):  # only need be done once
        """
        once all data is inserted, this function will create two lists:
        the gnn for outgoing data to this rank; and the vector of speeds
        for the outgoing data.
        """
        self.gnn_list = []
        self.spd_list = []
        sorted_keys = sorted(self.halo_data.keys())  # sort the gnn keys
        for k in sorted_keys:
            values = self.halo_data[k]
            values = sorted(values)  # sort the spds for each gnn
            for v in values:
                self.gnn_list.append(k)
                self.spd_list.append(v)
        self.spd_array = np.array(self.spd_list, dtype=np.int32)

    def make_lists_local(self, global_to_local):
        """
        make and store local node number version of lists
        (but keep gnn ordering) -- be careful
        """
        self.lnn_list = []
        for g in self.gnn_list:  # preserve order?
            self.lnn_list.append(global_to_local[g])  # preserve order?
        self.lnn_array = np.array(self.lnn_list, dtype=np.int32)

    def count_data_members(self):
        """
        report the number of items to be sent from this partition
        """
        return len(self.gnn_list)

    def allocate_buffer(self):
        """
        construct buffer for data in/out
        """
        self.buff_len = len(self.gnn_list)
        self.buffer = np.empty([self.buff_len], dtype=np.float32)

    def extract_halo_data(self, fOut):
        """
        extract required data from fOut and place into buffer
        """
        for d in range(len(self.lnn_list)):
            ln = self.lnn_list[d]; spd = self.spd_list[d];
            self.buffer[d] = fOut[ln, spd]
        # give this a shot
        # self.buffer[:] = fOut[self.lnn_list[:], self.spd_list[:]]

    def insert_boundary_data(self, f):
        """
        insert stream-in data into the appropriate boundary node/speed
        """
        for d in range(len(self.lnn_list)):
            ln = self.lnn_list[d]; spd = self.spd_list[d];
            f[ln, spd] = self.buffer[d]
        # give this a shot
        # f[self.lnn_list[:], self.spd_list[:]]
mit
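Note: make_lists above flattens the {gnn: [spd, ...]} dictionary into two parallel lists whose ordering is deterministic on every rank, which is what lets sender and receiver agree on the buffer layout without exchanging indices. The transformation in isolation, with toy data:

halo_data = {42: [5, 1], 7: [3]}   # gnn -> outgoing speed indices

gnn_list, spd_list = [], []
for gnn in sorted(halo_data):           # sort the gnn keys
    for spd in sorted(halo_data[gnn]):  # sort the spds for each gnn
        gnn_list.append(gnn)
        spd_list.append(spd)

print(gnn_list)  # [7, 42, 42]
print(spd_list)  # [3, 1, 5]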
jfarcher/checkinapi
flask/lib/python2.7/site-packages/pip/vendor/html5lib/tokenizer.py
1710
76929
from __future__ import absolute_import, division, unicode_literals try: chr = unichr # flake8: noqa except NameError: pass from collections import deque from .constants import spaceCharacters from .constants import entities from .constants import asciiLetters, asciiUpper2Lower from .constants import digits, hexDigits, EOF from .constants import tokenTypes, tagTokenTypes from .constants import replacementCharacters from .inputstream import HTMLInputStream from .trie import Trie entitiesTrie = Trie(entities) class HTMLTokenizer(object): """ This class takes care of tokenizing HTML. * self.currentToken Holds the token that is currently being processed. * self.state Holds a reference to the method to be invoked... XXX * self.stream Points to HTMLInputStream object. """ def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True, lowercaseElementName=True, lowercaseAttrName=True, parser=None): self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet) self.parser = parser # Perform case conversions? self.lowercaseElementName = lowercaseElementName self.lowercaseAttrName = lowercaseAttrName # Setup the initial tokenizer state self.escapeFlag = False self.lastFourChars = [] self.state = self.dataState self.escape = False # The current token being created self.currentToken = None super(HTMLTokenizer, self).__init__() def __iter__(self): """ This is where the magic happens. We do our usually processing through the states and when we have a token to return we yield the token which pauses processing until the next token is requested. """ self.tokenQueue = deque([]) # Start processing. When EOF is reached self.state will return False # instead of True and the loop will terminate. while self.state(): while self.stream.errors: yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)} while self.tokenQueue: yield self.tokenQueue.popleft() def consumeNumberEntity(self, isHex): """This function returns either U+FFFD or the character based on the decimal or hexadecimal representation. It also discards ";" if present. If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. """ allowed = digits radix = 10 if isHex: allowed = hexDigits radix = 16 charStack = [] # Consume all the characters that are in range while making sure we # don't hit an EOF. c = self.stream.char() while c in allowed and c is not EOF: charStack.append(c) c = self.stream.char() # Convert the set of characters consumed to an int. charAsInt = int("".join(charStack), radix) # Certain characters get replaced with others if charAsInt in replacementCharacters: char = replacementCharacters[charAsInt] self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) elif ((0xD800 <= charAsInt <= 0xDFFF) or (charAsInt > 0x10FFFF)): char = "\uFFFD" self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) else: # Should speed up this check somehow (e.g. 
move the set to a constant) if ((0x0001 <= charAsInt <= 0x0008) or (0x000E <= charAsInt <= 0x001F) or (0x007F <= charAsInt <= 0x009F) or (0xFDD0 <= charAsInt <= 0xFDEF) or charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, 0x10FFFE, 0x10FFFF])): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) try: # Try/except needed as UCS-2 Python builds' unichar only works # within the BMP. char = chr(charAsInt) except ValueError: v = charAsInt - 0x10000 char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF)) # Discard the ; if present. Otherwise, put it back on the queue and # invoke parseError on parser. if c != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "numeric-entity-without-semicolon"}) self.stream.unget(c) return char def consumeEntity(self, allowedChar=None, fromAttribute=False): # Initialise to the default output for when no entity is matched output = "&" charStack = [self.stream.char()] if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or (allowedChar is not None and allowedChar == charStack[0])): self.stream.unget(charStack[0]) elif charStack[0] == "#": # Read the next character to see if it's hex or decimal hex = False charStack.append(self.stream.char()) if charStack[-1] in ("x", "X"): hex = True charStack.append(self.stream.char()) # charStack[-1] should be the first digit if (hex and charStack[-1] in hexDigits) \ or (not hex and charStack[-1] in digits): # At least one digit found, so consume the whole number self.stream.unget(charStack[-1]) output = self.consumeNumberEntity(hex) else: # No digits found self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-numeric-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: # At this point in the process might have named entity. Entities # are stored in the global variable "entities". # # Consume characters and compare to these to a substring of the # entity names in the list until the substring no longer matches. while (charStack[-1] is not EOF): if not entitiesTrie.has_keys_with_prefix("".join(charStack)): break charStack.append(self.stream.char()) # At this point we have a string that starts with some characters # that may match an entity # Try to find the longest entity the string will match to take care # of &noti for instance. 
try: entityName = entitiesTrie.longest_prefix("".join(charStack[:-1])) entityLength = len(entityName) except KeyError: entityName = None if entityName is not None: if entityName[-1] != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "named-entity-without-semicolon"}) if (entityName[-1] != ";" and fromAttribute and (charStack[entityLength] in asciiLetters or charStack[entityLength] in digits or charStack[entityLength] == "=")): self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: output = entities[entityName] self.stream.unget(charStack.pop()) output += "".join(charStack[entityLength:]) else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-named-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) if fromAttribute: self.currentToken["data"][-1][1] += output else: if output in spaceCharacters: tokenType = "SpaceCharacters" else: tokenType = "Characters" self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output}) def processEntityInAttribute(self, allowedChar): """This method replaces the need for "entityInAttributeValueState". """ self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) def emitCurrentToken(self): """This method is a generic handler for emitting the tags. It also sets the state to "data" because that's what's needed after a token has been emitted. """ token = self.currentToken # Add token to the queue to be yielded if (token["type"] in tagTokenTypes): if self.lowercaseElementName: token["name"] = token["name"].translate(asciiUpper2Lower) if token["type"] == tokenTypes["EndTag"]: if token["data"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "attributes-in-end-tag"}) if token["selfClosing"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "self-closing-flag-on-end-tag"}) self.tokenQueue.append(token) self.state = self.dataState # Below are the various tokenizer states worked out. def dataState(self): data = self.stream.char() if data == "&": self.state = self.entityDataState elif data == "<": self.state = self.tagOpenState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\u0000"}) elif data is EOF: # Tokenization ends. return False elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def entityDataState(self): self.consumeEntity() self.state = self.dataState return True def rcdataState(self): data = self.stream.char() if data == "&": self.state = self.characterReferenceInRcdata elif data == "<": self.state = self.rcdataLessThanSignState elif data == EOF: # Tokenization ends. 
return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def characterReferenceInRcdata(self): self.consumeEntity() self.state = self.rcdataState return True def rawtextState(self): data = self.stream.char() if data == "<": self.state = self.rawtextLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def scriptDataState(self): data = self.stream.char() if data == "<": self.state = self.scriptDataLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def plaintextState(self): data = self.stream.char() if data == EOF: # Tokenization ends. return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + self.stream.charsUntil("\u0000")}) return True def tagOpenState(self): data = self.stream.char() if data == "!": self.state = self.markupDeclarationOpenState elif data == "/": self.state = self.closeTagOpenState elif data in asciiLetters: self.currentToken = {"type": tokenTypes["StartTag"], "name": data, "data": [], "selfClosing": False, "selfClosingAcknowledged": False} self.state = self.tagNameState elif data == ">": # XXX In theory it could be something besides a tag name. But # do we really care? self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-right-bracket"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"}) self.state = self.dataState elif data == "?": # XXX In theory it could be something besides a tag name. But # do we really care? 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-question-mark"}) self.stream.unget(data) self.state = self.bogusCommentState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.dataState return True def closeTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.currentToken = {"type": tokenTypes["EndTag"], "name": data, "data": [], "selfClosing": False} self.state = self.tagNameState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-right-bracket"}) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-eof"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.state = self.dataState else: # XXX data can be _'_... self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-char", "datavars": {"data": data}}) self.stream.unget(data) self.state = self.bogusCommentState return True def tagNameState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == ">": self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-tag-name"}) self.state = self.dataState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" else: self.currentToken["name"] += data # (Don't use charsUntil here, because tag names are # very short and it's faster to not do anything fancy) return True def rcdataLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.rcdataEndTagOpenState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.rcdataEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.rcdataState return True def rawtextLessThanSignState(self): data = self.stream.char() if data == "/": 
self.temporaryBuffer = "" self.state = self.rawtextEndTagOpenState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.rawtextState return True def rawtextEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.rawtextEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.rawtextState return True def rawtextEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.rawtextState return True def scriptDataLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.scriptDataEndTagOpenState elif data == "!": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"}) self.state = self.scriptDataEscapeStartState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.scriptDataEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapeStartState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapeStartDashState else: self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapeStartDashState(self): data = 
self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashDashState else: self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapedState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashState elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: self.state = self.dataState else: chars = self.stream.charsUntil(("<", "-", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def scriptDataEscapedDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashDashState elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataEscapedState elif data == EOF: self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataEscapedState return True def scriptDataEscapedDashDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == ">": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) self.state = self.scriptDataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataEscapedState elif data == EOF: self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataEscapedState return True def scriptDataEscapedLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.scriptDataEscapedEndTagOpenState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data}) self.temporaryBuffer = data self.state = self.scriptDataDoubleEscapeStartState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataEscapedEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer = data self.state = self.scriptDataEscapedEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataEscapedEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and 
appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataDoubleEscapeStartState(self): data = self.stream.char() if data in (spaceCharacters | frozenset(("/", ">"))): self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) if self.temporaryBuffer.lower() == "script": self.state = self.scriptDataDoubleEscapedState else: self.state = self.scriptDataEscapedState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.temporaryBuffer += data else: self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataDoubleEscapedState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataDoubleEscapedDashState elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) return True def scriptDataDoubleEscapedDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataDoubleEscapedDashDashState elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataDoubleEscapedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapedDashDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == ">": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) self.state = self.scriptDataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataDoubleEscapedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], 
"data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapedLessThanSignState(self): data = self.stream.char() if data == "/": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"}) self.temporaryBuffer = "" self.state = self.scriptDataDoubleEscapeEndState else: self.stream.unget(data) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapeEndState(self): data = self.stream.char() if data in (spaceCharacters | frozenset(("/", ">"))): self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) if self.temporaryBuffer.lower() == "script": self.state = self.scriptDataEscapedState else: self.state = self.scriptDataDoubleEscapedState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.temporaryBuffer += data else: self.stream.unget(data) self.state = self.scriptDataDoubleEscapedState return True def beforeAttributeNameState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data in asciiLetters: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == ">": self.emitCurrentToken() elif data == "/": self.state = self.selfClosingStartTagState elif data in ("'", '"', "=", "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-in-attribute-name"}) self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"].append(["\uFFFD", ""]) self.state = self.attributeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-name-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState return True def attributeNameState(self): data = self.stream.char() leavingThisState = True emitToken = False if data == "=": self.state = self.beforeAttributeValueState elif data in asciiLetters: self.currentToken["data"][-1][0] += data +\ self.stream.charsUntil(asciiLetters, True) leavingThisState = False elif data == ">": # XXX If we emit here the attributes are converted to a dict # without being checked and when the code below runs we error # because data is a dict not a list emitToken = True elif data in spaceCharacters: self.state = self.afterAttributeNameState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][0] += "\uFFFD" leavingThisState = False elif data in ("'", '"', "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-in-attribute-name"}) self.currentToken["data"][-1][0] += data leavingThisState = False elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-name"}) self.state = self.dataState else: self.currentToken["data"][-1][0] += data leavingThisState = False if leavingThisState: # Attributes are not dropped at this stage. That happens when the # start tag token is emitted so values can still be safely appended # to attributes, but we do want to report the parse error in time. 
if self.lowercaseAttrName: self.currentToken["data"][-1][0] = ( self.currentToken["data"][-1][0].translate(asciiUpper2Lower)) for name, value in self.currentToken["data"][:-1]: if self.currentToken["data"][-1][0] == name: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "duplicate-attribute"}) break # XXX Fix for above XXX if emitToken: self.emitCurrentToken() return True def afterAttributeNameState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data == "=": self.state = self.beforeAttributeValueState elif data == ">": self.emitCurrentToken() elif data in asciiLetters: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"].append(["\uFFFD", ""]) self.state = self.attributeNameState elif data in ("'", '"', "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-after-attribute-name"}) self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-end-of-tag-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState return True def beforeAttributeValueState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data == "\"": self.state = self.attributeValueDoubleQuotedState elif data == "&": self.state = self.attributeValueUnQuotedState self.stream.unget(data) elif data == "'": self.state = self.attributeValueSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-value-but-got-right-bracket"}) self.emitCurrentToken() elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" self.state = self.attributeValueUnQuotedState elif data in ("=", "<", "`"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "equals-in-unquoted-attribute-value"}) self.currentToken["data"][-1][1] += data self.state = self.attributeValueUnQuotedState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-value-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data self.state = self.attributeValueUnQuotedState return True def attributeValueDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterAttributeValueState elif data == "&": self.processEntityInAttribute('"') elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-double-quote"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data +\ self.stream.charsUntil(("\"", "&", "\u0000")) return True def attributeValueSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterAttributeValueState elif data == "&": self.processEntityInAttribute("'") elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-single-quote"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data +\ self.stream.charsUntil(("'", "&", "\u0000")) return True def attributeValueUnQuotedState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == "&": self.processEntityInAttribute(">") elif data == ">": self.emitCurrentToken() elif data in ('"', "'", "=", "<", "`"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-in-unquoted-attribute-value"}) self.currentToken["data"][-1][1] += data elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-no-quotes"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data + self.stream.charsUntil( frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters) return True def afterAttributeValueState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == ">": self.emitCurrentToken() elif data == "/": self.state = self.selfClosingStartTagState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-EOF-after-attribute-value"}) self.stream.unget(data) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-after-attribute-value"}) self.stream.unget(data) self.state = self.beforeAttributeNameState return True def selfClosingStartTagState(self): data = self.stream.char() if data == ">": self.currentToken["selfClosing"] = True self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-EOF-after-solidus-in-tag"}) self.stream.unget(data) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-after-solidus-in-tag"}) self.stream.unget(data) self.state = self.beforeAttributeNameState return True def bogusCommentState(self): # Make a new comment token and give it as value all the characters # until the first > or EOF (charsUntil checks for EOF automatically) # and emit it. data = self.stream.charsUntil(">") data = data.replace("\u0000", "\uFFFD") self.tokenQueue.append( {"type": tokenTypes["Comment"], "data": data}) # Eat the character directly after the bogus comment which is either a # ">" or an EOF. 
self.stream.char() self.state = self.dataState return True def markupDeclarationOpenState(self): charStack = [self.stream.char()] if charStack[-1] == "-": charStack.append(self.stream.char()) if charStack[-1] == "-": self.currentToken = {"type": tokenTypes["Comment"], "data": ""} self.state = self.commentStartState return True elif charStack[-1] in ('d', 'D'): matched = True for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'), ('y', 'Y'), ('p', 'P'), ('e', 'E')): charStack.append(self.stream.char()) if charStack[-1] not in expected: matched = False break if matched: self.currentToken = {"type": tokenTypes["Doctype"], "name": "", "publicId": None, "systemId": None, "correct": True} self.state = self.doctypeState return True elif (charStack[-1] == "[" and self.parser is not None and self.parser.tree.openElements and self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace): matched = True for expected in ["C", "D", "A", "T", "A", "["]: charStack.append(self.stream.char()) if charStack[-1] != expected: matched = False break if matched: self.state = self.cdataSectionState return True self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-dashes-or-doctype"}) while charStack: self.stream.unget(charStack.pop()) self.state = self.bogusCommentState return True def commentStartState(self): data = self.stream.char() if data == "-": self.state = self.commentStartDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "incorrect-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += data self.state = self.commentState return True def commentStartDashState(self): data = self.stream.char() if data == "-": self.state = self.commentEndState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "-\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "incorrect-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "-" + data self.state = self.commentState return True def commentState(self): data = self.stream.char() if data == "-": self.state = self.commentEndDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += data + \ self.stream.charsUntil(("-", "\u0000")) return True def commentEndDashState(self): data = self.stream.char() if data == "-": self.state = self.commentEndState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "-\uFFFD" self.state = self.commentState elif data is EOF: 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-end-dash"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "-" + data self.state = self.commentState return True def commentEndState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "--\uFFFD" self.state = self.commentState elif data == "!": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-bang-after-double-dash-in-comment"}) self.state = self.commentEndBangState elif data == "-": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-dash-after-double-dash-in-comment"}) self.currentToken["data"] += data elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-double-dash"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-comment"}) self.currentToken["data"] += "--" + data self.state = self.commentState return True def commentEndBangState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "-": self.currentToken["data"] += "--!" self.state = self.commentEndDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "--!\uFFFD" self.state = self.commentState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-end-bang-state"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "--!" 
+ data self.state = self.commentState return True def doctypeState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-eof"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "need-space-after-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypeNameState return True def beforeDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-right-bracket"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] = "\uFFFD" self.state = self.doctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-eof"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["name"] = data self.state = self.doctypeNameState return True def doctypeNameState(self): data = self.stream.char() if data in spaceCharacters: self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.state = self.afterDoctypeNameState elif data == ">": self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" self.state = self.doctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype-name"}) self.currentToken["correct"] = False self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["name"] += data return True def afterDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.currentToken["correct"] = False self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: if data in ("p", "P"): matched = True for expected in (("u", "U"), ("b", "B"), ("l", "L"), ("i", "I"), ("c", "C")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypePublicKeywordState return True elif data in ("s", "S"): matched = True for expected in (("y", "Y"), ("s", "S"), ("t", "T"), ("e", "E"), ("m", "M")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypeSystemKeywordState return True # All the characters read before the current 'data' will be # [a-zA-Z], so they're garbage in the bogus doctype and can be # discarded; only the latest character might be '>' or EOF # and needs to be ungetted self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], 
"data": "expected-space-or-right-bracket-in-doctype", "datavars": {"data": data}}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def afterDoctypePublicKeywordState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypePublicIdentifierState elif data in ("'", '"'): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypePublicIdentifierState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.stream.unget(data) self.state = self.beforeDoctypePublicIdentifierState return True def beforeDoctypePublicIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == "\"": self.currentToken["publicId"] = "" self.state = self.doctypePublicIdentifierDoubleQuotedState elif data == "'": self.currentToken["publicId"] = "" self.state = self.doctypePublicIdentifierSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def doctypePublicIdentifierDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterDoctypePublicIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["publicId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["publicId"] += data return True def doctypePublicIdentifierSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterDoctypePublicIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["publicId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["publicId"] += data return True def afterDoctypePublicIdentifierState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.betweenDoctypePublicAndSystemIdentifiersState elif data == ">": 
self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == '"': self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def betweenDoctypePublicAndSystemIdentifiersState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == '"': self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def afterDoctypeSystemKeywordState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypeSystemIdentifierState elif data in ("'", '"'): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypeSystemIdentifierState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.stream.unget(data) self.state = self.beforeDoctypeSystemIdentifierState return True def beforeDoctypeSystemIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == "\"": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def doctypeSystemIdentifierDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterDoctypeSystemIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"invalid-codepoint"}) self.currentToken["systemId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["systemId"] += data return True def doctypeSystemIdentifierSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterDoctypeSystemIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["systemId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["systemId"] += data return True def afterDoctypeSystemIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.state = self.bogusDoctypeState return True def bogusDoctypeState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: # XXX EMIT self.stream.unget(data) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: pass return True def cdataSectionState(self): data = [] while True: data.append(self.stream.charsUntil("]")) data.append(self.stream.charsUntil(">")) char = self.stream.char() if char == EOF: break else: assert char == ">" if data[-1][-2:] == "]]": data[-1] = data[-1][:-2] break else: data.append(char) data = "".join(data) # Deal with null here rather than in the parser nullCount = data.count("\u0000") if nullCount > 0: for i in range(nullCount): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) data = data.replace("\u0000", "\uFFFD") if data: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.dataState return True
gpl-3.0
vwolfley/GrandCanyonCouncilBSA_Leaflet
node_modules/grunt-version-check/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py
2354
10366
# Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.

try:
    from thread import get_ident as _get_ident
except ImportError:
    from dummy_thread import get_ident as _get_ident

try:
    from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
    pass


class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    # Suppress 'OrderedDict.update: Method has no argument':
    # pylint: disable=E0211
    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v

        '''
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
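A brief usage sketch of the backport above, run under Python 2 (which this backport targets); it is illustrative only and simply exercises the semantics documented in the docstrings:

# Illustrative usage of the OrderedDict backport defined above.
od = OrderedDict()
od['b'] = 1
od['a'] = 2
od['c'] = 3

print(od.keys())               # ['b', 'a', 'c'] -- insertion order, not sorted order
print(od.popitem())            # ('c', 3)  LIFO by default
print(od.popitem(last=False))  # ('b', 1)  FIFO when last is false

# Equality against another OrderedDict is order-sensitive:
print(OrderedDict([('a', 1), ('b', 2)]) == OrderedDict([('b', 2), ('a', 1)]))  # False
# ...but against a plain dict it is order-insensitive:
print(OrderedDict([('a', 1), ('b', 2)]) == {'b': 2, 'a': 1})                   # True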
mit
40223202/2015cdb_g2
static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_loader.py
738
49593
import sys import types import unittest class Test_TestLoader(unittest.TestCase): ### Tests for TestLoader.loadTestsFromTestCase ################################################################ # "Return a suite of all tests cases contained in the TestCase-derived # class testCaseClass" def test_loadTestsFromTestCase(self): class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foo_bar(self): pass tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')]) loader = unittest.TestLoader() self.assertEqual(loader.loadTestsFromTestCase(Foo), tests) # "Return a suite of all tests cases contained in the TestCase-derived # class testCaseClass" # # Make sure it does the right thing even if no tests were found def test_loadTestsFromTestCase__no_matches(self): class Foo(unittest.TestCase): def foo_bar(self): pass empty_suite = unittest.TestSuite() loader = unittest.TestLoader() self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite) # "Return a suite of all tests cases contained in the TestCase-derived # class testCaseClass" # # What happens if loadTestsFromTestCase() is given an object # that isn't a subclass of TestCase? Specifically, what happens # if testCaseClass is a subclass of TestSuite? # # This is checked for specifically in the code, so we better add a # test for it. def test_loadTestsFromTestCase__TestSuite_subclass(self): class NotATestCase(unittest.TestSuite): pass loader = unittest.TestLoader() try: loader.loadTestsFromTestCase(NotATestCase) except TypeError: pass else: self.fail('Should raise TypeError') # "Return a suite of all tests cases contained in the TestCase-derived # class testCaseClass" # # Make sure loadTestsFromTestCase() picks up the default test method # name (as specified by TestCase), even though the method name does # not match the default TestLoader.testMethodPrefix string def test_loadTestsFromTestCase__default_method_name(self): class Foo(unittest.TestCase): def runTest(self): pass loader = unittest.TestLoader() # This has to be false for the test to succeed self.assertFalse('runTest'.startswith(loader.testMethodPrefix)) suite = loader.loadTestsFromTestCase(Foo) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [Foo('runTest')]) ################################################################ ### /Tests for TestLoader.loadTestsFromTestCase ### Tests for TestLoader.loadTestsFromModule ################################################################ # "This method searches `module` for classes derived from TestCase" def test_loadTestsFromModule__TestCase_subclass(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testcase_1 = MyTestCase loader = unittest.TestLoader() suite = loader.loadTestsFromModule(m) self.assertIsInstance(suite, loader.suiteClass) expected = [loader.suiteClass([MyTestCase('test')])] self.assertEqual(list(suite), expected) # "This method searches `module` for classes derived from TestCase" # # What happens if no tests are found (no TestCase instances)? def test_loadTestsFromModule__no_TestCase_instances(self): m = types.ModuleType('m') loader = unittest.TestLoader() suite = loader.loadTestsFromModule(m) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), []) # "This method searches `module` for classes derived from TestCase" # # What happens if no tests are found (TestCases instances, but no tests)? 
def test_loadTestsFromModule__no_TestCase_tests(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): pass m.testcase_1 = MyTestCase loader = unittest.TestLoader() suite = loader.loadTestsFromModule(m) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [loader.suiteClass()]) # "This method searches `module` for classes derived from TestCase"s # # What happens if loadTestsFromModule() is given something other # than a module? # # XXX Currently, it succeeds anyway. This flexibility # should either be documented or loadTestsFromModule() should # raise a TypeError # # XXX Certain people are using this behaviour. We'll add a test for it def test_loadTestsFromModule__not_a_module(self): class MyTestCase(unittest.TestCase): def test(self): pass class NotAModule(object): test_2 = MyTestCase loader = unittest.TestLoader() suite = loader.loadTestsFromModule(NotAModule) reference = [unittest.TestSuite([MyTestCase('test')])] self.assertEqual(list(suite), reference) # Check that loadTestsFromModule honors (or not) a module # with a load_tests function. def test_loadTestsFromModule__load_tests(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testcase_1 = MyTestCase load_tests_args = [] def load_tests(loader, tests, pattern): self.assertIsInstance(tests, unittest.TestSuite) load_tests_args.extend((loader, tests, pattern)) return tests m.load_tests = load_tests loader = unittest.TestLoader() suite = loader.loadTestsFromModule(m) self.assertIsInstance(suite, unittest.TestSuite) self.assertEqual(load_tests_args, [loader, suite, None]) load_tests_args = [] suite = loader.loadTestsFromModule(m, use_load_tests=False) self.assertEqual(load_tests_args, []) def test_loadTestsFromModule__faulty_load_tests(self): m = types.ModuleType('m') def load_tests(loader, tests, pattern): raise TypeError('some failure') m.load_tests = load_tests loader = unittest.TestLoader() suite = loader.loadTestsFromModule(m) self.assertIsInstance(suite, unittest.TestSuite) self.assertEqual(suite.countTestCases(), 1) test = list(suite)[0] self.assertRaisesRegex(TypeError, "some failure", test.m) ################################################################ ### /Tests for TestLoader.loadTestsFromModule() ### Tests for TestLoader.loadTestsFromName() ################################################################ # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # # Is ValueError raised in response to an empty name? def test_loadTestsFromName__empty_name(self): loader = unittest.TestLoader() try: loader.loadTestsFromName('') except ValueError as e: self.assertEqual(str(e), "Empty module name") else: self.fail("TestLoader.loadTestsFromName failed to raise ValueError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # # What happens when the name contains invalid characters? def test_loadTestsFromName__malformed_name(self): loader = unittest.TestLoader() # XXX Should this raise ValueError or ImportError? 
try: loader.loadTestsFromName('abc () //') except ValueError: pass except ImportError: pass else: self.fail("TestLoader.loadTestsFromName failed to raise ValueError") # "The specifier name is a ``dotted name'' that may resolve ... to a # module" # # What happens when a module by that name can't be found? def test_loadTestsFromName__unknown_module_name(self): loader = unittest.TestLoader() try: loader.loadTestsFromName('sdasfasfasdf') except ImportError as e: self.assertEqual(str(e), "No module named 'sdasfasfasdf'") else: self.fail("TestLoader.loadTestsFromName failed to raise ImportError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # # What happens when the module is found, but the attribute can't? def test_loadTestsFromName__unknown_attr_name(self): loader = unittest.TestLoader() try: loader.loadTestsFromName('unittest.sdasfasfasdf') except AttributeError as e: self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'") else: self.fail("TestLoader.loadTestsFromName failed to raise AttributeError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # # What happens when we provide the module, but the attribute can't be # found? def test_loadTestsFromName__relative_unknown_name(self): loader = unittest.TestLoader() try: loader.loadTestsFromName('sdasfasfasdf', unittest) except AttributeError as e: self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'") else: self.fail("TestLoader.loadTestsFromName failed to raise AttributeError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # ... # "The method optionally resolves name relative to the given module" # # Does loadTestsFromName raise ValueError when passed an empty # name relative to a provided module? # # XXX Should probably raise a ValueError instead of an AttributeError def test_loadTestsFromName__relative_empty_name(self): loader = unittest.TestLoader() try: loader.loadTestsFromName('', unittest) except AttributeError as e: pass else: self.fail("Failed to raise AttributeError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # ... # "The method optionally resolves name relative to the given module" # # What happens when an impossible name is given, relative to the provided # `module`? def test_loadTestsFromName__relative_malformed_name(self): loader = unittest.TestLoader() # XXX Should this raise AttributeError or ValueError? try: loader.loadTestsFromName('abc () //', unittest) except ValueError: pass except AttributeError: pass else: self.fail("TestLoader.loadTestsFromName failed to raise ValueError") # "The method optionally resolves name relative to the given module" # # Does loadTestsFromName raise TypeError when the `module` argument # isn't a module object? 
# # XXX Accepts the not-a-module object, ignorning the object's type # This should raise an exception or the method name should be changed # # XXX Some people are relying on this, so keep it for now def test_loadTestsFromName__relative_not_a_module(self): class MyTestCase(unittest.TestCase): def test(self): pass class NotAModule(object): test_2 = MyTestCase loader = unittest.TestLoader() suite = loader.loadTestsFromName('test_2', NotAModule) reference = [MyTestCase('test')] self.assertEqual(list(suite), reference) # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # # Does it raise an exception if the name resolves to an invalid # object? def test_loadTestsFromName__relative_bad_object(self): m = types.ModuleType('m') m.testcase_1 = object() loader = unittest.TestLoader() try: loader.loadTestsFromName('testcase_1', m) except TypeError: pass else: self.fail("Should have raised TypeError") # "The specifier name is a ``dotted name'' that may # resolve either to ... a test case class" def test_loadTestsFromName__relative_TestCase_subclass(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testcase_1 = MyTestCase loader = unittest.TestLoader() suite = loader.loadTestsFromName('testcase_1', m) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [MyTestCase('test')]) # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." def test_loadTestsFromName__relative_TestSuite(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testsuite = unittest.TestSuite([MyTestCase('test')]) loader = unittest.TestLoader() suite = loader.loadTestsFromName('testsuite', m) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [MyTestCase('test')]) # "The specifier name is a ``dotted name'' that may resolve ... to # ... a test method within a test case class" def test_loadTestsFromName__relative_testmethod(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testcase_1 = MyTestCase loader = unittest.TestLoader() suite = loader.loadTestsFromName('testcase_1.test', m) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [MyTestCase('test')]) # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # # Does loadTestsFromName() raise the proper exception when trying to # resolve "a test method within a test case class" that doesn't exist # for the given name (relative to a provided module)? def test_loadTestsFromName__relative_invalid_testmethod(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testcase_1 = MyTestCase loader = unittest.TestLoader() try: loader.loadTestsFromName('testcase_1.testfoo', m) except AttributeError as e: self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'") else: self.fail("Failed to raise AttributeError") # "The specifier name is a ``dotted name'' that may resolve ... to # ... 
a callable object which returns a ... TestSuite instance" def test_loadTestsFromName__callable__TestSuite(self): m = types.ModuleType('m') testcase_1 = unittest.FunctionTestCase(lambda: None) testcase_2 = unittest.FunctionTestCase(lambda: None) def return_TestSuite(): return unittest.TestSuite([testcase_1, testcase_2]) m.return_TestSuite = return_TestSuite loader = unittest.TestLoader() suite = loader.loadTestsFromName('return_TestSuite', m) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [testcase_1, testcase_2]) # "The specifier name is a ``dotted name'' that may resolve ... to # ... a callable object which returns a TestCase ... instance" def test_loadTestsFromName__callable__TestCase_instance(self): m = types.ModuleType('m') testcase_1 = unittest.FunctionTestCase(lambda: None) def return_TestCase(): return testcase_1 m.return_TestCase = return_TestCase loader = unittest.TestLoader() suite = loader.loadTestsFromName('return_TestCase', m) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [testcase_1]) # "The specifier name is a ``dotted name'' that may resolve ... to # ... a callable object which returns a TestCase ... instance" #***************************************************************** #Override the suiteClass attribute to ensure that the suiteClass #attribute is used def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self): class SubTestSuite(unittest.TestSuite): pass m = types.ModuleType('m') testcase_1 = unittest.FunctionTestCase(lambda: None) def return_TestCase(): return testcase_1 m.return_TestCase = return_TestCase loader = unittest.TestLoader() loader.suiteClass = SubTestSuite suite = loader.loadTestsFromName('return_TestCase', m) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [testcase_1]) # "The specifier name is a ``dotted name'' that may resolve ... to # ... a test method within a test case class" #***************************************************************** #Override the suiteClass attribute to ensure that the suiteClass #attribute is used def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self): class SubTestSuite(unittest.TestSuite): pass m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testcase_1 = MyTestCase loader = unittest.TestLoader() loader.suiteClass=SubTestSuite suite = loader.loadTestsFromName('testcase_1.test', m) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [MyTestCase('test')]) # "The specifier name is a ``dotted name'' that may resolve ... to # ... a callable object which returns a TestCase or TestSuite instance" # # What happens if the callable returns something else? def test_loadTestsFromName__callable__wrong_type(self): m = types.ModuleType('m') def return_wrong(): return 6 m.return_wrong = return_wrong loader = unittest.TestLoader() try: suite = loader.loadTestsFromName('return_wrong', m) except TypeError: pass else: self.fail("TestLoader.loadTestsFromName failed to raise TypeError") # "The specifier can refer to modules and packages which have not been # imported; they will be imported as a side-effect" def test_loadTestsFromName__module_not_loaded(self): # We're going to try to load this module as a side-effect, so it # better not be loaded before we try. 
# module_name = 'unittest.test.dummy' sys.modules.pop(module_name, None) loader = unittest.TestLoader() try: suite = loader.loadTestsFromName(module_name) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), []) # module should now be loaded, thanks to loadTestsFromName() self.assertIn(module_name, sys.modules) finally: if module_name in sys.modules: del sys.modules[module_name] ################################################################ ### Tests for TestLoader.loadTestsFromName() ### Tests for TestLoader.loadTestsFromNames() ################################################################ # "Similar to loadTestsFromName(), but takes a sequence of names rather # than a single name." # # What happens if that sequence of names is empty? def test_loadTestsFromNames__empty_name_list(self): loader = unittest.TestLoader() suite = loader.loadTestsFromNames([]) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), []) # "Similar to loadTestsFromName(), but takes a sequence of names rather # than a single name." # ... # "The method optionally resolves name relative to the given module" # # What happens if that sequence of names is empty? # # XXX Should this raise a ValueError or just return an empty TestSuite? def test_loadTestsFromNames__relative_empty_name_list(self): loader = unittest.TestLoader() suite = loader.loadTestsFromNames([], unittest) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), []) # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # # Is ValueError raised in response to an empty name? def test_loadTestsFromNames__empty_name(self): loader = unittest.TestLoader() try: loader.loadTestsFromNames(['']) except ValueError as e: self.assertEqual(str(e), "Empty module name") else: self.fail("TestLoader.loadTestsFromNames failed to raise ValueError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # # What happens when presented with an impossible module name? def test_loadTestsFromNames__malformed_name(self): loader = unittest.TestLoader() # XXX Should this raise ValueError or ImportError? try: loader.loadTestsFromNames(['abc () //']) except ValueError: pass except ImportError: pass else: self.fail("TestLoader.loadTestsFromNames failed to raise ValueError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # # What happens when no module can be found for the given name? def test_loadTestsFromNames__unknown_module_name(self): loader = unittest.TestLoader() try: loader.loadTestsFromNames(['sdasfasfasdf']) except ImportError as e: self.assertEqual(str(e), "No module named 'sdasfasfasdf'") else: self.fail("TestLoader.loadTestsFromNames failed to raise ImportError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." 
# # What happens when the module can be found, but not the attribute? def test_loadTestsFromNames__unknown_attr_name(self): loader = unittest.TestLoader() try: loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest']) except AttributeError as e: self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'") else: self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # ... # "The method optionally resolves name relative to the given module" # # What happens when given an unknown attribute on a specified `module` # argument? def test_loadTestsFromNames__unknown_name_relative_1(self): loader = unittest.TestLoader() try: loader.loadTestsFromNames(['sdasfasfasdf'], unittest) except AttributeError as e: self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'") else: self.fail("TestLoader.loadTestsFromName failed to raise AttributeError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # ... # "The method optionally resolves name relative to the given module" # # Do unknown attributes (relative to a provided module) still raise an # exception even in the presence of valid attribute names? def test_loadTestsFromNames__unknown_name_relative_2(self): loader = unittest.TestLoader() try: loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest) except AttributeError as e: self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'") else: self.fail("TestLoader.loadTestsFromName failed to raise AttributeError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # ... # "The method optionally resolves name relative to the given module" # # What happens when faced with the empty string? # # XXX This currently raises AttributeError, though ValueError is probably # more appropriate def test_loadTestsFromNames__relative_empty_name(self): loader = unittest.TestLoader() try: loader.loadTestsFromNames([''], unittest) except AttributeError: pass else: self.fail("Failed to raise ValueError") # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # ... # "The method optionally resolves name relative to the given module" # # What happens when presented with an impossible attribute name? def test_loadTestsFromNames__relative_malformed_name(self): loader = unittest.TestLoader() # XXX Should this raise AttributeError or ValueError? try: loader.loadTestsFromNames(['abc () //'], unittest) except AttributeError: pass except ValueError: pass else: self.fail("TestLoader.loadTestsFromNames failed to raise ValueError") # "The method optionally resolves name relative to the given module" # # Does loadTestsFromNames() make sure the provided `module` is in fact # a module? # # XXX This validation is currently not done. 
This flexibility should # either be documented or a TypeError should be raised. def test_loadTestsFromNames__relative_not_a_module(self): class MyTestCase(unittest.TestCase): def test(self): pass class NotAModule(object): test_2 = MyTestCase loader = unittest.TestLoader() suite = loader.loadTestsFromNames(['test_2'], NotAModule) reference = [unittest.TestSuite([MyTestCase('test')])] self.assertEqual(list(suite), reference) # "The specifier name is a ``dotted name'' that may resolve either to # a module, a test case class, a TestSuite instance, a test method # within a test case class, or a callable object which returns a # TestCase or TestSuite instance." # # Does it raise an exception if the name resolves to an invalid # object? def test_loadTestsFromNames__relative_bad_object(self): m = types.ModuleType('m') m.testcase_1 = object() loader = unittest.TestLoader() try: loader.loadTestsFromNames(['testcase_1'], m) except TypeError: pass else: self.fail("Should have raised TypeError") # "The specifier name is a ``dotted name'' that may resolve ... to # ... a test case class" def test_loadTestsFromNames__relative_TestCase_subclass(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testcase_1 = MyTestCase loader = unittest.TestLoader() suite = loader.loadTestsFromNames(['testcase_1'], m) self.assertIsInstance(suite, loader.suiteClass) expected = loader.suiteClass([MyTestCase('test')]) self.assertEqual(list(suite), [expected]) # "The specifier name is a ``dotted name'' that may resolve ... to # ... a TestSuite instance" def test_loadTestsFromNames__relative_TestSuite(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testsuite = unittest.TestSuite([MyTestCase('test')]) loader = unittest.TestLoader() suite = loader.loadTestsFromNames(['testsuite'], m) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [m.testsuite]) # "The specifier name is a ``dotted name'' that may resolve ... to ... a # test method within a test case class" def test_loadTestsFromNames__relative_testmethod(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testcase_1 = MyTestCase loader = unittest.TestLoader() suite = loader.loadTestsFromNames(['testcase_1.test'], m) self.assertIsInstance(suite, loader.suiteClass) ref_suite = unittest.TestSuite([MyTestCase('test')]) self.assertEqual(list(suite), [ref_suite]) # "The specifier name is a ``dotted name'' that may resolve ... to ... a # test method within a test case class" # # Does the method gracefully handle names that initially look like they # resolve to "a test method within a test case class" but don't? def test_loadTestsFromNames__relative_invalid_testmethod(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): def test(self): pass m.testcase_1 = MyTestCase loader = unittest.TestLoader() try: loader.loadTestsFromNames(['testcase_1.testfoo'], m) except AttributeError as e: self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'") else: self.fail("Failed to raise AttributeError") # "The specifier name is a ``dotted name'' that may resolve ... to # ... a callable object which returns a ... 
TestSuite instance" def test_loadTestsFromNames__callable__TestSuite(self): m = types.ModuleType('m') testcase_1 = unittest.FunctionTestCase(lambda: None) testcase_2 = unittest.FunctionTestCase(lambda: None) def return_TestSuite(): return unittest.TestSuite([testcase_1, testcase_2]) m.return_TestSuite = return_TestSuite loader = unittest.TestLoader() suite = loader.loadTestsFromNames(['return_TestSuite'], m) self.assertIsInstance(suite, loader.suiteClass) expected = unittest.TestSuite([testcase_1, testcase_2]) self.assertEqual(list(suite), [expected]) # "The specifier name is a ``dotted name'' that may resolve ... to # ... a callable object which returns a TestCase ... instance" def test_loadTestsFromNames__callable__TestCase_instance(self): m = types.ModuleType('m') testcase_1 = unittest.FunctionTestCase(lambda: None) def return_TestCase(): return testcase_1 m.return_TestCase = return_TestCase loader = unittest.TestLoader() suite = loader.loadTestsFromNames(['return_TestCase'], m) self.assertIsInstance(suite, loader.suiteClass) ref_suite = unittest.TestSuite([testcase_1]) self.assertEqual(list(suite), [ref_suite]) # "The specifier name is a ``dotted name'' that may resolve ... to # ... a callable object which returns a TestCase or TestSuite instance" # # Are staticmethods handled correctly? def test_loadTestsFromNames__callable__call_staticmethod(self): m = types.ModuleType('m') class Test1(unittest.TestCase): def test(self): pass testcase_1 = Test1('test') class Foo(unittest.TestCase): @staticmethod def foo(): return testcase_1 m.Foo = Foo loader = unittest.TestLoader() suite = loader.loadTestsFromNames(['Foo.foo'], m) self.assertIsInstance(suite, loader.suiteClass) ref_suite = unittest.TestSuite([testcase_1]) self.assertEqual(list(suite), [ref_suite]) # "The specifier name is a ``dotted name'' that may resolve ... to # ... a callable object which returns a TestCase or TestSuite instance" # # What happens when the callable returns something else? def test_loadTestsFromNames__callable__wrong_type(self): m = types.ModuleType('m') def return_wrong(): return 6 m.return_wrong = return_wrong loader = unittest.TestLoader() try: suite = loader.loadTestsFromNames(['return_wrong'], m) except TypeError: pass else: self.fail("TestLoader.loadTestsFromNames failed to raise TypeError") # "The specifier can refer to modules and packages which have not been # imported; they will be imported as a side-effect" def test_loadTestsFromNames__module_not_loaded(self): # We're going to try to load this module as a side-effect, so it # better not be loaded before we try. 
# module_name = 'unittest.test.dummy' sys.modules.pop(module_name, None) loader = unittest.TestLoader() try: suite = loader.loadTestsFromNames([module_name]) self.assertIsInstance(suite, loader.suiteClass) self.assertEqual(list(suite), [unittest.TestSuite()]) # module should now be loaded, thanks to loadTestsFromName() self.assertIn(module_name, sys.modules) finally: if module_name in sys.modules: del sys.modules[module_name] ################################################################ ### /Tests for TestLoader.loadTestsFromNames() ### Tests for TestLoader.getTestCaseNames() ################################################################ # "Return a sorted sequence of method names found within testCaseClass" # # Test.foobar is defined to make sure getTestCaseNames() respects # loader.testMethodPrefix def test_getTestCaseNames(self): class Test(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foobar(self): pass loader = unittest.TestLoader() self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2']) # "Return a sorted sequence of method names found within testCaseClass" # # Does getTestCaseNames() behave appropriately if no tests are found? def test_getTestCaseNames__no_tests(self): class Test(unittest.TestCase): def foobar(self): pass loader = unittest.TestLoader() self.assertEqual(loader.getTestCaseNames(Test), []) # "Return a sorted sequence of method names found within testCaseClass" # # Are not-TestCases handled gracefully? # # XXX This should raise a TypeError, not return a list # # XXX It's too late in the 2.5 release cycle to fix this, but it should # probably be revisited for 2.6 def test_getTestCaseNames__not_a_TestCase(self): class BadCase(int): def test_foo(self): pass loader = unittest.TestLoader() names = loader.getTestCaseNames(BadCase) self.assertEqual(names, ['test_foo']) # "Return a sorted sequence of method names found within testCaseClass" # # Make sure inherited names are handled. # # TestP.foobar is defined to make sure getTestCaseNames() respects # loader.testMethodPrefix def test_getTestCaseNames__inheritance(self): class TestP(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foobar(self): pass class TestC(TestP): def test_1(self): pass def test_3(self): pass loader = unittest.TestLoader() names = ['test_1', 'test_2', 'test_3'] self.assertEqual(loader.getTestCaseNames(TestC), names) ################################################################ ### /Tests for TestLoader.getTestCaseNames() ### Tests for TestLoader.testMethodPrefix ################################################################ # "String giving the prefix of method names which will be interpreted as # test methods" # # Implicit in the documentation is that testMethodPrefix is respected by # all loadTestsFrom* methods. def test_testMethodPrefix__loadTestsFromTestCase(self): class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foo_bar(self): pass tests_1 = unittest.TestSuite([Foo('foo_bar')]) tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')]) loader = unittest.TestLoader() loader.testMethodPrefix = 'foo' self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1) loader.testMethodPrefix = 'test' self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2) # "String giving the prefix of method names which will be interpreted as # test methods" # # Implicit in the documentation is that testMethodPrefix is respected by # all loadTestsFrom* methods. 
def test_testMethodPrefix__loadTestsFromModule(self): m = types.ModuleType('m') class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foo_bar(self): pass m.Foo = Foo tests_1 = [unittest.TestSuite([Foo('foo_bar')])] tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])] loader = unittest.TestLoader() loader.testMethodPrefix = 'foo' self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1) loader.testMethodPrefix = 'test' self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2) # "String giving the prefix of method names which will be interpreted as # test methods" # # Implicit in the documentation is that testMethodPrefix is respected by # all loadTestsFrom* methods. def test_testMethodPrefix__loadTestsFromName(self): m = types.ModuleType('m') class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foo_bar(self): pass m.Foo = Foo tests_1 = unittest.TestSuite([Foo('foo_bar')]) tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')]) loader = unittest.TestLoader() loader.testMethodPrefix = 'foo' self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1) loader.testMethodPrefix = 'test' self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2) # "String giving the prefix of method names which will be interpreted as # test methods" # # Implicit in the documentation is that testMethodPrefix is respected by # all loadTestsFrom* methods. def test_testMethodPrefix__loadTestsFromNames(self): m = types.ModuleType('m') class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foo_bar(self): pass m.Foo = Foo tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])]) tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')]) tests_2 = unittest.TestSuite([tests_2]) loader = unittest.TestLoader() loader.testMethodPrefix = 'foo' self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1) loader.testMethodPrefix = 'test' self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2) # "The default value is 'test'" def test_testMethodPrefix__default_value(self): loader = unittest.TestLoader() self.assertEqual(loader.testMethodPrefix, 'test') ################################################################ ### /Tests for TestLoader.testMethodPrefix ### Tests for TestLoader.sortTestMethodsUsing ################################################################ # "Function to be used to compare method names when sorting them in # getTestCaseNames() and all the loadTestsFromX() methods" def test_sortTestMethodsUsing__loadTestsFromTestCase(self): def reversed_cmp(x, y): return -((x > y) - (x < y)) class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass loader = unittest.TestLoader() loader.sortTestMethodsUsing = reversed_cmp tests = loader.suiteClass([Foo('test_2'), Foo('test_1')]) self.assertEqual(loader.loadTestsFromTestCase(Foo), tests) # "Function to be used to compare method names when sorting them in # getTestCaseNames() and all the loadTestsFromX() methods" def test_sortTestMethodsUsing__loadTestsFromModule(self): def reversed_cmp(x, y): return -((x > y) - (x < y)) m = types.ModuleType('m') class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass m.Foo = Foo loader = unittest.TestLoader() loader.sortTestMethodsUsing = reversed_cmp tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])] self.assertEqual(list(loader.loadTestsFromModule(m)), tests) # "Function to be used to compare method names when sorting them in # getTestCaseNames() and all the 
loadTestsFromX() methods" def test_sortTestMethodsUsing__loadTestsFromName(self): def reversed_cmp(x, y): return -((x > y) - (x < y)) m = types.ModuleType('m') class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass m.Foo = Foo loader = unittest.TestLoader() loader.sortTestMethodsUsing = reversed_cmp tests = loader.suiteClass([Foo('test_2'), Foo('test_1')]) self.assertEqual(loader.loadTestsFromName('Foo', m), tests) # "Function to be used to compare method names when sorting them in # getTestCaseNames() and all the loadTestsFromX() methods" def test_sortTestMethodsUsing__loadTestsFromNames(self): def reversed_cmp(x, y): return -((x > y) - (x < y)) m = types.ModuleType('m') class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass m.Foo = Foo loader = unittest.TestLoader() loader.sortTestMethodsUsing = reversed_cmp tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])] self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests) # "Function to be used to compare method names when sorting them in # getTestCaseNames()" # # Does it actually affect getTestCaseNames()? def test_sortTestMethodsUsing__getTestCaseNames(self): def reversed_cmp(x, y): return -((x > y) - (x < y)) class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass loader = unittest.TestLoader() loader.sortTestMethodsUsing = reversed_cmp test_names = ['test_2', 'test_1'] self.assertEqual(loader.getTestCaseNames(Foo), test_names) # "The default value is the built-in cmp() function" # Since cmp is now defunct, we simply verify that the results # occur in the same order as they would with the default sort. def test_sortTestMethodsUsing__default_value(self): loader = unittest.TestLoader() class Foo(unittest.TestCase): def test_2(self): pass def test_3(self): pass def test_1(self): pass test_names = ['test_2', 'test_3', 'test_1'] self.assertEqual(loader.getTestCaseNames(Foo), sorted(test_names)) # "it can be set to None to disable the sort." # # XXX How is this different from reassigning cmp? Are the tests returned # in a random order or something? This behaviour should die def test_sortTestMethodsUsing__None(self): class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass loader = unittest.TestLoader() loader.sortTestMethodsUsing = None test_names = ['test_2', 'test_1'] self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names)) ################################################################ ### /Tests for TestLoader.sortTestMethodsUsing ### Tests for TestLoader.suiteClass ################################################################ # "Callable object that constructs a test suite from a list of tests." def test_suiteClass__loadTestsFromTestCase(self): class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foo_bar(self): pass tests = [Foo('test_1'), Foo('test_2')] loader = unittest.TestLoader() loader.suiteClass = list self.assertEqual(loader.loadTestsFromTestCase(Foo), tests) # It is implicit in the documentation for TestLoader.suiteClass that # all TestLoader.loadTestsFrom* methods respect it. 
Let's make sure def test_suiteClass__loadTestsFromModule(self): m = types.ModuleType('m') class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foo_bar(self): pass m.Foo = Foo tests = [[Foo('test_1'), Foo('test_2')]] loader = unittest.TestLoader() loader.suiteClass = list self.assertEqual(loader.loadTestsFromModule(m), tests) # It is implicit in the documentation for TestLoader.suiteClass that # all TestLoader.loadTestsFrom* methods respect it. Let's make sure def test_suiteClass__loadTestsFromName(self): m = types.ModuleType('m') class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foo_bar(self): pass m.Foo = Foo tests = [Foo('test_1'), Foo('test_2')] loader = unittest.TestLoader() loader.suiteClass = list self.assertEqual(loader.loadTestsFromName('Foo', m), tests) # It is implicit in the documentation for TestLoader.suiteClass that # all TestLoader.loadTestsFrom* methods respect it. Let's make sure def test_suiteClass__loadTestsFromNames(self): m = types.ModuleType('m') class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def foo_bar(self): pass m.Foo = Foo tests = [[Foo('test_1'), Foo('test_2')]] loader = unittest.TestLoader() loader.suiteClass = list self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests) # "The default value is the TestSuite class" def test_suiteClass__default_value(self): loader = unittest.TestLoader() self.assertTrue(loader.suiteClass is unittest.TestSuite)
gpl-3.0
JJediny/python-social-auth
social/tests/backends/test_box.py
80
2294
import json from social.tests.backends.oauth import OAuth2Test class BoxOAuth2Test(OAuth2Test): backend_path = 'social.backends.box.BoxOAuth2' user_data_url = 'https://api.box.com/2.0/users/me' expected_username = '[email protected]' access_token_body = json.dumps({ 'access_token': 'T9cE5asGnuyYCCqIZFoWjFHvNbvVqHjl', 'expires_in': 3600, 'restricted_to': [], 'token_type': 'bearer', 'refresh_token': 'J7rxTiWOHMoSC1isKZKBZWizoRXjkQzig5C6jFgCVJ9bU' 'nsUfGMinKBDLZWP9BgR' }) user_data_body = json.dumps({ 'type': 'user', 'id': '181216415', 'name': 'sean rose', 'login': '[email protected]', 'created_at': '2012-05-03T21:39:11-07:00', 'modified_at': '2012-11-14T11:21:32-08:00', 'role': 'admin', 'language': 'en', 'space_amount': 11345156112, 'space_used': 1237009912, 'max_upload_size': 2147483648, 'tracking_codes': [], 'can_see_managed_users': True, 'is_sync_enabled': True, 'status': 'active', 'job_title': '', 'phone': '6509241374', 'address': '', 'avatar_url': 'https://www.box.com/api/avatar/large/181216415', 'is_exempt_from_device_limits': False, 'is_exempt_from_login_verification': False, 'enterprise': { 'type': 'enterprise', 'id': '17077211', 'name': 'seanrose enterprise' } }) refresh_token_body = json.dumps({ 'access_token': 'T9cE5asGnuyYCCqIZFoWjFHvNbvVqHjl', 'expires_in': 3600, 'restricted_to': [], 'token_type': 'bearer', 'refresh_token': 'J7rxTiWOHMoSC1isKZKBZWizoRXjkQzig5C6jFgCVJ9b' 'UnsUfGMinKBDLZWP9BgR' }) def test_login(self): self.do_login() def test_partial_pipeline(self): self.do_partial_pipeline() def refresh_token_arguments(self): uri = self.strategy.build_absolute_uri('/complete/box/') return {'redirect_uri': uri} def test_refresh_token(self): user, social = self.do_refresh_token() self.assertEqual(social.extra_data['access_token'], 'T9cE5asGnuyYCCqIZFoWjFHvNbvVqHjl')
bsd-3-clause
jlord/pagination-test
node_modules/browserify/node_modules/syntax-error/node_modules/esprima/tools/generate-unicode-regex.py
260
4932
#!/usr/bin/python
# -*- coding: utf-8 -*-
# By Yusuke Suzuki <[email protected]>
# Modified by Mathias Bynens <http://mathiasbynens.be/>
# http://code.google.com/p/esprima/issues/detail?id=110

import sys
import re

class RegExpGenerator(object):
    def __init__(self, detector):
        self.detector = detector

    def generate_identifier_start(self):
        r = [ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_start(ch)]
        return self._generate_range(r)

    def generate_identifier_part(self):
        r = [ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_part(ch)]
        return self._generate_range(r)

    def generate_non_ascii_identifier_start(self):
        # range() used here for consistency; the original mixed xrange/range
        r = [ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_start(ch)]
        return self._generate_range(r)

    def generate_non_ascii_identifier_part(self):
        r = [ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_part(ch)]
        return self._generate_range(r)

    def generate_non_ascii_separator_space(self):
        r = [ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_separator_space(ch)]
        return self._generate_range(r)

    def _generate_range(self, r):
        if len(r) == 0:
            return '[]'
        buf = []
        start = r[0]
        end = r[0]
        predict = start + 1
        r = r[1:]
        for code in r:
            if predict == code:
                end = code
                predict = code + 1
                continue
            else:
                if start == end:
                    buf.append("\\u%04X" % start)
                elif end == start + 1:
                    buf.append("\\u%04X\\u%04X" % (start, end))
                else:
                    buf.append("\\u%04X-\\u%04X" % (start, end))
                start = code
                end = code
                predict = code + 1
        if start == end:
            buf.append("\\u%04X" % start)
        else:
            buf.append("\\u%04X-\\u%04X" % (start, end))
        return '[' + ''.join(buf) + ']'


class Detector(object):
    def __init__(self, data):
        self.data = data

    def is_ascii(self, ch):
        return ch < 0x80

    def is_ascii_alpha(self, ch):
        v = ch | 0x20
        return v >= ord('a') and v <= ord('z')

    def is_decimal_digit(self, ch):
        return ch >= ord('0') and ch <= ord('9')

    def is_octal_digit(self, ch):
        return ch >= ord('0') and ch <= ord('7')

    def is_hex_digit(self, ch):
        v = ch | 0x20
        # bug fix: the original tested the undefined name `c` here
        return self.is_decimal_digit(ch) or (v >= ord('a') and v <= ord('f'))

    def is_digit(self, ch):
        return self.is_decimal_digit(ch) or self.data[ch] == 'Nd'

    def is_ascii_alphanumeric(self, ch):
        return self.is_decimal_digit(ch) or self.is_ascii_alpha(ch)

    def _is_non_ascii_identifier_start(self, ch):
        c = self.data[ch]
        return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl'

    def _is_non_ascii_identifier_part(self, ch):
        c = self.data[ch]
        return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl' or c == 'Mn' or c == 'Mc' or c == 'Nd' or c == 'Pc' or ch == 0x200C or ch == 0x200D

    def is_separator_space(self, ch):
        return self.data[ch] == 'Zs'

    def is_white_space(self, ch):
        return ch == ord(' ') or ch == ord("\t") or ch == 0xB or ch == 0xC or ch == 0x00A0 or ch == 0xFEFF or self.is_separator_space(ch)

    def is_line_terminator(self, ch):
        return ch == 0x000D or ch == 0x000A or self.is_line_or_paragraph_terminator(ch)

    def is_line_or_paragraph_terminator(self, ch):
        return ch == 0x2028 or ch == 0x2029

    def is_identifier_start(self, ch):
        if self.is_ascii(ch):
            return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alpha(ch)
        return self._is_non_ascii_identifier_start(ch)

    def is_identifier_part(self, ch):
        if self.is_ascii(ch):
            return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alphanumeric(ch)
        return self._is_non_ascii_identifier_part(ch)


def analyze(source):
    data = []
    dictionary = {}
    with open(source) as uni:
        flag = False
        first = 0
        for line in uni:
            d = line.strip().split(";")   # was string.split(...), long deprecated
            val = int(d[0], 16)
            if flag:
                if re.compile("<.+, Last>").match(d[1]):
                    # print "%s : u%X" % (d[1], val)
                    flag = False
                    for t in range(first, val + 1):
                        dictionary[t] = str(d[2])
                else:
                    # was `raise "Database Exception"`; string exceptions are invalid
                    raise Exception("Database Exception")
            else:
                if re.compile("<.+, First>").match(d[1]):
                    # print "%s : u%X" % (d[1], val)
                    flag = True
                    first = val
                else:
                    dictionary[val] = str(d[2])
    for i in range(0xFFFF + 1):
        if dictionary.get(i) is None:
            data.append("Un")
        else:
            data.append(dictionary[i])
    return RegExpGenerator(Detector(data))


def main(source):
    generator = analyze(source)
    print generator.generate_non_ascii_identifier_start()
    print generator.generate_non_ascii_identifier_part()
    print generator.generate_non_ascii_separator_space()

if __name__ == '__main__':
    main(sys.argv[1])
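# --- Usage note (added; not part of the original esprima tool) ---
# The script expects the path to a Unicode Character Database file
# (UnicodeData.txt from unicode.org) as its only argument, e.g.:
#
#     python generate-unicode-regex.py UnicodeData.txt
#
# It prints three character classes (non-ASCII identifier start,
# identifier part, separator space) ready to paste into esprima's
# lexer regular expressions.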
bsd-3-clause
aljohnso/Market_Dreams
Stock Class.py
1
1179
class Stock:
    def __init__(self, Company_name):
        """ the constructor for the Stock class;
            all dictionaries start out empty, and the company name
            is kept for our own purposes, to keep things organized """
        self.Company_name = Company_name
        self.date_time = {}        # starts empty
        self.ticker_name = {}
        self.history = {}
        self.price = {}
        self.certainty = {}        # was misspelled "certinty"
        self.price_change = {}
        # you will want another dictionary for your custom feature

    def __repr__(self):
        """ this method creates the string version of Stock objects
            (the original body referenced attributes from an unrelated
            TextModel class; it now reports this class's own data) """
        s = "\nCompany name: " + str(self.Company_name) + "\n"
        s += "  n. of dates tracked: " + str(len(self.date_time)) + "\n"
        s += "  n. of prices: " + str(len(self.price)) + "\n"
        s += "  n. of price changes: " + str(len(self.price_change)) + "\n"
        s += "  n. of history entries: " + str(len(self.history)) + "\n"
        # you will likely want another line for your custom feature!
        return s
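# --- Usage sketch (added for illustration; the date and prices below are
# --- hypothetical, and only attributes defined in __init__ are used) ---
if __name__ == '__main__':
    apple = Stock('Apple Inc.')
    apple.price['2015-08-28'] = 112.76
    apple.price_change['2015-08-28'] = -1.52
    print(apple)   # __repr__ reports the size of each dictionary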
mit
ContinuumIO/numpy
numpy/random/__init__.py
84
5492
""" ======================== Random Number Generation ======================== ==================== ========================================================= Utility functions ============================================================================== random Uniformly distributed values of a given shape. bytes Uniformly distributed random bytes. random_integers Uniformly distributed integers in a given range. random_sample Uniformly distributed floats in a given range. random Alias for random_sample ranf Alias for random_sample sample Alias for random_sample choice Generate a weighted random sample from a given array-like permutation Randomly permute a sequence / generate a random sequence. shuffle Randomly permute a sequence in place. seed Seed the random number generator. ==================== ========================================================= ==================== ========================================================= Compatibility functions ============================================================================== rand Uniformly distributed values. randn Normally distributed values. ranf Uniformly distributed floating point numbers. randint Uniformly distributed integers in a given range. ==================== ========================================================= ==================== ========================================================= Univariate distributions ============================================================================== beta Beta distribution over ``[0, 1]``. binomial Binomial distribution. chisquare :math:`\\chi^2` distribution. exponential Exponential distribution. f F (Fisher-Snedecor) distribution. gamma Gamma distribution. geometric Geometric distribution. gumbel Gumbel distribution. hypergeometric Hypergeometric distribution. laplace Laplace distribution. logistic Logistic distribution. lognormal Log-normal distribution. logseries Logarithmic series distribution. negative_binomial Negative binomial distribution. noncentral_chisquare Non-central chi-square distribution. noncentral_f Non-central F distribution. normal Normal / Gaussian distribution. pareto Pareto distribution. poisson Poisson distribution. power Power distribution. rayleigh Rayleigh distribution. triangular Triangular distribution. uniform Uniform distribution. vonmises Von Mises circular distribution. wald Wald (inverse Gaussian) distribution. weibull Weibull distribution. zipf Zipf's distribution over ranked data. ==================== ========================================================= ==================== ========================================================= Multivariate distributions ============================================================================== dirichlet Multivariate generalization of Beta distribution. multinomial Multivariate generalization of the binomial distribution. multivariate_normal Multivariate generalization of the normal distribution. ==================== ========================================================= ==================== ========================================================= Standard distributions ============================================================================== standard_cauchy Standard Cauchy-Lorentz distribution. standard_exponential Standard exponential distribution. standard_gamma Standard Gamma distribution. standard_normal Standard normal distribution. standard_t Standard Student's t-distribution. 
==================== ========================================================= ==================== ========================================================= Internal functions ============================================================================== get_state Get tuple representing internal state of generator. set_state Set state of generator. ==================== ========================================================= """ from __future__ import division, absolute_import, print_function import warnings # To get sub-modules from .info import __doc__, __all__ with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="numpy.ndarray size changed") from .mtrand import * # Some aliases: ranf = random = sample = random_sample __all__.extend(['ranf', 'random', 'sample']) def __RandomState_ctor(): """Return a RandomState instance. This function exists solely to assist (un)pickling. Note that the state of the RandomState returned here is irrelevant, as this function's entire purpose is to return a newly allocated RandomState whose state pickle can set. Consequently the RandomState returned by this function is a freshly allocated copy with a seed=0. See https://github.com/numpy/numpy/issues/4763 for a detailed discussion """ return RandomState(seed=0) from numpy.testing.nosetester import _numpy_tester test = _numpy_tester().test bench = _numpy_tester().bench
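# --- Usage sketch (added; illustrative only, not part of numpy) ---
# The aliases defined above make random(), ranf() and sample()
# interchangeable with random_sample():
#
#     import numpy.random as npr
#     npr.seed(0)                     # seed for a reproducible stream
#     u = npr.random_sample(3)        # three uniform floats in [0, 1)
#     z = npr.standard_normal(3)      # three standard-normal draws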
bsd-3-clause
xialin/cs4221
hello/views.py
1
6857
from django.shortcuts import render, redirect from django.conf import settings import textwrap from converter import convert_xml_to_json from converter import update_primary_key_in_xml, merge_relationship_in_xml, validate_xml import lxml.etree as etree from django.http import HttpResponse from django.views.generic.base import View class HomePageView(View): # TODO: allow user to edit XML raw file @staticmethod def dispatch(request, *args, **kwargs): response_text = textwrap.dedent('''\ <html> <head> <title>CS4221 Demo</title> </head> <body> <h1>Group 6</h1> <p>Hello, world!</p> </body> </html> ''') return HttpResponse(response_text) def homePage(request): return render(request, 'index.html', {}) def user_manual(request): return render(request, 'user_manual.html', {}) def documentation(request): return render(request, 'documentation.html', {}) def download(request): if request.method == 'POST' and request.session.get('output_json'): output_json = request.session.get('output_json') response = HttpResponse(output_json, content_type='application/json') response['Content-Disposition'] = 'attachment; filename=export.json' return response else: print "invalid request" uploaded_file_error = "Invalid Request!" return render(request, 'upload.html', { 'uploaded_file_error': uploaded_file_error }) def upload(request): """ Allow user to upload ER object (XML) :param request: :return: """ if request.method == 'POST' and request.FILES['er_file']: er_file = request.FILES['er_file'] filetypes = er_file.content_type.split('/') filetype = ''; if len(filetypes) == 2: filetype = filetypes[1] print filetype if not filetype or "XML" != filetype.upper(): uploaded_file_error = "Uploaded file type is not supported." return render(request, 'upload.html', { 'uploaded_file_error': uploaded_file_error }) try: er_tree = etree.parse(er_file) file_content = etree.tostring(er_tree, pretty_print=True) except Exception: return render(request, 'upload.html', { 'uploaded_file_error': "The uploaded xml is invalid." }) request.session['xmlContent'] = file_content request.session.save() print "upload successful >>>" return render(request, 'display_uploaded_file.html', { 'uploaded_file_content': file_content }) return render(request, 'upload.html') def generate(request): """ User click generate button :param request: :return: """ if request.method == 'POST' and request.session.get('xmlContent'): xml_content = request.session.get('xmlContent') return render(request, 'display_uploaded_file.html', { 'uploaded_file_content': xml_content }) else: print "generate failed >>>" uploaded_file_error = "Uploaded file is not found." return render(request, 'upload.html', { 'uploaded_file_error': uploaded_file_error }) def choose_key(request): """ Prompt user to choose primary key :param request: :return: """ if request.method == 'POST' and request.session.get('xmlContent'): xml_content = request.session.get('xmlContent') xml_file = etree.fromstring(xml_content) return validate_xml(request, xml_file) # return render(request, 'choose_key.html', { # 'uploaded_file_content': "test" + etree.tostring(xmlFile, pretty_print = True) # }) else: uploaded_file_error = "Uploaded File is not found." 
return render(request, 'upload.html', { 'uploaded_file_error': uploaded_file_error }) def choose_merge(request): """ Prompt user to merge a relationship into another table :param request: :return: """ if request.method == 'POST' and request.session.get('xmlContent'): xml_content = request.session.get('xmlContent') xml_file = etree.fromstring(xml_content) return convert_xml_to_json(request, xml_file) # return render(request, 'choose_key.html', { # 'uploaded_file_content': "test" + etree.tostring(xmlFile, pretty_print = True) # }) else: uploaded_file_error = "Uploaded File is not found." return render(request, 'upload.html', { 'uploaded_file_error': uploaded_file_error }) def proceed_next(request): """ Allow user to select primary key This method will auto proceed next table after user select a primary key :param request: :return: """ if request.method == 'POST' and request.session.get('xmlContent'): xml_content = request.session.get('xmlContent') tree = etree.fromstring(xml_content) table_name = request.POST.get('tableName', None) table_primary_key = request.POST.get('primaryKeyOption', -1) merge_table = request.POST.get('merge_table', -1) merge_from = request.POST.get('merge_from', None) merge_to = request.POST.get('merge_to', None) if table_primary_key != -1 and table_name is not None: """ update primary keys in xml """ tree = update_primary_key_in_xml(tree, table_name, table_primary_key) file_content = etree.tostring(tree, pretty_print=True) request.session['xmlContent'] = file_content request.session.save() return validate_xml(request, tree) elif merge_table != -1 and merge_from is not None and merge_to is not None: """ merge relationship in xml """ tree = merge_relationship_in_xml(tree, merge_table, merge_from, merge_to) file_content = etree.tostring(tree, pretty_print=True) request.session['xmlContent'] = file_content request.session.save() return validate_xml(request, tree) else: # TODO(UI): add an error page and allow restart uploaded_file_error = "Uploaded File is not found." return render(request, 'upload.html', { 'uploaded_file_error': uploaded_file_error }) # return render(request, 'choose_key.html', { # 'uploaded_file_content': "test" + etree.tostring(xmlFile, pretty_print = True) # }) else: return render(request, 'upload.html', { "uploaded_file_error": "Unable to update primary key" })
mit
caseyrollins/osf.io
api_tests/search/serializers/test_serializers.py
4
1986
import pytest from api.search.serializers import SearchSerializer from api_tests import utils from osf.models import RegistrationSchema from osf_tests.factories import ( AuthUserFactory, NodeFactory, ProjectFactory, ) from tests.utils import make_drf_request_with_version, mock_archive from website.project.metadata.schemas import LATEST_SCHEMA_VERSION @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation class TestSearchSerializer: def test_search_serializer_mixed_model(self): user = AuthUserFactory() project = ProjectFactory(creator=user, is_public=True) component = NodeFactory(parent=project, creator=user, is_public=True) file_component = utils.create_test_file(component, user) context = {'request': make_drf_request_with_version(version='2.0')} schema = RegistrationSchema.objects.filter( name='Replication Recipe (Brandt et al., 2013): Post-Completion', schema_version=LATEST_SCHEMA_VERSION).first() # test_search_serializer_mixed_model_project result = SearchSerializer(project, context=context).data assert result['data']['type'] == 'nodes' # test_search_serializer_mixed_model_component result = SearchSerializer(component, context=context).data assert result['data']['type'] == 'nodes' # test_search_serializer_mixed_model_registration with mock_archive(project, autocomplete=True, autoapprove=True, schema=schema) as registration: result = SearchSerializer(registration, context=context).data assert result['data']['type'] == 'registrations' # test_search_serializer_mixed_model_file result = SearchSerializer(file_component, context=context).data assert result['data']['type'] == 'files' # test_search_serializer_mixed_model_user result = SearchSerializer(user, context=context).data assert result['data']['type'] == 'users'
apache-2.0
whs/django
tests/auth_tests/models/custom_user.py
39
3665
from django.contrib.auth.models import ( AbstractBaseUser, AbstractUser, BaseUserManager, Group, Permission, PermissionsMixin, UserManager, ) from django.db import models # The custom user uses email as the unique identifier, and requires # that every user provide a date of birth. This lets us test # changes in username datatype, and non-text required fields. class CustomUserManager(BaseUserManager): def create_user(self, email, date_of_birth, password=None): """ Creates and saves a User with the given email and password. """ if not email: raise ValueError('Users must have an email address') user = self.model( email=self.normalize_email(email), date_of_birth=date_of_birth, ) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email, password, date_of_birth): u = self.create_user(email, password=password, date_of_birth=date_of_birth) u.is_admin = True u.save(using=self._db) return u class CustomUser(AbstractBaseUser): email = models.EmailField(verbose_name='email address', max_length=255, unique=True) is_active = models.BooleanField(default=True) is_admin = models.BooleanField(default=False) date_of_birth = models.DateField() custom_objects = CustomUserManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['date_of_birth'] def __str__(self): return self.email # Maybe required? def get_group_permissions(self, obj=None): return set() def get_all_permissions(self, obj=None): return set() def has_perm(self, perm, obj=None): return True def has_perms(self, perm_list, obj=None): return True def has_module_perms(self, app_label): return True # Admin required fields @property def is_staff(self): return self.is_admin class RemoveGroupsAndPermissions: """ A context manager to temporarily remove the groups and user_permissions M2M fields from the AbstractUser class, so they don't clash with the related_name sets. """ def __enter__(self): self._old_au_local_m2m = AbstractUser._meta.local_many_to_many self._old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many groups = models.ManyToManyField(Group, blank=True) groups.contribute_to_class(PermissionsMixin, "groups") user_permissions = models.ManyToManyField(Permission, blank=True) user_permissions.contribute_to_class(PermissionsMixin, "user_permissions") PermissionsMixin._meta.local_many_to_many = [groups, user_permissions] AbstractUser._meta.local_many_to_many = [groups, user_permissions] def __exit__(self, exc_type, exc_value, traceback): AbstractUser._meta.local_many_to_many = self._old_au_local_m2m PermissionsMixin._meta.local_many_to_many = self._old_pm_local_m2m class CustomUserWithoutIsActiveField(AbstractBaseUser): username = models.CharField(max_length=150, unique=True) email = models.EmailField(unique=True) objects = UserManager() USERNAME_FIELD = 'username' # The extension user is a simple extension of the built-in user class, # adding a required date_of_birth field. This allows us to check for # any hard references to the name "User" in forms/handlers etc. with RemoveGroupsAndPermissions(): class ExtensionUser(AbstractUser): date_of_birth = models.DateField() custom_objects = UserManager() REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth']
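# --- Usage sketch (added; test-style illustration, assumes a configured
# --- Django project with this app installed; the address is hypothetical) ---
#
#     import datetime
#     user = CustomUser.custom_objects.create_user(
#         email='jane@example.com',
#         date_of_birth=datetime.date(1990, 1, 1),
#         password='s3cret',
#     )
#     assert user.USERNAME_FIELD == 'email'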
bsd-3-clause
AlfiyaZi/nbviewer
nbviewer/cache.py
9
4982
#----------------------------------------------------------------------------- # Copyright (C) 2013 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- import os import zlib from concurrent.futures import ThreadPoolExecutor from tornado.concurrent import Future from tornado import gen from tornado.httpclient import AsyncHTTPClient from tornado.httputil import url_concat from tornado.log import app_log try: import pylibmc except ImportError: pylibmc = None #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- class MockCache(object): """Mock Cache. Just stores nothing and always return None on get.""" def __init__(self, *args, **kwargs): pass def get(self, key): f = Future() f.set_result(None) return f def set(self, key, value, *args, **kwargs): f = Future() f.set_result(None) return f class DummyAsyncCache(object): """Dummy Async Cache. Just stores things in a dict of fixed size.""" def __init__(self, limit=10): self._cache = {} self._cache_order = [] self.limit = limit def get(self, key): f = Future() f.set_result(self._cache.get(key)) return f def set(self, key, value, time=0): if key in self._cache and self._cache_order[-1] != key: idx = self._cache_order.index(key) del self._cache_order[idx] self._cache_order.append(key) else: if len(self._cache) >= self.limit: oldest = self._cache_order.pop(0) self._cache.pop(oldest) self._cache_order.append(key) self._cache[key] = value f = Future() f.set_result(None) return f class AsyncMemcache(object): """Wrap pylibmc.Client to run in a background thread via concurrent.futures.ThreadPoolExecutor """ def __init__(self, *args, **kwargs): self.pool = kwargs.pop('pool', None) or ThreadPoolExecutor(1) self.mc = pylibmc.Client(*args, **kwargs) self.mc_pool = pylibmc.ThreadMappedPool(self.mc) def get(self, key, *args, **kwargs): app_log.debug("memcache get submit %s", key) return self.pool.submit(self._threadsafe_get, key, *args, **kwargs) def _threadsafe_get(self, key, *args, **kwargs): app_log.debug("memcache get %s", key) with self.mc_pool.reserve() as mc: return mc.get(key, *args, **kwargs) def set(self, key, *args, **kwargs): app_log.debug("memcache set submit %s", key) return self.pool.submit(self._threadsafe_set, key, *args, **kwargs) def _threadsafe_set(self, key, value, *args, **kwargs): app_log.debug("memcache set %s", key) with self.mc_pool.reserve() as mc: return mc.set(key, value, *args, **kwargs) class AsyncMultipartMemcache(AsyncMemcache): """subclass of AsyncMemcache that splits large files into multiple chunks because memcached limits record size to 1MB """ def __init__(self, *args, **kwargs): self.chunk_size = kwargs.pop('chunk_size', 950000) self.max_chunks = kwargs.pop('max_chunks', 16) super(AsyncMultipartMemcache, self).__init__(*args, **kwargs) def _threadsafe_get(self, key, *args, **kwargs): app_log.debug("memcache get %s", key) keys = [('%s.%i' % (key, idx)).encode() for idx in range(self.max_chunks)] with self.mc_pool.reserve() as mc: values = mc.get_multi(keys, *args, **kwargs) parts = [] for key in keys: if key not in values: break parts.append(values[key]) if parts: compressed = b''.join(parts) try: return zlib.decompress(compressed) except zlib.error as e: app_log.error("zlib decompression of %s failed: %s", key, e) def 
_threadsafe_set(self, key, value, *args, **kwargs): app_log.debug("memcache set %s", key) chunk_size = self.chunk_size compressed = zlib.compress(value) offsets = range(0, len(compressed), chunk_size) app_log.debug('storing %s in %i chunks', key, len(offsets)) if len(offsets) > self.max_chunks: raise ValueError("file is too large: %sB" % len(compressed)) values = {} for idx, offset in enumerate(offsets): values[('%s.%i' % (key, idx)).encode()] = compressed[ offset:offset + chunk_size ] with self.mc_pool.reserve() as mc: return mc.set_multi(values, *args, **kwargs)
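# --- Usage sketch (added; assumes a memcached server on localhost and
# --- that pylibmc is installed; `notebook_bytes` is a hypothetical
# --- bytestring larger than one memcached record) ---
#
#     cache = AsyncMultipartMemcache(['127.0.0.1'], chunk_size=950000)
#     # inside a tornado coroutine, the returned futures can be yielded:
#     yield cache.set('nb', notebook_bytes, time=300)
#     data = yield cache.get('nb')   # None on a cache miss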
bsd-3-clause
xq262144/hue
desktop/core/ext-py/guppy-0.1.10/guppy/etc/cmd.py
37
15014
"""A generic class to build line-oriented command interpreters. Interpreters constructed with this class obey the following conventions: 1. End of file on input is processed as the command 'EOF'. 2. A command is parsed out of each line by collecting the prefix composed of characters in the identchars member. 3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method is passed a single argument consisting of the remainder of the line. 4. Typing an empty line repeats the last command. (Actually, it calls the method `emptyline', which may be overridden in a subclass.) 5. There is a predefined `help' method. Given an argument `topic', it calls the command `help_topic'. With no arguments, it lists all topics with defined help_ functions, broken into up to three topics; documented commands, miscellaneous help topics, and undocumented commands. 6. The command '?' is a synonym for `help'. The command '!' is a synonym for `shell', if a do_shell method exists. 7. If completion is enabled, completing commands will be done automatically, and completing of commands args is done by calling complete_foo() with arguments text, line, begidx, endidx. text is string we are matching against, all returned matches must begin with it. line is the current input line (lstripped), begidx and endidx are the beginning and end indexes of the text being matched, which could be used to provide different completion depending upon which position the argument is in. The `default' method may be overridden to intercept commands for which there is no do_ method. The `completedefault' method may be overridden to intercept completions for commands that have no complete_ method. The data member `self.ruler' sets the character used to draw separator lines in the help messages. If empty, no ruler line is drawn. It defaults to "=". If the value of `self.intro' is nonempty when the cmdloop method is called, it is printed out on interpreter startup. This value may be overridden via an optional argument to the cmdloop() method. The data members `self.doc_header', `self.misc_header', and `self.undoc_header' set the headers used for the help function's listings of documented functions, miscellaneous topics, and undocumented functions respectively. These interpreters use raw_input; thus, if the readline module is loaded, they automatically support Emacs-like command history and editing features. """ import string __all__ = ["Cmd"] PROMPT = '(Cmd) ' IDENTCHARS = string.ascii_letters + string.digits + '_' class Cmd: """A simple framework for writing line-oriented command interpreters. These are often useful for test harnesses, administrative tools, and prototypes that will later be wrapped in a more sophisticated interface. A Cmd instance or subclass instance is a line-oriented interpreter framework. There is no good reason to instantiate Cmd itself; rather, it's useful as a superclass of an interpreter class you define yourself in order to inherit Cmd's methods and encapsulate action methods. """ prompt = PROMPT identchars = IDENTCHARS ruler = '=' lastcmd = '' intro = None doc_leader = "" doc_header = "Documented commands (type help <topic>):" misc_header = "Miscellaneous help topics:" undoc_header = "Undocumented commands:" nohelp = "*** No help on %s" use_rawinput = 1 def __init__(self, completekey='tab', stdin=None, stdout=None): """Instantiate a line-oriented interpreter framework. The optional argument 'completekey' is the readline name of a completion key; it defaults to the Tab key. 
If completekey is not None and the readline module is available, command completion is done automatically. The optional arguments stdin and stdout specify alternate input and output file objects; if not specified, sys.stdin and sys.stdout are used. """ import sys if stdin is not None: self.stdin = stdin else: self.stdin = sys.stdin if stdout is not None: self.stdout = stdout else: self.stdout = sys.stdout self.cmdqueue = [] self.completekey = completekey def cmdloop(self, intro=None): """Repeatedly issue a prompt, accept input, parse an initial prefix off the received input, and dispatch to action methods, passing them the remainder of the line as argument. """ self.preloop() if self.use_rawinput and self.completekey: try: import readline self.old_completer = readline.get_completer() readline.set_completer(self.complete) readline.parse_and_bind(self.completekey+": complete") except ImportError: pass try: if intro is not None: self.intro = intro if self.intro: self.stdout.write(str(self.intro)+"\n") stop = None while not stop: if self.cmdqueue: line = self.cmdqueue.pop(0) else: self.preinput() if self.use_rawinput: try: line = raw_input(self.prompt) except EOFError: line = 'EOF' else: self.stdout.write(self.prompt) self.stdout.flush() line = self.stdin.readline() if not len(line): line = 'EOF' else: line = line[:-1] # chop \n line = self.postinput(line) line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) self.postloop() finally: if self.use_rawinput and self.completekey: try: import readline readline.set_completer(self.old_completer) except ImportError: pass def precmd(self, line): """Hook method executed just before the command line is interpreted, but after the input prompt is generated and issued. """ return line def postcmd(self, stop, line): """Hook method executed just after a command dispatch is finished.""" return stop def preinput(self): """Hook method executed just before an input line is read.""" def postinput(self, line): """Hook method executed just after an input line is read.""" return line def preloop(self): """Hook method executed once when the cmdloop() method is called.""" pass def postloop(self): """Hook method executed once when the cmdloop() method is about to return. """ pass def parseline(self, line): line = line.strip() if not line: return None, None, line elif line[0] == '?': line = 'help ' + line[1:] elif line[0] == '!': if hasattr(self, 'do_shell'): line = 'shell ' + line[1:] else: return None, None, line i, n = 0, len(line) while i < n and line[i] in self.identchars: i = i+1 cmd, arg = line[:i], line[i:].strip() return cmd, arg, line def onecmd(self, line): """Interpret the argument as though it had been typed in response to the prompt. This may be overridden, but should not normally need to be; see the precmd() and postcmd() methods for useful execution hooks. The return value is a flag indicating whether interpretation of commands by the interpreter should stop. """ cmd, arg, line = self.parseline(line) if not line: return self.emptyline() if cmd is None: return self.default(line) self.lastcmd = line if cmd == '': return self.default(line) else: try: func = getattr(self, 'do_' + cmd) except AttributeError: return self.default(line) return func(arg) def emptyline(self): """Called when an empty line is entered in response to the prompt. If this method is not overridden, it repeats the last nonempty command entered. 
""" if self.lastcmd: return self.onecmd(self.lastcmd) def default(self, line): """Called on an input line when the command prefix is not recognized. If this method is not overridden, it prints an error message and returns. """ self.stdout.write('*** Unknown syntax: %s\n'%line) def completedefault(self, *ignored): """Method called to complete an input line when no command-specific complete_*() method is available. By default, it returns an empty list. """ return [] def completenames(self, text, *ignored): dotext = 'do_'+text return [a[3:] for a in self.get_names() if a.startswith(dotext)] def complete(self, text, state): """Return the next possible completion for 'text'. If a command has not been entered, then complete against command list. Otherwise try to call complete_<command> to get list of completions. """ if state == 0: import readline origline = readline.get_line_buffer() line = origline.lstrip() stripped = len(origline) - len(line) begidx = readline.get_begidx() - stripped endidx = readline.get_endidx() - stripped if begidx>0: cmd, args, foo = self.parseline(line) if cmd == '': compfunc = self.completedefault else: try: compfunc = getattr(self, 'complete_' + cmd) except AttributeError: compfunc = self.completedefault else: compfunc = self.completenames self.completion_matches = compfunc(text, line, begidx, endidx) try: return self.completion_matches[state] except IndexError: return None def get_names(self): # Inheritance says we have to look in class and # base classes; order is not important. names = [] classes = [self.__class__] while classes: aclass = classes.pop(0) if aclass.__bases__: classes = classes + list(aclass.__bases__) names = names + dir(aclass) return names def complete_help(self, *args): return self.completenames(*args) def do_help(self, arg): if arg: # XXX check arg syntax try: func = getattr(self, 'help_' + arg) except AttributeError: try: doc=getattr(self, 'do_' + arg).__doc__ if doc: self.stdout.write("%s\n"%str(doc)) return except AttributeError: pass self.stdout.write("%s\n"%str(self.nohelp % (arg,))) return func() else: names = self.get_names() cmds_doc = [] cmds_undoc = [] help = {} for name in names: if name[:5] == 'help_': help[name[5:]]=1 names.sort() # There can be duplicates if routines overridden prevname = '' for name in names: if name[:3] == 'do_': if name == prevname: continue prevname = name cmd=name[3:] if cmd in help: cmds_doc.append(cmd) del help[cmd] elif getattr(self, name).__doc__: cmds_doc.append(cmd) else: cmds_undoc.append(cmd) self.stdout.write("%s\n"%str(self.doc_leader)) self.print_topics(self.doc_header, cmds_doc, 15,80) self.print_topics(self.misc_header, help.keys(),15,80) self.print_topics(self.undoc_header, cmds_undoc, 15,80) def print_topics(self, header, cmds, cmdlen, maxcol): if cmds: self.stdout.write("%s\n"%str(header)) if self.ruler: self.stdout.write("%s\n"%str(self.ruler * len(header))) self.columnize(cmds, maxcol-1) self.stdout.write("\n") def columnize(self, list, displaywidth=80): """Display a list of strings as a compact set of columns. Each column is only as wide as necessary. Columns are separated by two spaces (one was not legible enough). 
""" if not list: self.stdout.write("<empty>\n") return nonstrings = [i for i in range(len(list)) if not isinstance(list[i], str)] if nonstrings: raise TypeError, ("list[i] not a string for i in %s" % ", ".join(map(str, nonstrings))) size = len(list) if size == 1: self.stdout.write('%s\n'%str(list[0])) return # Try every row count from 1 upwards for nrows in range(1, len(list)): ncols = (size+nrows-1) // nrows colwidths = [] totwidth = -2 for col in range(ncols): colwidth = 0 for row in range(nrows): i = row + nrows*col if i >= size: break x = list[i] colwidth = max(colwidth, len(x)) colwidths.append(colwidth) totwidth += colwidth + 2 if totwidth > displaywidth: break if totwidth <= displaywidth: break else: nrows = len(list) ncols = 1 colwidths = [0] for row in range(nrows): texts = [] for col in range(ncols): i = row + nrows*col if i >= size: x = "" else: x = list[i] texts.append(x) while texts and not texts[-1]: del texts[-1] for col in range(len(texts)): texts[col] = texts[col].ljust(colwidths[col]) self.stdout.write("%s\n"%str(" ".join(texts)))
apache-2.0
mtils/ems
ems/qt4/gui/itemdelegate/multiroledelegate.py
1
1523
from PyQt4.QtCore import QRect, Qt, QLine from PyQt4.QtGui import QStyledItemDelegate, QStyleOptionViewItemV4 from PyQt4.QtGui import qApp, QStyle, QPen, QColor class MultiRoleDelegate(QStyledItemDelegate): def __init__(self, *args, **kwargs): super(MultiRoleDelegate, self).__init__(*args, **kwargs) self.roles = [Qt.DisplayRole] def paint(self, painter, option, index): roleCount = len(self.roles) newHeight = int(round(float(option.rect.height()) / float(roleCount))) styleOptions = [] for idx, role in enumerate(self.roles): styleOption = QStyleOptionViewItemV4(option) styleOption.rect = QRect(option.rect) styleOption.rect.setHeight(newHeight) styleOption.rect.moveTop(styleOption.rect.top() + idx*newHeight) styleOption.text = index.data(role).toString() widget = self.parent() widget.style().drawControl(QStyle.CE_ItemViewItem, styleOption, painter, widget) if idx != 0: painter.save() pen = QPen(QColor(220,220,220)) pen.setStyle(Qt.DotLine) painter.setPen(pen) painter.drawLine(styleOption.rect.topLeft(), styleOption.rect.topRight()) painter.restore() def sizeHint(self, option, index): sizeHint = super(MultiRoleDelegate, self).sizeHint(option, index) sizeHint.setHeight(sizeHint.height()*len(self.roles)) return sizeHint
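# --- Usage sketch (added; assumes a running PyQt4 application and a
# --- QListView imported from PyQt4.QtGui) ---
# The delegate paints one sub-row per role, so pass the view as parent;
# paint() calls self.parent() to reach the view's style:
#
#     view = QListView()
#     delegate = MultiRoleDelegate(view)
#     delegate.roles = [Qt.DisplayRole, Qt.ToolTipRole]
#     view.setItemDelegate(delegate)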
mit
bbc/kamaelia
Sketches/AM/KPIFramework/KPI/Client/Decryptor.py
3
3293
# -*- coding: utf-8 -*- # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import Axon import struct from KPI.Crypto import xtea class Decryptor(Axon.Component.component): Inboxes = {"inbox" : "encrypted data packets", "keyevent": "key for decryption", "control": "shutdown handling"} Outboxes = {"outbox" : "decrypted data packets", "signal": "shut handling"} def __init__(self): super(Decryptor,self).__init__() self.key = "\0" def main(self): blocksize = 8 MAGIC_STRING = blocksize * chr(0x80) while 1: yield 1 if self.dataReady("control"): data = self.recv("control") if data == "SHUTDOWN": self.send(data, "signal") print "decryptor shutdown" break if self.dataReady("keyevent"): self.key = self.recv("keyevent") #print "key recieved at the decryptor",self.key if self.dataReady("inbox") and self.key != "\0": data = self.recv("inbox") dec = '' pad = True datalen = len(data) #Unpad last byte with 0x80 followed by zero (null) bytes if datalen > blocksize: k = 0 if datalen > 2*blocksize: for i in range(0, datalen - 2*blocksize, blocksize): block = data[i:i+blocksize] dec = dec + xtea.xtea_decrypt(self.key,block) k = i + blocksize block1 = xtea.xtea_decrypt(self.key,data[k:k+blocksize]) block2 = xtea.xtea_decrypt(self.key,data[k+blocksize:datalen]) dec = dec + block1 if block2 == MAGIC_STRING: pad = False else: block = block2 else: block = xtea.xtea_decrypt(self.key,data) if pad == True: rindex = block.rfind(chr(0x80)) if rindex != -1: tmp = block[rindex:len(block)] pad = chr(0x80) + (len(block)-rindex-1)*chr(0x00) if(pad == tmp): print "remove padding", pad, "padlen", len(pad) block = block[:rindex] dec = dec + block #print "decrypted data ",dec self.send(dec, "outbox")
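# --- Wiring sketch (added; illustrative only) ---
# Kamaelia components are normally composed into a graph; assuming the
# Pipeline chassis available in recent Kamaelia releases, and hypothetical
# `encrypted_source` / `sink` components, something like:
#
#     from Kamaelia.Chassis.Pipeline import Pipeline
#     dec = Decryptor()
#     # a key-distribution component must deliver the session key to the
#     # "keyevent" inbox before ciphertext arrives on "inbox".
#     Pipeline(encrypted_source, dec, sink).run()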
apache-2.0
sgnn7/sgfc
io_dev/sgfc_io/devices/pic18f45k50/hk_usb_io.py
1
8392
#!/usr/bin/python
import sys
import time
import usb.core
import usb.util
from array import array

# Markham Thomas 2015
# This version is under heavy development
# You can add your test code to the bottom
# below the === end of module statement, or
# externally call the module
#
# ---- This version adds a python class for constants
class Bunch(dict):
    def __init__(self, d = {}):
        dict.__init__(self, d)
        self.__dict__.update(d)
    def __setattr__(self, name, value):
        dict.__setitem__(self, name, value)
        object.__setattr__(self, name, value)
    def __setitem__(self, name, value):
        dict.__setitem__(self, name, value)
        object.__setattr__(self, name, value)
    def copy(self):
        return Bunch(dict.copy(self))

_mod_ver  = '0.52'       # python HKUSBIO module version
_mod_date = '8/28/2015'  # module date

u_ad0      = 0x37  # read ADC value from RA0
u_ad1      = 0x38  # read ADC value from RA1
u_i2c_init = 0x40  # i2c_init(void)
u_i2c_idle = 0x41  # i2c_idle(void)
u_i2c_strt = 0x42  # i2c_start(uchar)
u_i2c_stop = 0x43  # i2c_stop(void)
u_i2c_slak = 0x44  # uchar i2c_slave_ack(void)
u_i2c_writ = 0x45  # void i2c_write(uchar)
u_i2c_mack = 0x46  # void i2c_master_ack(uchar)
u_i2c_read = 0x47  # uchar i2c_read(void)
u_i2c_dtrd = 0x48  # uchar i2c_isdatardy(void)
u_spi_init = 0x50  # void spi_init(mode, baud, sample)
u_spi_tran = 0x51  # uchar spi_transfer(regAddr)
u_spi_cs   = 0x52  # void spi_cs(enable|disable)
u_rom = 0x85  # get PIC rom version
u_led = 0x80  # toggle LED
u_swc = 0x81  # get switch pressed or not
u_gpd = 0x84  # configure GPIO direction on a pin
u_gpi = 0x82  # read value on GPIO pin
u_gpo = 0x83  # write value to GPIO pin
u_uss = 0x86  # send a string to the UART
u_tst = 0x87  # test if UART has a char available
u_urc = 0x88  # read a single char from UART
u_usc = 0x89  # send a single char to the UART
h_getr = 0x98  # SFR register to read
h_setr = 0x99  # SFR register to set
h_getb = 0x9a  # SFR read register bit
h_setb = 0x9b  # SFR set register bit

rd4 = 1  # GPIO pin rd4 def=input
rd5 = 2  # GPIO pin rd5 def=input
rd6 = 3  # GPIO pin rd6 def=output
rd7 = 4  # GPIO pin rd7 def=output
dir_output = 0  # control GPIO pin direction
dir_input  = 1

SPI_LOW_BAUD = 0  # 750khz
SPI_MED_BAUD = 1  # 3mhz
SPI_HI_BAUD  = 2  # 12mhz
SPI_SAMP_MID = 0  # sample input in middle data input time
SPI_SAMP_END = 1  # sample input at end of data input
SPI_MODE0 = 0
SPI_MODE1 = 1
SPI_MODE2 = 2
SPI_MODE3 = 3
SPI_CS_ENABLE  = 0
SPI_CS_DISABLE = 1

I2C_DATA_ACK      = 0  # i2c constants
I2C_DATA_NOACK    = 1
I2C_WRITE_CMD     = 0
I2C_READ_CMD      = 1
I2C_START_CMD     = 0
I2C_REP_START_CMD = 1
I2C_REQ_ACK       = 0
I2C_REQ_NOACK     = 0


def init():  # setup USB device structure
    # find our device
    dev = usb.core.find(idVendor=0x04d8, idProduct=0x003f)
    # was it found
    if dev is None:
        raise ValueError('Device not found')
    # handle if device is busy
    if dev.is_kernel_driver_active(0) is True:
        dev.detach_kernel_driver(0)
    # set the active configuration. No args the first config
    # will become the active one
    dev.set_configuration()
    return dev

def module_version():
    a = 'Version: ' + _mod_ver + ', Date: ' + _mod_date
    return a

def rom_version(dev):  # get PIC ROM version
    # read ROM version
    dev.write(1, [u_rom], 100)
    ret = dev.read(0x81, 64, 100)
    rom_version = ''
    rom_version += chr(ret[1])
    rom_version += '.'
    rom_version += chr(ret[2])
    rom_version += chr(ret[3])
    return rom_version

def toggle_led(dev):  # toggle LED
    dev.write(1, [u_led], 100)

def read_switch(dev):  # read switch press
    dev.write(1, [u_swc], 100)
    sw = dev.read(0x81, 64, 100)
    if (sw[1] == 0):
        return True
    else:
        return False

def gpio_init(dev, pin, pdir):  # set GPIO direction on pin
    dev.write(1, [u_gpd, pin, pdir], 100)

def gpio_out(dev, pin):  # output a value on GPIO pin
    dev.write(1, [u_gpo, pin, 1], 100)

def gpio_in(dev, pin):  # read value on GPIO pin
    dev.write(1, [u_gpi, pin], 100)
    ret = dev.read(0x81, 64, 100)
    return ret[1]

def adc_ra0(dev):  # do ADC conversion on RA0
    dev.write(1, [u_ad0], 100)
    ret = dev.read(0x81, 64, 100)
    value = ret[2] << 8
    value = value | ret[1]
    return value

def adc_ra1(dev):  # do ADC conversion on RA1
    dev.write(1, [u_ad1], 100)
    ret = dev.read(0x81, 64, 100)
    value = ret[2] << 8
    value = value | ret[1]
    return value

def ser_test(dev):  # check if a char available on serial port
    dev.write(1, [u_tst], 100)
    ret = dev.read(0x81, 64, 100)
    return ret[1]

def ser_putc(dev, schar):  # send a char to the serial port
    a = map(ord, schar)
    a.insert(0, u_usc)
    dev.write(1, a, 100)

def ser_puts(dev, strval):  # send a string to the serial port
    a = map(ord, strval)
    a.insert(0, u_uss)
    a.append(0)
    dev.write(1, a, 100)

def ser_getc(dev):  # get a single char from the serial port
    dev.write(1, [u_urc], 100)
    ret = dev.read(0x81, 64, 100)
    return ret[1]

def sfr_get_reg(dev, reg):  # get a SFR register
    a = array('B', [0,0,0,0,0,0,0,0,0,0,0,0,0,0])
    a[10] = reg
    a[0] = h_getr
    dev.write(1, a, 100)
    ret = dev.read(0x81, 64, 100)
    return ret[1]

def sfr_set_reg(dev, reg, rval):  # set a SFR register
    a = array('B', [0,0,0,0,0,0,0,0,0,0,0,0,0,0])
    a[10] = reg   # register to select
    a[11] = rval  # value to set
    a[0] = h_setr
    dev.write(1, a, 100)
    ret = dev.read(0x81, 64, 100)
    return ret[1]

def sfr_get_regbit(dev, reg, bval):  # get a SFR register bit
    a = array('B', [0,0,0,0,0,0,0,0,0,0,0,0,0,0])
    a[10] = reg   # register to select
    a[11] = bval  # bit value to get
    a[0] = h_getb
    dev.write(1, a, 100)
    ret = dev.read(0x81, 64, 100)
    return ret[1]

def sfr_set_regbit(dev, reg, rbit, bval):  # set a SFR register bit
    a = array('B', [0,0,0,0,0,0,0,0,0,0,0,0,0,0])
    a[10] = reg   # register to select
    a[11] = rbit  # bit to set
    a[12] = bval  # bit value to set
    a[0] = h_setb
    dev.write(1, a, 100)
    ret = dev.read(0x81, 64, 100)
    return ret[1]

def i2c_init(dev):  # init i2c
    dev.write(1, [u_i2c_init], 200)

def i2c_idle(dev):  # i2c idle
    dev.write(1, [u_i2c_idle], 200)

def i2c_start(dev, cval):  # i2c start
    a = array('B', [0,0,0,0,0,0,0,0,0,0,0,0,0,0])
    a[0] = u_i2c_strt
    a[1] = cval
    dev.write(1, a, 200)

def i2c_stop(dev):  # i2c stop
    dev.write(1, [u_i2c_stop], 200)

def i2c_slave_ack(dev):  # i2c slave ack
    dev.write(1, [u_i2c_slak], 200)
    ret = dev.read(0x81, 64, 200)
    return ret[1]  # 1=no ack, 0=ack

def i2c_write(dev, cval):  # i2c write
    a = array('B', [0,0,0,0,0,0,0,0,0,0,0,0,0,0])
    a[0] = u_i2c_writ
    a[1] = cval
    dev.write(1, a, 200)

def i2c_master_ack(dev, cval):  # 1=nack, 0=ack
    a = array('B', [0,0,0,0,0,0,0,0,0,0,0,0,0,0])
    a[0] = u_i2c_mack
    a[1] = cval
    dev.write(1, a, 200)

def i2c_read(dev):  # i2c read
    dev.write(1, [u_i2c_read], 200)
    ret = dev.read(0x81, 64, 200)
    return ret[1]  # i2c_read char

def i2c_isdatardy(dev):  # check if i2c char avail
    dev.write(1, [u_i2c_dtrd], 200)
    ret = dev.read(0x81, 64, 200)
    return ret[1]  # i2c_read char

def spi_init(dev, mode, baud, sample):  # SPI init
    a = array('B', [0,0,0,0,0])
    a[0] = u_spi_init
    a[1] = mode
    a[2] = baud
    a[3] = sample
    dev.write(1, a, 100)

def spi_transfer(dev, value):  # SPI transfer
    a = array('B', [0,0,0,0,0])
    a[0] = u_spi_tran
    a[1] = value
    dev.write(1, a, 100)
    ret = dev.read(0x81, 64, 100)
    return ret[1]  # ret SPI char read

def spi_cs(dev, select):  # enable or disable SPI CS
    a = array('B', [0,0,0,0,0])
    a[0] = u_spi_cs
    a[1] = select
    dev.write(1, a, 100)

def close(dev):  # reset USB device
    dev.reset()
#===================== end of module =========
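
# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal smoke test, assuming the PIC18F45K50 board is attached and
# enumerates with the VID/PID that init() looks for. Only functions defined
# in this module are used; the sequence itself is illustrative.
#
# import hk_usb_io as usb_io
#
# dev = usb_io.init()                  # find and configure the USB device
# print(usb_io.module_version())       # python module version string
# print(usb_io.rom_version(dev))       # firmware version reported by the PIC
# usb_io.toggle_led(dev)               # blink the on-board LED
# usb_io.gpio_init(dev, usb_io.rd6, usb_io.dir_output)
# usb_io.gpio_out(dev, usb_io.rd6)     # drive RD6 high
# usb_io.close(dev)                    # reset the device when done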
lgpl-2.1
Alignak-monitoring-contrib/alignak-module-logs
alignak_module_logs/logs.py
2
26149
# -*- coding: utf-8 -*- # # Copyright (C) 2015-2048: Alignak contrib team, see AUTHORS.txt file for contributors # # This file is part of Alignak contrib projet. # # Alignak is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Alignak is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Alignak. If not, see <http://www.gnu.org/licenses/>. """ This module is an Alignak Broker module that collects the `monitoring_log` broks to send them to a Python logger configured in the module configuration file """ import os import json import time import queue import logging from logging import Formatter from logging.handlers import TimedRotatingFileHandler from logging.config import dictConfig as logger_dictConfig import psutil from alignak.stats import Stats from alignak.basemodule import BaseModule from alignak_backend_client.client import Backend, BackendException from alignak_module_logs.logevent import LogEvent logger = logging.getLogger(__name__) # pylint: disable=invalid-name for handler in logger.parent.handlers: if isinstance(handler, logging.StreamHandler): logger.parent.removeHandler(handler) # pylint: disable=invalid-name properties = { 'daemons': ['broker'], 'type': 'logs', 'external': True, 'phases': ['running'], } class UTCFormatter(logging.Formatter): """This logging formatter converts the log date/time to UTC""" converter = time.gmtime def get_instance(mod_conf): """Return a module instance for the modules manager :param mod_conf: the module properties as defined globally in this file :return: """ # logger.info("Give an instance of %s for alias: %s", # mod_conf.python_name, mod_conf.module_alias) return MonitoringLogsCollector(mod_conf) class MonitoringLogsCollector(BaseModule): """Monitoring logs module main class""" def __init__(self, mod_conf): # pylint: disable=global-statement """Module initialization mod_conf is a dictionary that contains: - all the variables declared in the module configuration file - a 'properties' value that is the module properties as defined globally in this file :param mod_conf: module configuration file as a dictionary """ BaseModule.__init__(self, mod_conf) # pylint: disable=global-statement global logger logger = logging.getLogger('alignak.module.%s' % self.alias) # Do not change log level for this module ... # logger.setLevel(getattr(mod_conf, 'log_level', logging.INFO)) logger.debug("inner properties: %s", self.__dict__) logger.debug("received configuration: %s", mod_conf.__dict__) # Internal logger for the monitoring logs self.logger = None self.loop_count = 0 # Self daemon monitoring (cpu, memory) self.daemon_monitoring = False self.daemon_monitoring_period = 10 if 'ALIGNAK_DAEMON_MONITORING' in os.environ: self.daemon_monitoring = True try: self.daemon_monitoring_period = \ int(os.environ.get('ALIGNAK_DAEMON_MONITORING', '10')) except ValueError: # pragma: no cover, simple protection pass if self.daemon_monitoring: print("Module self monitoring is enabled, reporting every %d loop count." 
% self.daemon_monitoring_period) # Logger configuration file self.logger_configuration = os.getenv('ALIGNAK_MONITORING_LOGS_CFG', None) if not self.logger_configuration: self.logger_configuration = getattr(mod_conf, 'logger_configuration', None) if self.logger_configuration and self.logger_configuration != \ os.path.abspath(self.logger_configuration): self.logger_configuration = os.path.abspath(self.logger_configuration) # Logger default parameters (used if logger_configuration is not defined) self.default_configuration = True self.log_logger_name = getattr(mod_conf, 'log_logger_name', 'monitoring-logs') self.log_dir = getattr(mod_conf, 'log_dir', '/tmp') if "ALIGNAKLOG" in self.log_dir: self.log_dir = '/tmp' self.log_file = getattr(mod_conf, 'log_file', 'monitoring-logs.log') self.log_filename = os.path.join(self.log_dir, self.log_file) self.log_rotation_when = getattr(mod_conf, 'log_rotation_when', 'midnight') self.log_rotation_interval = int(getattr(mod_conf, 'log_rotation_interval', '1')) self.log_rotation_count = int(getattr(mod_conf, 'log_rotation_count', '365')) self.log_level = getattr(mod_conf, 'log_level', 'INFO') self.log_level = getattr(logging, self.log_level, None) self.log_format = getattr(mod_conf, 'log_format ', '[%(created)i] %(levelname)s: %(message)s') self.log_date = getattr(mod_conf, 'log_date', '%Y-%m-%d %H:%M:%S %Z') if not self.logger_configuration and not self.log_dir and not self.log_file: logger.info("The logging feature is disabled") else: if self.logger_configuration: logger.info("logger configuration defined in %s", self.logger_configuration) self.default_configuration = False if not os.path.exists(self.logger_configuration): self.default_configuration = True logger.warning("defined logger configuration file (%s) does not exist! 
" "Using default configuration.", self.logger_configuration) if self.default_configuration: logger.info("logger default configuration:") logger.info(" - rotating logs in %s", self.log_filename) logger.info(" - log level: %s", self.log_level) logger.info(" - rotation every %d %s, keeping %s files", self.log_rotation_interval, self.log_rotation_when, self.log_rotation_count) self.setup_logging() stats_host = getattr(mod_conf, 'statsd_host', 'localhost') stats_port = int(getattr(mod_conf, 'statsd_port', '8125')) stats_prefix = getattr(mod_conf, 'statsd_prefix', 'alignak') statsd_enabled = (getattr(mod_conf, 'statsd_enabled', '0') != '0') if isinstance(getattr(mod_conf, 'statsd_enabled', '0'), bool): statsd_enabled = getattr(mod_conf, 'statsd_enabled') graphite_enabled = (getattr(mod_conf, 'graphite_enabled', '0') != '0') if isinstance(getattr(mod_conf, 'graphite_enabled', '0'), bool): graphite_enabled = getattr(mod_conf, 'graphite_enabled') logger.info("StatsD configuration: %s:%s, prefix: %s, enabled: %s, graphite: %s", stats_host, stats_port, stats_prefix, statsd_enabled, graphite_enabled) self.statsmgr = Stats() # Configure our Stats manager if not graphite_enabled: self.statsmgr.register(self.alias, 'module', statsd_host=stats_host, statsd_port=stats_port, statsd_prefix=stats_prefix, statsd_enabled=statsd_enabled) else: self.statsmgr.connect(self.alias, 'module', host=stats_host, port=stats_port, prefix=stats_prefix, enabled=True) # logger.info("StatsD configuration: %s:%s, prefix: %s, enabled: %s", # getattr(mod_conf, 'statsd_host', 'localhost'), # int(getattr(mod_conf, 'statsd_port', '8125')), # getattr(mod_conf, 'statsd_prefix', 'alignak'), # (getattr(mod_conf, 'statsd_enabled', '0') != '0')) # self.statsmgr = Stats() # self.statsmgr.register(self.alias, 'module', # statsd_host=getattr(mod_conf, 'statsd_host', 'localhost'), # statsd_port=int(getattr(mod_conf, 'statsd_port', '8125')), # statsd_prefix=getattr(mod_conf, 'statsd_prefix', 'alignak'), # statsd_enabled=(getattr(mod_conf, 'statsd_enabled', '0') != '0')) # Alignak Backend part # --- self.backend_available = False self.backend_connected = False self.backend_url = getattr(mod_conf, 'alignak_backend', '') if self.backend_url: logger.info("Alignak backend endpoint: %s", self.backend_url) self.client_processes = int(getattr(mod_conf, 'client_processes', '1')) logger.info("Number of processes used by backend client: %s", self.client_processes) self.backend_connected = False self.backend_connection_retry_planned = 0 try: self.backend_connection_retry_delay = int(getattr(mod_conf, 'backend_connection_retry_delay', '10')) except ValueError: self.backend_connection_retry_delay = 10 self.backend_errors_count = 0 self.backend_username = getattr(mod_conf, 'username', '') self.backend_password = getattr(mod_conf, 'password', '') self.backend_generate = getattr(mod_conf, 'allowgeneratetoken', False) self.backend_token = getattr(mod_conf, 'token', '') self.backend = Backend(self.backend_url, self.client_processes) if not self.backend.token and not self.backend_username: logger.warning("No Alignak backend credentials configured (empty token and " "empty username. " "The requested backend connection will not be available") self.backend_url = '' else: # Log in to the backend self.logged_in = False self.backend_connected = self.backend_connection() self.backend_available = self.backend_connected # Get the default realm self.default_realm = self.get_default_realm() else: logger.warning('Alignak Backend is not configured. 
' 'Some module features will not be available.') def init(self): """Handle this module "post" init ; just before it'll be started. Like just open necessaries file(s), database(s), or whatever the module will need. :return: None """ return True def setup_logging(self): """Setup logging configuration :return: none """ self.logger = logging.getLogger(self.log_logger_name) if self.default_configuration: # Set logger level self.logger.setLevel(self.log_level) logger.debug("Logger (default) handlers: %s", self.logger.handlers) if not self.logger.handlers: print("Log dir: %s" % self.log_dir) print("Log filename: %s" % self.log_filename) file_handler = TimedRotatingFileHandler(self.log_filename.replace("ALIGNAKLOG", self.log_dir), when=self.log_rotation_when, interval=self.log_rotation_interval, backupCount=self.log_rotation_count) file_handler.setFormatter(Formatter(self.log_format, self.log_date)) self.logger.addHandler(file_handler) logger.debug("Logger (default), added a TimedRotatingFileHandler") else: try: with open(self.logger_configuration, 'rt') as my_logger_configuration_file: config = json.load(my_logger_configuration_file) # Update the declared log file names with the log directory for hdlr in config['handlers']: if 'filename' in config['handlers'][hdlr]: config['handlers'][hdlr]['filename'] = \ config['handlers'][hdlr]['filename'].replace("ALIGNAKLOG", self.log_dir) logger_dictConfig(config) except ValueError as exp: logger.error("Logger configuration file is not parsable correctly!") logger.exception(exp) def backend_connection(self): """Backend connection to check live state update is allowed :return: True/False """ if self.backend_login(): self.get_default_realm() try: start = time.time() params = {'where': '{"token":"%s"}' % self.backend.token} users = self.backend.get('user', params) self.statsmgr.counter('backend-get.user', 1) self.statsmgr.timer('backend-get-time.user', time.time() - start) except BackendException as exp: logger.warning("Error on backend when retrieving user information: %s", exp) else: try: for item in users['_items']: self.logged_in = item['can_update_livestate'] return self.logged_in except Exception as exp: logger.error("Can't get the user information in the backend response: %s", exp) logger.error("Configured user account is not allowed for this module") return False def backend_login(self): """Log in to the backend :return: bool """ generate = 'enabled' if not self.backend_generate: generate = 'disabled' if self.backend_token: # We have a token, don't ask for a new one self.backend.token = self.backend_token connected = True # Not really yet, but assume yes else: if not self.backend_username or not self.backend_password: logger.error("No user or password supplied, and no default token defined. 
" "Can't connect to backend") connected = False else: try: start = time.time() connected = self.backend.login(self.backend_username, self.backend_password, generate) self.statsmgr.counter('backend-login', 1) self.statsmgr.timer('backend-login-time', time.time() - start) except BackendException as exp: logger.error("Error on backend login: %s", exp) connected = False return connected def get_default_realm(self): """ Retrieves the default top level realm for the connected user :return: str or None """ default_realm = None if self.backend_connected: try: start = time.time() result = self.backend.get('/realm', {'max_results': 1, 'sort': '_level'}) self.statsmgr.counter('backend-get.realm', 1) self.statsmgr.timer('backend-get-time.realm', time.time() - start) except BackendException as exp: logger.warning("Error on backend when retrieving default realm: %s", exp) else: try: default_realm = result['_items'][0]['_id'] except Exception as exp: logger.error("Can't get the default realm in the backend response: %s", exp) return default_realm def do_loop_turn(self): # pragma: no cover """This function is present because of an abstract function in the BaseModule class""" logger.info("In loop") time.sleep(1) def manage_brok(self, brok): """We got the data to manage :param brok: Brok object :type brok: object :return: False if a backend post error happens """ # Ignore all except 'monitoring_log' broks... if brok.type not in ['monitoring_log']: return False level = brok.data['level'].lower() if level not in ['debug', 'info', 'warning', 'error', 'critical']: return False logger.debug("Got monitoring log brok: %s", brok) # Send to configured logger if self.logger: message = brok.data['message'] message = message.replace('\r', '\\r') message = message.replace('\n', '\\n') func = getattr(self.logger, level) func(message) if not self.backend_url: return False if not self.backend_connected and int(time.time() > self.backend_connection_retry_planned): self.backend_connected = self.backend_connection() if not self.backend_connected: logger.error("Alignak backend connection is not available. 
Ignoring event.") return False # Try to get a monitoring event try: event = LogEvent(('[%s] ' % int(time.time())) + brok.data['message']) if event.valid: # ------------------------------------------- # Add an history event self.statsmgr.counter('monitoring-event-get.%s' % event.event_type, 1) data = {} if event.event_type == 'TIMEPERIOD': data = { "host_name": 'n/a', "service_name": 'n/a', "user_name": "Alignak", "type": "monitoring.timeperiod_transition", "message": brok.data['message'], } if event.event_type == 'NOTIFICATION': data = { "host_name": event.data['hostname'], "service_name": event.data['service_desc'] or 'n/a', "user_name": "Alignak", "type": "monitoring.notification", "message": brok.data['message'], } if event.event_type == 'ALERT': data = { "host_name": event.data['hostname'], "service_name": event.data['service_desc'] or 'n/a', "user_name": "Alignak", "type": "monitoring.alert", "message": brok.data['message'], } if event.event_type == 'DOWNTIME': downtime_type = "monitoring.downtime_start" if event.data['state'] == 'STOPPED': downtime_type = "monitoring.downtime_end" if event.data['state'] == 'CANCELLED': downtime_type = "monitoring.downtime_cancelled" data = { "host_name": event.data['hostname'], "service_name": event.data['service_desc'] or 'n/a', "user_name": "Alignak", "type": downtime_type, "message": brok.data['message'], } if event.event_type == 'FLAPPING': flapping_type = "monitoring.flapping_start" if event.data['state'] == 'STOPPED': flapping_type = "monitoring.flapping_stop" data = { "host_name": event.data['hostname'], "service_name": event.data['service_desc'] or 'n/a', "user_name": "Alignak", "type": flapping_type, "message": brok.data['message'], } if event.event_type == 'COMMENT': data = { "host_name": event.data['hostname'], "service_name": event.data['service_desc'] or 'n/a', "user_name": event.data['author'] or 'Alignak', "type": "webui.comment", "message": event.data['comment'], } if data: try: logger.debug("Posting history data: %s", data) start = time.time() self.backend.post('history', data) self.statsmgr.counter('monitoring-event-stored.%s' % event.event_type, 1) self.statsmgr.timer('backend-post-time.history', time.time() - start) except BackendException as exp: logger.exception("Exception: %s", exp) logger.error("Exception response: %s", exp.response) return False else: self.statsmgr.counter('monitoring-event-ignored.%s' % event.event_type, 1) logger.debug("Monitoring event not stored in the backend: %s", brok.data['message']) else: logger.warning("No monitoring event detected from: %s", brok.data['message']) except ValueError: logger.warning("Unable to decode a monitoring event from: %s", brok.data['message']) return True def main(self): """Main loop of the process This module is an "external" module :return: """ # Set the OS process title self.set_proctitle(self.alias) self.set_exit_handler() logger.info("starting...") # Increased on each loop turn self.loop_count = 0 while not self.interrupted: # Increment loop count self.loop_count += 1 try: queue_size = self.to_q.qsize() if queue_size: logger.debug("queue length: %s", queue_size) self.statsmgr.gauge('queue-size', queue_size) message = self.to_q.get_nowait() start = time.time() for brok in message: # Prepare and manage each brok in the queue message brok.prepare() self.manage_brok(brok) logger.debug("time to manage %s broks (%d secs)", len(message), time.time() - start) self.statsmgr.timer('managed-broks-time', time.time() - start) except queue.Empty: # logger.debug("No message in the 
module queue") time.sleep(0.1) if self.daemon_monitoring and (self.loop_count % self.daemon_monitoring_period == 1): perfdatas = [] my_process = psutil.Process() with my_process.oneshot(): perfdatas.append("num_threads=%d" % my_process.num_threads()) self.statsmgr.counter("num_threads", my_process.num_threads()) # perfdatas.append("num_ctx_switches=%d" % my_process.num_ctx_switches()) perfdatas.append("num_fds=%d" % my_process.num_fds()) # perfdatas.append("num_handles=%d" % my_process.num_handles()) perfdatas.append("create_time=%d" % my_process.create_time()) perfdatas.append("cpu_num=%d" % my_process.cpu_num()) self.statsmgr.counter("cpu_num", my_process.cpu_num()) perfdatas.append("cpu_usable=%d" % len(my_process.cpu_affinity())) self.statsmgr.counter("cpu_usable", len(my_process.cpu_affinity())) perfdatas.append("cpu_percent=%.2f%%" % my_process.cpu_percent()) self.statsmgr.counter("cpu_percent", my_process.cpu_percent()) cpu_times_percent = my_process.cpu_times() for key in cpu_times_percent._fields: perfdatas.append("cpu_%s_time=%.2fs" % (key, getattr(cpu_times_percent, key))) self.statsmgr.counter("cpu_%s_time" % key, getattr(cpu_times_percent, key)) memory = my_process.memory_full_info() for key in memory._fields: perfdatas.append("mem_%s=%db" % (key, getattr(memory, key))) self.statsmgr.counter("mem_%s" % key, getattr(memory, key)) logger.debug("Daemon %s (%s), pid=%s, ppid=%s, status=%s, cpu/memory|%s", self.name, my_process.name(), my_process.pid, my_process.ppid(), my_process.status(), " ".join(perfdatas)) logger.info("stopping...") # Properly close all the Python logging stuff # See: http://stackoverflow.com/questions/24816456/python-logging-wont-shutdown logging.shutdown() logger.info("stopped")
agpl-3.0
convexopt/gpkit
gpkit/tests/t_examples.py
1
6270
"""Unit testing of tests in docs/source/examples""" import unittest import os import numpy as np from gpkit import settings from gpkit.tests.helpers import generate_example_tests from gpkit.small_scripts import mag from gpkit.small_classes import Quantity def assert_logtol(first, second, logtol=1e-6): "Asserts that the logs of two arrays have a given abstol" np.testing.assert_allclose(np.log(mag(first)), np.log(mag(second)), atol=logtol, rtol=0) # pylint: disable=too-many-public-methods class TestExamples(unittest.TestCase): """ To test a new example, add a function called `test_$EXAMPLENAME`, where $EXAMPLENAME is the name of your example in docs/source/examples without the file extension. This function should accept two arguments (e.g. 'self' and 'example'). The imported example script will be passed to the second: anything that was a global variable (e.g, "sol") in the original script is available as an attribute (e.g., "example.sol") If you don't want to perform any checks on the example besides making sure it runs, just put "pass" as the function's body, e.g.: def test_dummy_example(self, example): pass But it's good practice to ensure the example's solution as well, e.g.: def test_dummy_example(self, example): self.assertAlmostEqual(example.sol["cost"], 3.121) """ # TODO: allow enabling plotting examples, make plots in correct folder... # def test_plot_sweep1d(self, _): # import matplotlib.pyplot as plt # plt.close("all") def test_autosweep(self, example): from gpkit import ureg bst1, tol1 = example.bst1, example.tol1 bst2, tol2 = example.bst2, example.tol2 l_ = np.linspace(1, 10, 100) for bst in [bst1, example.bst1_loaded]: sol1 = bst.sample_at(l_) assert_logtol(sol1("l"), l_) assert_logtol(sol1("A"), l_**2 + 1, tol1) assert_logtol(sol1["cost"], (l_**2 + 1)**2, tol1) if hasattr(sol1["cost"], "units"): # loaded costs are unitless self.assertEqual(Quantity(1.0, sol1["cost"].units), Quantity(1.0, ureg.m)**4) self.assertEqual(Quantity(1.0, sol1("A").units), Quantity(1.0, ureg.m)**2) ndig = -int(np.log10(tol2)) self.assertAlmostEqual(bst2.cost_at("cost", 3), 1.0, ndig) # before corner A_bc = np.linspace(1, 3, 50) sol_bc = bst2.sample_at(A_bc) assert_logtol(sol_bc("A"), (A_bc/3)**0.5, tol2) assert_logtol(sol_bc["cost"], A_bc/3, tol2) # after corner A_ac = np.linspace(3, 10, 50) sol_ac = bst2.sample_at(A_ac) assert_logtol(sol_ac("A"), (A_ac/3)**2, tol2) assert_logtol(sol_ac["cost"], (A_ac/3)**4, tol2) def test_model_var_access(self, example): model = example.PS _ = model["E"] with self.assertRaises(ValueError): _ = model["m"] # multiple variables called m def test_performance_modeling(self, example): pass def test_sp_to_gp_sweep(self, example): pass def test_boundschecking(self, example): pass def test_vectorize(self, example): pass def test_primal_infeasible_ex1(self, example): with self.assertRaises(RuntimeWarning) as cm: example.m.solve(verbosity=0) err = cm.exception if "mosek" in err.message: self.assertIn("PRIM_INFEAS_CER", err.message) elif "cvxopt" in err.message: self.assertIn("unknown", err.message) def test_primal_infeasible_ex2(self, example): with self.assertRaises(RuntimeWarning): example.m.solve(verbosity=0) def test_docstringparsing(self, example): pass def test_debug(self, example): pass def test_simple_sp(self, example): pass def test_simple_box(self, example): pass def test_x_greaterthan_1(self, example): pass def test_beam(self, example): self.assertFalse(np.isnan(example.sol("w")).any()) def test_water_tank(self, example): pass def test_sin_approx_example(self, 
example): pass def test_external_sp(self, example): pass def test_external_sp2(self, example): pass def test_simpleflight(self, example): self.assertTrue(example.sol.almost_equal(example.sol_loaded)) for sol in [example.sol, example.sol_loaded]: freevarcheck = { "A": 8.46, "C_D": 0.0206, "C_f": 0.0036, "C_L": 0.499, "Re": 3.68e+06, "S": 16.4, "W": 7.34e+03, "V": 38.2, "W_w": 2.40e+03 } # sensitivity values from p. 34 of W. Hoburg's thesis senscheck = { r"(\frac{S}{S_{wet}})": 0.4300, "e": -0.4785, "V_{min}": -0.3691, "k": 0.4300, r"\mu": 0.0860, "(CDA0)": 0.0915, "C_{L,max}": -0.1845, r"\tau": -0.2903, "N_{ult}": 0.2903, "W_0": 1.0107, r"\rho": -0.2275 } for key in freevarcheck: sol_rat = mag(sol["variables"][key])/freevarcheck[key] self.assertTrue(abs(1-sol_rat) < 1e-2) for key in senscheck: sol_rat = sol["sensitivities"]["constants"][key]/senscheck[key] self.assertTrue(abs(1-sol_rat) < 1e-2) def test_relaxation(self, example): pass def test_unbounded(self, example): pass FILE_DIR = os.path.dirname(os.path.realpath(__file__)) EXAMPLE_DIR = os.path.abspath(FILE_DIR + '../../../docs/source/examples') SOLVERS = settings["installed_solvers"] if os.path.isdir(EXAMPLE_DIR): TESTS = generate_example_tests(EXAMPLE_DIR, [TestExamples], SOLVERS) else: TESTS = [] if __name__ == "__main__": # pylint:disable=wrong-import-position from gpkit.tests.helpers import run_tests run_tests(TESTS)
mit
seanli9jan/tensorflow
tensorflow/python/profiler/tfprof_logger_test.py
48
2977
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class TFProfLoggerTest(test.TestCase):

  def _BuildSmallPlaceholderlModel(self):
    a = array_ops.placeholder(dtypes.int32, [2, 2])
    b = array_ops.placeholder(dtypes.int32, [2, 2])
    y = math_ops.matmul(a, b)
    return a, b, y

  def _BuildSmallModel(self):
    a = constant_op.constant([[1, 2], [3, 4]])
    b = constant_op.constant([[1, 2], [3, 4]])
    return math_ops.matmul(a, b)

  # pylint: disable=pointless-string-statement
  """# TODO(xpan): This out of core so it doesn't depend on contrib.
  def testFillMissingShape(self):
    a, b, y = self._BuildSmallPlaceholderlModel()
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    sess = session.Session()
    sess.run(y,
             options=run_options,
             run_metadata=run_metadata,
             feed_dict={a: [[1, 2], [2, 3]],
                        b: [[1, 2], [2, 3]]})

    graph2 = ops.Graph()
    # Use copy_op_to_graph to remove shape information.
    y2 = copy_elements.copy_op_to_graph(y, graph2, [])
    self.assertEquals('<unknown>', str(y2.get_shape()))
    tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
    self.assertEquals('(2, 2)', str(y2.get_shape()))

  def testFailedFillMissingShape(self):
    y = self._BuildSmallModel()
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    sess = session.Session()
    sess.run(y, options=run_options, run_metadata=run_metadata)

    graph2 = ops.Graph()
    y2 = copy_elements.copy_op_to_graph(y, graph2, [])
    self.assertEquals('<unknown>', str(y2.get_shape()))
    # run_metadata has special name for MatMul, hence failed to fill shape.
    tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
    self.assertEquals('<unknown>', str(y2.get_shape()))
  """


if __name__ == '__main__':
  test.main()
apache-2.0
hforge/Localizer
zgettext.py
1
8628
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (C) 2001 Andrés Marzal Varo
# Copyright (C) 2001-2002 J. David Ibáñez <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""
zgettext.py is a script that parses DTML files and generates .pot and .po
files, and then generates .mo files from the .po files.

Future (XXX):

  zgettext should provide a similar interface to xgettext, it just should
  detect dtml and zpt files, parse them, and call xgettext for the rest.

  another script should do the wrap up to easily create multilingual
  products, or maybe we could avoid this and just use make

  Anyway, the trend is to leverage the gettext tools as much as possible.
"""

# Import from the Standard Library
from os import listdir, mkdir, remove, system
from os.path import exists, isdir
from re import compile, DOTALL, findall
import sys
from tempfile import mktemp
from time import gmtime, strftime, time

# Import from itools
from itools.handlers import get_handler
from itools.gettext import POFile


# Exceptions
class UnknownStatus(Exception):
    pass


def create_mo_files():
    for filename in [ x for x in listdir('locale') if x.endswith('.po') ]:
        language = filename[:-3]
        system('msgfmt locale/%s.po -o locale/%s.mo' % (language, language))


def parse_generic(text, commands=('gettext',)):
    """Search for patterns like: gettext('message').

    XXX Originally it was used to parse Python code, but it fails to parse
    some of the Python strings, now xgettext is used instead. So currently
    this function is only used to parse DTML and ZPT; probably the regular
    expression could be simplified as in DTML and ZPT there're (maybe) fewer
    options for Python strings due to the syntax constraints of these
    languages.
    """
    r = []
    for command in commands:
        pattern = command + '\s*\(\s*(\'.*?[^\\\\]\'|\".*?[^\\\\]\")\s*\)'
        regex = compile(pattern, DOTALL)
        r.extend([ x[1:-1] for x in findall(regex, text) ])
    return r


def parse_dtml(text):
    """Extract the messages from a DTML template.
    """
    messages = parse_generic(text)
    # Search the "<dtml-gettext>message</dtml-gettext>" pattern
    regex = compile('<dtml-gettext(.*?)>(.*?)</dtml-gettext>', DOTALL)
    for parameters, message in findall(regex, text):
        if parameters.find('verbatim') == -1:
            message = ' '.join([ x.strip() for x in message.split() ])
        messages.append(message)
    return messages


def parse_zpt(text):
    """Extract the messages from a ZPT template.

    XXX It should be improved to parse the i18n namespace.
    """
    return parse_generic(text)


def do_all(filenames, languages):
    # Create the locale directory
    if not isdir('./locale'):
        try:
            mkdir('./locale')
        except OSError, msg:
            sys.stderr.write('Error: Cannot create directory "locale".\n%s\n'
                             % msg)
            sys.exit(1)

    # Create the pot file
    if not exists('locale/locale.pot'):
        f = open('locale/locale.pot', 'w')
        f.write("# SOME DESCRIPTIVE TITLE.\n")
        f.write("# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER\n")
        f.write("# This file is distributed under the same license as the PACKAGE package.\n")
        f.write("# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\n")
        f.write("#\n")
        f.write("#, fuzzy\n")
        f.write('msgid ""\n')
        f.write('msgstr ""\n')
        f.write('"Project-Id-Version: PACKAGE VERSION\\n"\n')
        f.write('"POT-Creation-Date: %s\\n"\n'
                % strftime('%Y-%m-%d %H:%M+%Z', gmtime(time())))
        f.write('"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"\n')
        f.write('"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"\n')
        f.write('"Language-Team: LANGUAGE <[email protected]>\\n"\n')
        f.write('"MIME-Version: 1.0\\n"\n')
        f.write('"Content-Type: text/plain; charset=CHARSET\\n"\n')
        f.write('"Content-Transfer-Encoding: 8bit\\n"\n')
        f.close()

    # Filter and parse the DTML and ZPT files, the rest will be parsed
    # with xgettext.
    filenames2 = []
    messages = []
    for filename in filenames:
        filetype = filename.split('.')[-1]
        if filetype == 'dtml':
            text = open(filename).read()
            messages.extend(parse_dtml(text))
        elif filetype == 'zpt':
            text = open(filename).read()
            messages.extend(parse_zpt(text))
        else:
            filenames2.append(filename)

    filenames = []
    # Write a PO file with the messages from DTML and ZPT
    if messages:
        filename = mktemp('.po')
        filenames.append(filename)
        f = open(filename, 'w')
        aux = []
        for message in messages:
            if message not in aux:
                f.write('msgid "%s"\n' % message)
                f.write('msgstr ""\n')
                f.write('\n')
                aux.append(message)
        f.close()

    # Parse the rest of the files
    if filenames2:
        po = POFile()
        for filename in filenames2:
            handler = get_handler(filename)
            for source, context, line in handler.get_units():
                po.add_unit(filename, source, context, line)

        filename = mktemp('.po')
        filenames.append(filename)
        open(filename, 'w').write(po.to_str())

    # Create the POT file
    if filenames:
        filename = mktemp('.po')
        cmd = 'msgcat -s --output-file=%s %s' % (filename, ' '.join(filenames))
        system(cmd)
        system('msgmerge -U locale/locale.pot %s' % filename)
        # Remove temporary files
        remove(filename)
        for filename in filenames:
            remove(filename)

    # Generate the PO files
    for language in languages:
        if exists('./locale/%s.po' % language):
            # a .po file already exists, merge it with locale.pot
            system('msgmerge -U locale/%s.po locale/locale.pot' % language)
        else:
            # po doesn't exist, just copy locale.pot
            text = open('./locale/locale.pot').read()
            open('./locale/%s.po' % language, 'w').write(text)


if __name__ == '__main__':
    # Parse the command line
    status = 0
    files = []
    langs = []
    for arg in sys.argv[1:]:
        if status == 0:
            if arg == '-h':
                status = 1
            elif arg == '-m':
                status = 2
            elif arg == '-l':
                status = 3
            else:
                files.append(arg)
                status = 4
        elif status == 1:
            status = 'Error'
            break
        elif status == 2:
            status = 'Error'
            break
        elif status == 3:
            langs.append(arg)
            status = 5
        elif status == 4:
            if arg == '-l':
                status = 3
            else:
                files.append(arg)
        elif status == 5:
            langs.append(arg)
        else:
            raise UnknownStatus, str(status)

    # Action
    if status in (0, 1, 3, 'Error'):
        # Provide help if the line format is wrong or if the -h modifier
        # is provided
        print 'Usage:'
        print '  zgettext.py -h'
        print '      Shows this help message.'
        print '  zgettext.py [file file ... file] [-l languages]'
        print '      Parses all the specified files, creates the locale'
        print '      directory, creates the locale.pot file and the .po'
        print '      files of the languages specified.'
        print '  zgettext.py -m'
        print '      Compiles all the .po files in the locale directory'
        print '      and creates the .mo files.'
        print
        print 'Examples:'
        print '  zgettext.py *.dtml -l ca es en'
        print '  zgettext.py -m'
    elif status == 2:
        create_mo_files()
    elif status in (4, 5):
        do_all(files, langs)
    else:
        raise UnknownStatus, str(status)
gpl-3.0
michaelkuty/django-cacheback
cacheback/decorators.py
5
1455
from functools import wraps

from django.utils.decorators import available_attrs

from cacheback.function import FunctionJob


def cacheback(lifetime=None, fetch_on_miss=None, job_class=None,
              task_options=None, **job_class_kwargs):
    """
    Decorate function to cache its return value.

    :lifetime: How long to cache items for
    :fetch_on_miss: Whether to perform a synchronous fetch when no cached
                    result is found
    :job_class: The class to use for running the cache refresh job.  Defaults
                using the FunctionJob.
    :job_class_kwargs: Any extra kwargs to pass to job_class constructor.
                       Useful with custom job_class implementations.
    """
    if job_class is None:
        job_class = FunctionJob
    job = job_class(lifetime=lifetime, fetch_on_miss=fetch_on_miss,
                    task_options=task_options, **job_class_kwargs)

    def _wrapper(fn):
        # using available_attrs to work around http://bugs.python.org/issue3445
        @wraps(fn, assigned=available_attrs(fn))
        def __wrapper(*args, **kwargs):
            return job.get(fn, *args, **kwargs)
        # Assign reference to unwrapped function so that we can access it
        # later without descending into infinite regress.
        __wrapper.fn = fn
        # Assign reference to job so we can use the full Job API
        __wrapper.job = job
        return __wrapper

    return _wrapper
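
# --- Usage sketch (editor's addition, not part of the original module) ---
# Decorating an expensive function so repeated calls are served from cache
# and refreshed via the job machinery. The function name, body, and arguments
# are illustrative assumptions; only the decorator defined above is real.
#
# @cacheback(lifetime=600, fetch_on_miss=True)
# def fetch_report(user_id):
#     return compute_report(user_id)   # hypothetical expensive call
#
# fetch_report(42)      # first call populates the cache
# fetch_report(42)      # later calls hit the cache until the job refreshes it
# fetch_report.fn(42)   # bypass caching via the unwrapped function
# fetch_report.job      # access the underlying FunctionJob instance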
mit
shivamMg/toy-task
toytask/toytask/views.py
1
3161
import json
import os
import random

import cv2
from django.shortcuts import render, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt

from toytask.settings import MEDIA_ROOT
from . import modules
from .forms import UploadImageForm


def imagepath(imagename):
    return os.path.join(MEDIA_ROOT, 'images', imagename)


@csrf_exempt
def home(request):
    if request.method == 'POST':
        pipeline_str = request.POST.get('pipeline', False)
        if pipeline_str:
            pipeline = json.loads(pipeline_str)
            request.session['pipeline'] = pipeline
            return HttpResponseRedirect(reverse('params'))
    return render(request, 'pipeline.html', {
        'module_list': json.dumps(modules.info_list()),}
    )


@csrf_exempt
def params(request):
    pipeline = request.session.get('pipeline', False)
    if not pipeline:
        return HttpResponseRedirect(reverse('home'))
    form_list = []
    form_dict = modules.form_dict()
    info_dict = modules.info_dict()
    if request.method == 'GET':
        image_form = UploadImageForm()
        for i, module_handle in enumerate(pipeline):
            form_class = form_dict[module_handle]
            info = info_dict[module_handle]
            form_list.append({
                'form': form_class(prefix='form_'+str(i)),
                'name': info['Name'],}
            )
    elif request.method == 'POST':
        image_form = UploadImageForm(request.POST, request.FILES)
        for i, module_handle in enumerate(pipeline):
            form_class = form_dict[module_handle]
            info = info_dict[module_handle]
            form_list.append({
                'form': form_class(request.POST, prefix='form_'+str(i)),
                'name': info['Name'],}
            )
        for form in form_list:
            if not form['form'].is_valid():
                break
        else:
            if image_form.is_valid():
                new_image = modules.UploadImageModel(
                    image_file=request.FILES['image_file'])
                new_image.save()
                original_image = request.FILES['image_file'].name
                imagename = original_image
                image_list = []
                for i, form in enumerate(form_list):
                    model = form['form'].save(commit=False)
                    imgpath = imagepath(imagename)
                    img = cv2.imread(imgpath)
                    dst = model.module_func(img)
                    dstname = 'lena_{0}.png'.format(random.randrange(1, 100))
                    cv2.imwrite(imagepath(dstname), dst)
                    image_list.append({
                        'imagename': dstname,
                        'module': form['name'],}
                    )
                    imagename = dstname
                return render(request, 'result.html', {
                    'original_image': original_image,
                    'image_list': image_list,}
                )
    return render(request, 'params.html', {
        'image_form': image_form,
        'form_list': form_list,}
    )
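
# --- Editor's illustrative sketch (not part of the original module) ---
# Shape of the `modules` API as used by the views above, inferred from this
# file only; the concrete implementation in toytask.modules may differ. Each
# pipeline module needs a handle, a display name, a ModelForm, and a model
# whose module_func() transforms an OpenCV image:
#
# def info_list():   # -> [{'Name': 'Grayscale', ...}, ...] for pipeline.html
# def info_dict():   # -> {module_handle: {'Name': ...}}
# def form_dict():   # -> {module_handle: SomeModelForm}
#
# class SomeModel(models.Model):          # hypothetical example module
#     def module_func(self, img):
#         return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # example transform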
mit
liffiton/ATLeS
src/analysis/plot.py
1
11295
import math import re import matplotlib import matplotlib.pyplot as plt from matplotlib import collections, lines, patches from analysis import heatmaps import config # Source: https://gist.github.com/jasonmc/1160951 def _set_foregroundcolor(ax, color): '''For the specified axes, sets the color of the frame, major ticks, tick labels, axis labels, title and legend ''' for tl in ax.get_xticklines() + ax.get_yticklines(): tl.set_color(color) for spine in ax.spines: ax.spines[spine].set_edgecolor(color) for tick in ax.xaxis.get_major_ticks(): tick.label1.set_color(color) for tick in ax.yaxis.get_major_ticks(): tick.label1.set_color(color) ax.axes.xaxis.label.set_color(color) ax.axes.yaxis.label.set_color(color) ax.axes.xaxis.get_offset_text().set_color(color) ax.axes.yaxis.get_offset_text().set_color(color) ax.axes.title.set_color(color) lh = ax.get_legend() if lh is not None: lh.get_title().set_color(color) lh.legendPatch.set_edgecolor('none') labels = lh.get_texts() for lab in labels: lab.set_color(color) for tl in ax.get_xticklabels(): tl.set_color(color) for tl in ax.get_yticklabels(): tl.set_color(color) # Source: https://gist.github.com/jasonmc/1160951 def _set_backgroundcolor(ax, color): '''Sets the background color of the current axes (and legend). Use 'None' (with quotes) for transparent. To get transparent background on saved figures, use: pp.savefig("fig1.svg", transparent=True) ''' ax.patch.set_facecolor(color) lh = ax.get_legend() if lh is not None: lh.legendPatch.set_facecolor(color) def format_axis(ax): _set_foregroundcolor(ax, '0.5') _set_backgroundcolor(ax, '0.08') # drop plot borders for spine in ax.spines: ax.spines[spine].set_visible(False) def _format_figure(fig): fig.patch.set_facecolor('0.12') plt.tight_layout() def show(): ''' Shows the current figure (on screen, if using a GUI backend). Create a plot first using a TrackPlotter object. ''' fig = plt.gcf() _format_figure(fig) plt.show() plt.close('all') def savefig(outfile, format=None): ''' Saves the current figure to the given filename or file-like object. Format is inferred from the file extension if a name is given, otherwise specify it manually with the format parameter. Large (tall) figures are broken into multiple images (vertical tiles) if outfile is a string (filename). Create a plot first using a TrackPlotter object or other code that creates a pyplot figure. 
''' fig = plt.gcf() _format_figure(fig) # A bit of an ugly hack to split giant images into multiple parts # Only used if outfile is given as a string (filename) max_height = 100 if isinstance(outfile, str) else float('inf') # plot height in inches height = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted()).height if height > max_height: numparts = int(height / max_height) + 1 for i in range(numparts): filename = re.sub(r"(\.[^\.]+)$", r"%02d\1" % (numparts-i), outfile) bbox = matplotlib.transforms.Bbox.from_extents([0,i*max_height,12,min(height,(i+1)*max_height)]) plt.savefig(filename, facecolor=fig.get_facecolor(), edgecolor='none', bbox_inches=bbox, format=format) else: plt.savefig(outfile, facecolor=fig.get_facecolor(), edgecolor='none', format=format) plt.close('all') class TrackPlotter(object): def __init__(self, track_processor, dbgframes=None): self._track = track_processor self._dbgframes = dbgframes @staticmethod def _speed2color(speed): # setup ranges, where 0 maps to first number, 1.0 maps to second color_ranges = { 'r': (0.8, 1.0), 'g': (0.8, 0.0), 'b': (0.8, 0.0) } def scale(inval, color): range = color_ranges[color] scaled = range[0] + (range[1] - range[0]) * inval return min(1, max(0, scaled)) # constrain to 0-1 r = scale(speed, 'r') g = scale(speed, 'g') b = scale(speed, 'b') return (r,g,b, 0.5) def plot_trace(self): # one minute per subplot numplots = self._track.len_minutes fig = plt.figure(figsize=(12,2*(numplots+1))) # Draw the legend at the top self.draw_legend(plt.subplot(numplots+1, 1, 1)) for i in range(numplots): ax = plt.subplot(numplots+1, 1, i+2) self._plot_trace_portion(ax, start_min=i, end_min=i+1) return fig def draw_legend(self, legend_ax): # Make a legend with proxy artists xpos_artist = lines.Line2D([],[], color='orange') ypos_artist = lines.Line2D([],[], color='limegreen') numpts_artist = lines.Line2D([],[], color='purple', linewidth=1) frozen_artist = patches.Rectangle((0,0), 1, 1, fc='lightblue', ec='None') missing_artist = patches.Rectangle((0,0), 1, 1, fc='yellow', ec='None') lost_artist = patches.Rectangle((0,0), 1, 1, fc='red', ec='None') # Place it in center of top "subplot" area legend_ax.legend( [xpos_artist, ypos_artist, numpts_artist, frozen_artist, missing_artist, lost_artist], ['x-pos', 'y-pos', '# Detection pts', 'Frozen', 'Missing', 'Lost'], loc='center', fontsize=12, ncol=4, ) legend_ax.axis('off') format_axis(legend_ax) def plot_invalidheatmap(self): title = "Map of shame (loc of invalid data)" plt.figure(figsize=(4, 4)) ax = plt.gca() ax.set_title(title) format_axis(ax) nbins = 50 badpoints = (self._track.df.valid != True) # noqa: E712 heatmaps.plot_heatmap(ax, self._track.df.x[badpoints], self._track.df.y[badpoints], nbins=nbins) def plot_heatmap(self, plot_type='overall'): assert plot_type in ('per-minute', 'per-phase', 'overall') if plot_type == 'per-minute': numplots = self._track.len_minutes elif plot_type == 'per-phase': numplots = self._track.num_phases() phase_starts = self._track.phase_starts() phase_ends = phase_starts[1:] + [2**30] elif plot_type == 'overall': numplots = 1 numrows = int(math.ceil(numplots / 10.0)) if plot_type == 'overall': plt.figure(figsize=(4, 4)) else: plt.figure(figsize=(2*min(numplots, 10), 2*numrows)) for i in range(numplots): if plot_type == 'per-minute': start_min = i end_min = i+1 title = "{}:00-{}:00".format(start_min, end_min) elif plot_type == 'per-phase': start_min = phase_starts[i] end_min = phase_ends[i] title = "Phase {} ({}:00-{}:00)".format(i+1, start_min, end_min) elif 
plot_type == 'overall': start_min = 0 end_min = 2**30 title = "Overall heatmap" ax = plt.subplot(numrows, min(numplots, 10), i+1) if numplots > 1: ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) format_axis(ax) ax.set_title(title) nbins = 50 start_sec = start_min*60 end_sec = end_min*60 heatmaps.plot_heatmap(ax, self._track.df.x[start_sec:end_sec], self._track.df.y[start_sec:end_sec], nbins=nbins) def _plot_trace_portion(self, ax, start_min, end_min): ''' Parameters: start_min, end_min: Integer minutes. Plot should be from start:00 to end:00. ''' # shorthand df = self._track.df start = start_min * 60 end = end_min * 60 time = df.index.to_series()[start:end].values #theta = self._track.theta[start:end] #speed = self._track.speed[start:end] #valid = self._track.valid[start:end] lost = df.lost[start:end].values missing = df.missing[start:end].values frozen = df.frozen[start:end].values x = df.x[start:end].values y = df.y[start:end].values numpts = df.numpts[start:end].values # Format nicely format_axis(ax) ax.axes.get_yaxis().set_visible(False) # Get set axes (specifically, we don't want the y-axis to be autoscaled for us) ax.axis([start, end, -1.0, 1.0]) # Mark lost/missing sections lost_collection = collections.BrokenBarHCollection.span_where( time, -1.0, -0.9, lost, edgecolors='none', facecolors='red', ) ax.add_collection(lost_collection) missing_collection = collections.BrokenBarHCollection.span_where( time, -1.0, -0.9, missing, edgecolors='none', facecolors='yellow', ) ax.add_collection(missing_collection) # Mark frozen sections frozen_collection = collections.BrokenBarHCollection.span_where( time, -0.85, -0.8, frozen, edgecolors='none', facecolors='lightblue', ) ax.add_collection(frozen_collection) # Plot horizontal position ax.plot(time, x*2-1, color='orange', label='x position') # Plot height ax.plot(time, y*2-1, color='limegreen', label='y position') # Plot numpts (scaled so 0 = -1.0 (plot bottom), 20 = 1.0 (top)) ax.plot(time, -1.0+(numpts/10.0), color='purple', linewidth=1, label='# detected points') # Add stick plot of movement (where valid) # ax.quiver( # time, [0] * len(time), # speed*np.cos(theta), speed*np.sin(theta), # color=[self._speed2color(s) for s in speed], # scale=1, # scale all to a speed of 1, which should be close to max (tank is 1.0x1.0) # scale_units='y', # width=0.01, # units='inches', # headlength=0, headwidth=0, headaxislength=0 # no arrowheads # ) # Add markers/links to debugframes if given # Get [tracking]:start_frame for proper offset of debug frame numbers into track data here start_frame = int(self._track.config['tracking']['start_frame']) for dbgframe in self._dbgframes: nameparts = dbgframe.name.split('_') frameindex = max(0, int(nameparts[1]) - start_frame) # restrict to index 0 at minimum frametime = self._track.df.index[frameindex] if start <= frametime < end: marker = matplotlib.patches.Circle( (frametime, -1.1), radius=0.08, color='#337AB7', clip_on=False, url=str("/data" / dbgframe.relative_to(config.DATADIR)) ) ax.add_artist(marker)
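
# --- Usage sketch (editor's addition, not part of the original module) ---
# Rendering a track trace and a heatmap to files. The `track` object and its
# construction are assumptions (TrackPlotter expects a track-processor with
# the attributes it reads: df, len_minutes, config, ...); TrackPlotter,
# savefig(), plot_trace() and plot_heatmap() are all defined above.
#
# plotter = TrackPlotter(track, dbgframes=[])
# plotter.plot_trace()             # builds the per-minute trace figure
# savefig("trace.png")             # formats and saves the current figure
# plotter.plot_heatmap('overall')  # single overall position heatmap
# savefig("heatmap.png")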
mit
zhang-alex/bash-modules
en/Lib/encodings/cp1257.py
593
13630
""" Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp1257', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' 
# 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\u20ac' # 0x80 -> EURO SIGN u'\ufffe' # 0x81 -> UNDEFINED u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK u'\ufffe' # 0x83 -> UNDEFINED u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS u'\u2020' # 0x86 -> DAGGER u'\u2021' # 0x87 -> DOUBLE DAGGER u'\ufffe' # 0x88 -> UNDEFINED u'\u2030' # 0x89 -> PER MILLE SIGN u'\ufffe' # 0x8A -> UNDEFINED u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK u'\ufffe' # 0x8C -> UNDEFINED u'\xa8' # 0x8D -> DIAERESIS u'\u02c7' # 0x8E -> CARON u'\xb8' # 0x8F -> CEDILLA u'\ufffe' # 0x90 -> UNDEFINED u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK u'\u2022' # 0x95 -> BULLET u'\u2013' # 0x96 -> EN DASH u'\u2014' # 0x97 -> EM DASH u'\ufffe' # 0x98 -> UNDEFINED u'\u2122' # 0x99 -> TRADE MARK SIGN u'\ufffe' # 0x9A -> UNDEFINED u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK u'\ufffe' # 0x9C -> UNDEFINED u'\xaf' # 0x9D -> MACRON u'\u02db' # 0x9E -> OGONEK u'\ufffe' # 0x9F -> UNDEFINED u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\ufffe' # 0xA1 -> UNDEFINED u'\xa2' # 0xA2 -> CENT SIGN u'\xa3' # 0xA3 -> POUND 
SIGN u'\xa4' # 0xA4 -> CURRENCY SIGN u'\ufffe' # 0xA5 -> UNDEFINED u'\xa6' # 0xA6 -> BROKEN BAR u'\xa7' # 0xA7 -> SECTION SIGN u'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xac' # 0xAC -> NOT SIGN u'\xad' # 0xAD -> SOFT HYPHEN u'\xae' # 0xAE -> REGISTERED SIGN u'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE u'\xb0' # 0xB0 -> DEGREE SIGN u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\xb2' # 0xB2 -> SUPERSCRIPT TWO u'\xb3' # 0xB3 -> SUPERSCRIPT THREE u'\xb4' # 0xB4 -> ACUTE ACCENT u'\xb5' # 0xB5 -> MICRO SIGN u'\xb6' # 0xB6 -> PILCROW SIGN u'\xb7' # 0xB7 -> MIDDLE DOT u'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE u'\xb9' # 0xB9 -> SUPERSCRIPT ONE u'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS u'\xe6' # 0xBF -> LATIN SMALL LETTER AE u'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK u'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK u'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON u'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK u'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE u'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE u'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA u'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA u'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON u'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA u'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE u'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE u'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xd7' # 0xD7 -> MULTIPLICATION SIGN u'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK u'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE u'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE u'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE u'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S u'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK u'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK u'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON u'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE u'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK u'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE u'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE u'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA u'\u0137' # 0xED -> LATIN SMALL LETTER 
K WITH CEDILLA u'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON u'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA u'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE u'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE u'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf7' # 0xF7 -> DIVISION SIGN u'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK u'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE u'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE u'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE u'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON u'\u02d9' # 0xFF -> DOT ABOVE ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
mit
rowhit/h2o-2
py/testdir_single_jvm/test_GLM2_princeton.py
9
1776
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_import as h2i

class Basic(unittest.TestCase):
    def tearDown(self):
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        h2o.init(1)
        global SYNDATASETS_DIR
        SYNDATASETS_DIR = h2o.make_syn_dir()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_GLM2_princeton(self):
        # filename, y, timeoutSecs
        # these are all counts? using gaussian?
        csvFilenameList = [
            ('cuse.dat', 'gaussian', 3, 10), # notUsing
            ('cuse.dat', 'gaussian', 4, 10), # using
            ('copen.dat', 'gaussian', 4, 10),
            ('housing.raw', 'gaussian', 4, 10),
        ]

        trial = 0
        for (csvFilename, family, y, timeoutSecs) in csvFilenameList:
            csvPathname1 = 'logreg/princeton/' + csvFilename
            fullPathname1 = h2i.find_folder_and_filename('smalldata', csvPathname1, returnFullPath=True)

            csvPathname2 = SYNDATASETS_DIR + '/' + csvFilename + '_stripped.csv'
            h2o_util.file_strip_trailing_spaces(fullPathname1, csvPathname2)

            parseResult = h2i.import_parse(path=csvPathname2, schema='put', timeoutSecs=timeoutSecs)
            start = time.time()
            kwargs = {'n_folds': 0, 'family': family, 'response': y}
            glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
            h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
            print "glm end (w/check) on ", csvPathname2, 'took', time.time() - start, 'seconds'

            trial += 1
            print "\nTrial #", trial

if __name__ == '__main__':
    h2o.unit_main()
apache-2.0