Dataset schema (string length ranges): repo_name (5–100), ref (12–67), path (4–244), copies (1–8), content (0–1.05M)
weblabdeusto/weblabdeusto
refs/heads/master
server/launch/sample/main_machine/main_instance/experiment_jsfpga/server_config.py
3
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-

xilinx_board_type = 'FPGA'
weblab_xilinx_experiment_port_number = 1

# This should be something like this:
# import os as _os
# xilinx_home = _os.getenv('XILINX_HOME')
# if xilinx_home == None:
#     if _os.name == 'nt':
#         xilinx_home = r'C:\Program Files\Xilinx'
#     elif _os.name == 'posix':
#         xilinx_home = r"/home/nctrun/Xilinx"
#
# if _os.name == 'nt':
#     xilinx_impact_full_path = [xilinx_home + r'\bin\nt\impact']
# elif _os.name == 'posix':
#     xilinx_impact_full_path = [xilinx_home + r'/bin/lin/impact']

# But for testing we are going to fake it:
xilinx_home = "."
xilinx_impact_full_path = ["python", "../../src/test/unit/weblab/experiment/devices/xilinx_impact/fake_impact.py"]

xilinx_programmer_type = 'XilinxImpact'  # 'JTagBlazer', 'DigilentAdept'
xilinx_device_to_send_commands = 'SerialPort'  # 'HttpDevice'

xilinx_serial_port_is_fake = True

xilinx_batch_content_FPGA = """setMode -bs
setCable -port auto
addDevice -position 1 -file $FILE
Program -p 1
exit
"""

digilent_adept_full_path = ["python", "../../src/test/unit/weblab/experiment/devices/digilent_adept/fake_digilent_adept.py"]
digilent_adept_batch_content = """something with the variable $FILE"""

xilinx_http_device_ip_FPGA = "192.168.50.138"
xilinx_http_device_port_FPGA = 80
xilinx_http_device_app_FPGA = ""

xilinx_programmer_time = 60  # seconds

# This is the time, in seconds, that we estimate it will take to synthesize VHDL. It is only
# applied when raw VHDL is sent, rather than an already synthesized BIT file.
xilinx_synthesizer_time = 120

# When set to true, the fixed times above will be modified dynamically and automatically.
xilinx_adaptive_time = True

fpga_webcam_url = '''https://www.weblab.deusto.es/webcam/fpga0/image.jpg'''

# Path to the UCF, VHD, and project files, which is also the path where the .BIT files will be
# generated.
xilinx_compiling_files_path = "main_machine/main_instance/experiment_fpga/files"

# Path to the Xilinx tools used for compiling (par, xst, etc). This is not required if those
# command line tools are available from the path.
xilinx_compiling_tools_path = ""
HoLyVieR/http-security-headers
refs/heads/master
output/HTMLOutput.py
1
import re

from jinja2 import Template

TEMPLATE_HTML = """
<html>
<head>
    <title>{{ title }}</title>
    {% for file in javascript %}
    <script type="text/javascript" src="{{ file }}"></script>
    {% endfor %}
    {% for file in css %}
    <link type="text/css" href="{{ file }}" rel="stylesheet" />
    {% endfor %}
</head>
<body>
<div class="container">
<h1>{{ title }}</h1>
<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">

<!-- RAW HEADERS -->
<div class="panel panel-default">
    <div class="panel-heading" role="tab">
        <!-- TITLE -->
        <h4>
            <a role="button" data-toggle="collapse" aria-expanded="true"
               aria-controls="collapseRawHeader" href="#collapseRawHeader">Raw headers</a>
        </h4>
        <!-- /TITLE -->
    </div>
    <div id="collapseRawHeader" class="panel-collapse collapse in" role="tabpanel">
        <div class="panel-body">
            <!-- CONTENT -->
            <table class="table">
            {% for header in headers | sort(attribute=0) %}
                <tr>
                    <td style="width: 250px" class="active"><b>{{ header[0] | e }}</b></td>
                    <td>{{ header[1] | e }}</td>
                </tr>
            {% endfor %}
            </table>
            <!-- /CONTENT -->
        </div>
    </div>
</div>

<!-- PARSED HEADER -->
<div class="panel panel-default">
    <div class="panel-heading" role="tab">
        <!-- TITLE -->
        <h4>
            <a role="button" data-toggle="collapse" aria-expanded="true"
               aria-controls="collapseParsedHeader" href="#collapseParsedHeader">Parsed headers</a>
        </h4>
        <!-- /TITLE -->
    </div>
    <div id="collapseParsedHeader" class="panel-collapse collapse in" role="tabpanel">
        <div class="panel-body">
            <!-- CONTENT -->
            <table class="table">
            {% for header in parsed | sort %}
                <tr>
                    <td style="width: 250px" class="active"><b>{{ header }}</b></td>
                    <td>
                        <ul>
                        {% for item in parsed[header] | sort %}
                            {% if not item in ["warning", "info", "policy", "pin-sha256", "max-age"] and not parsed[header][item] == None %}
                            <li><b>{{ item | capitalize | e }}</b> : {{ parsed[header][item] | e }}</li>
                            {% endif %}

                            {% if item == "max-age" and not parsed[header][item] == None %}
                            <li><b>Max-age</b> : {{ filters["human_time"](parsed[header][item]) | e }}</li>
                            {% endif %}

                            {% if item == "pin-sha256" and not parsed[header][item] == None %}
                            <li>
                                <b>Pin-sha256</b> : <br />
                                <ul>
                                {% for pin in parsed[header]["pin-sha256"] %}
                                    <li>{{ pin }}</li>
                                {% endfor %}
                                </ul>
                            </li>
                            {% endif %}

                            {% if item == "policy" %}
                            <li>
                                <b>Policy</b> : <br />
                                <ul>
                                {% for policy_name in parsed[header]["policy"] | sort %}
                                    {% if parsed[header]["policy"][policy_name] %}
                                    <li>
                                        <b>{{ filters["csp_name"](policy_name) | e }}</b><br />
                                        <ul>
                                        {% if not parsed[header]["policy"][policy_name] is string %}
                                            {% for policy_value in parsed[header]["policy"][policy_name] %}
                                            <li>{{ filters["csp_value"](policy_value) }}</li>
                                            {% endfor %}
                                        {% else %}
                                            <li>{{ parsed[header]["policy"][policy_name] }}</li>
                                        {% endif %}
                                        </ul><br />
                                    </li>
                                    {% endif %}
                                {% endfor %}
                                </ul>
                            </li>
                            {% endif %}
                        {% endfor %}
                        </ul>
                    </td>
                </tr>
            {% endfor %}
            </table>
            <!-- /CONTENT -->
        </div>
    </div>
</div>

<!-- ANALYSIS -->
<div class="panel panel-default">
    <div class="panel-heading" role="tab">
        <!-- TITLE -->
        <h4>
            <a role="button" data-toggle="collapse" aria-expanded="true"
               aria-controls="collapseAnalysis" href="#collapseAnalysis">Analysis</a>
        </h4>
        <!-- /TITLE -->
    </div>
    <div id="collapseAnalysis" class="panel-collapse collapse in" role="tabpanel">
        <div class="panel-body">
            <!-- CONTENT -->
            {% for report_name in report %}
            <h3>{{ report_name }}</h3>
            <ul>
            {% for report_element in report[report_name] %}
                {% if report_element["type"] == "warning" %}
                <li><div class="alert alert-warning">{{ report_element["message"] }}</div></li>
                {% endif %}
                {% if report_element["type"] == "error" %}
                <li><div class="alert alert-danger">{{ report_element["message"] }}</div></li>
                {% endif %}
                {% if report_element["type"] == "info" %}
                <li><div class="alert alert-info">{{ report_element["message"] }}</div></li>
                {% endif %}
            {% endfor %}
            </ul><br />
            {% endfor %}
            <!-- /CONTENT -->
        </div>
    </div>
</div>

</div>
</div>
</body>
</html>
"""


class HTMLOutput:
    def __init__(self):
        pass

    def output(self, url, headers, content, report, parsed_results):
        template = Template(TEMPLATE_HTML)
        result = template.render({
            "title": "HTML Report - " + url,
            "javascript": [
                "https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js",
                "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js"
            ],
            "css": [
                "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css"
            ],
            "report": report,
            "parsed": parsed_results,
            "headers": headers,
            "filters": {
                "csp_name": self._csp_name,
                "csp_value": self._csp_value,
                "human_time": self._human_time
            }
        })
        return result

    def _human_time(self, value):
        """Convert a duration in seconds into a human-readable string."""
        remaining = value
        label = ["second", "minute", "hour", "day", "year"]
        mult = [60, 60, 24, 365, 100000]
        result = " "

        for i in range(len(label)):
            if remaining == 0:
                break

            value = remaining % mult[i]
            if not value == 0:
                if value == 1:
                    result = " %d %s" % (value, label[i]) + result
                else:
                    result = " %d %s" % (value, label[i] + "s") + result

            remaining = (remaining - value) / mult[i]

        return result

    def _csp_name(self, value):
        """Map a CSP directive name to a human-readable description."""
        # Source : https://developer.mozilla.org/en-US/docs/Web/Security/CSP/CSP_policy_directives
        human_text = {
            "base-uri": "URIs that a user agent may use as the document base URL are limited to",
            "child-src": "Valid sources for web workers and nested browsing contexts loaded using elements such as <frame> and <iframe> are limited to",
            "connect-src": "Valid sources for fetch, XMLHttpRequest, WebSocket, and EventSource connections are limited to",
            "font-src": "Valid sources for fonts loaded using @font-face are limited to",
            "frame-src": "Valid sources for web workers and nested browsing contexts loading using elements such as <frame> and <iframe> are limited to",
            "img-src": "Valid sources of images and favicons are limited to",
            "manifest-src": "Which manifest can be applied to the resource is limited to ",
            "media-src": "Valid sources for loading media using the <audio> and <video> elements are limited to",
            "object-src": "Valid sources for the <object>, <embed>, and <applet> elements are limited to",
            "plugin-types": "Valid plugins that the user agent may invoke are limited to",
            "referrer": "Information in the referer (sic) header for links away from a page is limited to",
            "reflected-xss": "Instruction to the user agent to activate or deactivate any heuristics used to filter or block reflected cross-site scripting attacks is set to",
            "report-uri": "Content Security Policy violation will be reported to",
            "sandbox": "Instruction to apply restrictions to a page's actions including preventing popups, preventing the execution of plugins and scripts, and enforcing a same-origin policy is set to",
            "script-src": "Valid sources for JavaScript are limited to",
            "style-src": "Valid sources for stylesheets are limited to",
            "upgrade-insecure-requests": "Instruction to the user agents to treat all of a site's insecure URLs (those served over HTTP) as though they have been replaced with secure URLs (those served over HTTPS) is set to"
        }
        return human_text[value]

    def _csp_value(self, value):
        """Map a CSP source value to a human-readable description."""
        magic_value = {
            "*": "<span style='color: #FFA500'>All domains</span>",
            "'none'": "No URLs will match",
            "'self'": "Origin of the page",
            "'unsafe-inline'": "<span style='color: #FFA500'>Inline resources</span>",
            "'unsafe-eval'": "<span style='color: #FFA500'>eval() and similar methods for creating code from strings</span>",
            "data:": "<span style='color: #FFA500'>Data URIs</span>",
            "mediastream:": "Mediastream URIs",
            "blob:": "Blob URIs",
            "filesystem:": "Filesystem URIs",
            "http:": "<span style='color: #FFA500'>Any domain with the protocol http</span>",
            "https:": "<span style='color: #FFA500'>Any domain with the protocol https</span>",
            "wss:": "<span style='color: #FFA500'>Any domain with the protocol wss</span>"
        }

        if value in magic_value:
            return "<b>" + magic_value[value] + "</b>"

        prefix = ""
        suffix = ""
        suffix2 = ""

        protocol_match = re.search("^([a-z\\-]+)\\:\\/\\/", value)
        if protocol_match:
            suffix2 = " using the protocol '%s' " % protocol_match.group(1)
            value = value[len(protocol_match.group(0)):]

        if value[:2] == "*.":
            value = value[2:]
            prefix = "Subdomains of "

        if value[-2:] == ":*":
            value = value[:-2]
            suffix = " on any port "

        return prefix + "<b>" + value + "</b> " + suffix + suffix2
eXcomm/cjdns
refs/heads/master
node_build/dependencies/libuv/build/gyp/test/subdirectory/gyptest-top-all.py
261
#!/usr/bin/env python

# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies building a target and a subsidiary dependent target from a .gyp
file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.

There is a difference here in the default behavior of the underlying
build tools. Specifically, when building the entire "solution", Xcode
puts the output of each project relative to the .xcodeproj directory,
while Visual Studio (and our implementation of Make) put it in a build
directory relative to the "solution"--that is, the entry-point from
which you built the entire tree.
"""

import TestGyp

test = TestGyp.TestGyp()

test.run_gyp('prog1.gyp', chdir='src')

test.relocate('src', 'relocate/src')

test.build('prog1.gyp', test.ALL, chdir='relocate/src')

test.run_built_executable('prog1',
                          stdout="Hello from prog1.c\n",
                          chdir='relocate/src')

if test.format == 'xcode':
    chdir = 'relocate/src/subdir'
else:
    chdir = 'relocate/src'
test.run_built_executable('prog2',
                          chdir=chdir,
                          stdout="Hello from prog2.c\n")

test.pass_test()
opennode/nodeconductor
refs/heads/develop
waldur_core/server/admin/dashboard.py
1
from __future__ import unicode_literals

from django.apps import apps
from django.conf import settings
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from fluent_dashboard.dashboard import modules, FluentIndexDashboard, FluentAppIndexDashboard
import six

from waldur_core import __version__
from waldur_core.core import models as core_models, WaldurExtension
from waldur_core.structure import models as structure_models, SupportedServices


class CustomIndexDashboard(FluentIndexDashboard):
    """
    Custom index dashboard for admin site.
    """
    title = _('Waldur administration')

    def _get_installed_plugin_info(self):
        links = []
        for ext in WaldurExtension.get_extensions():
            app_config = self._get_app_config(ext.django_app())
            if not app_config:
                # App is not found
                continue

            name = self._get_app_name(app_config)
            version = self._get_app_version(app_config)
            links.append(
                {
                    'title': '%s %s' % (name, version),
                    'url': 'http://docs.waldur.com/',
                    'external': True,
                    'attrs': {'target': '_blank'},
                }
            )

        # Order plugins by title
        links = sorted(links, key=lambda item: item['title'].lower())

        # Core is the most important component, therefore
        # it should be the first item in the list
        links.insert(0, {
            'title': _('Waldur Core %s') % __version__,
            'url': 'http://docs.waldur.com/',
            'external': True,
            'attrs': {'target': '_blank'},
        })
        return links

    def _get_app_config(self, app_name):
        """
        Returns an app config for the given name, not by label.
        """
        matches = [app_config for app_config in apps.get_app_configs()
                   if app_config.name == app_name]
        if not matches:
            return
        return matches[0]

    def _get_app_name(self, app_config):
        """
        Strip redundant prefixes, because some apps don't specify
        prefix, while others use deprecated prefix.
        """
        return app_config.verbose_name\
            .replace('Waldur', '')\
            .strip()

    def _get_app_version(self, app_config):
        """
        Some plugins ship multiple applications and extensions.
        However all of them have the same version, because they are
        released together. That's why only the top-level module is used
        to fetch version information.
        """
        base_name = app_config.__module__.split('.')[0]
        module = __import__(base_name)
        return getattr(module, '__version__', 'N/A')

    def _get_quick_access_info(self):
        """
        Returns a list of ListLink items to be added to Quick Access tab.
        Contains:
        - links to Organizations, Projects and Users;
        - a link to shared service settings;
        - custom configured links in admin/settings FLUENT_DASHBOARD_QUICK_ACCESS_LINKS attribute;
        """
        quick_access_links = []

        # add custom links
        quick_access_links.extend(settings.FLUENT_DASHBOARD_QUICK_ACCESS_LINKS)

        for model in (structure_models.Project,
                      structure_models.Customer,
                      core_models.User,
                      structure_models.SharedServiceSettings):
            quick_access_links.append(self._get_link_to_model(model))

        return quick_access_links

    def _get_erred_resource_link(self, model, erred_amount, erred_state):
        result = self._get_link_to_model(model)
        result['title'] = _('%(num)s %(resources)s in ERRED state') % {
            'num': erred_amount,
            'resources': result['title'],
        }
        result['url'] = '%s?shared__exact=1&state__exact=%s' % (result['url'], erred_state)
        return result

    def _get_link_to_model(self, model):
        return {
            'title': six.text_type(model._meta.verbose_name_plural).capitalize(),
            'url': reverse('admin:%s_%s_changelist' % (model._meta.app_label, model._meta.model_name)),
            'external': True,
            'attrs': {'target': '_blank'},
        }

    def _get_link_to_instance(self, instance):
        return {
            'title': six.text_type(instance),
            'url': reverse('admin:%s_%s_change' % (instance._meta.app_label, instance._meta.model_name),
                           args=(instance.pk,)),
            'external': True,
            'attrs': {'target': '_blank'},
        }

    def _get_erred_shared_settings_module(self):
        """
        Returns a LinkList based module which contains link to shared service setting
        instances in ERRED state.
        """
        result_module = modules.LinkList(title=_('Shared provider settings in erred state'))
        result_module.template = 'admin/dashboard/erred_link_list.html'

        erred_state = structure_models.SharedServiceSettings.States.ERRED
        queryset = structure_models.SharedServiceSettings.objects
        settings_in_erred_state = queryset.filter(state=erred_state).count()

        if settings_in_erred_state:
            result_module.title = '%s (%s)' % (result_module.title, settings_in_erred_state)
            for service_settings in queryset.filter(state=erred_state).iterator():
                module_child = self._get_link_to_instance(service_settings)
                module_child['error'] = service_settings.error_message
                result_module.children.append(module_child)
        else:
            result_module.pre_content = _('Nothing found.')

        return result_module

    def _get_erred_resources_module(self):
        """
        Returns a list of links to resources which are in ERRED state and linked to
        a shared service settings.
        """
        result_module = modules.LinkList(title=_('Resources in erred state'))
        erred_state = structure_models.NewResource.States.ERRED
        children = []

        resource_models = SupportedServices.get_resource_models()
        resources_in_erred_state_overall = 0
        for resource_type, resource_model in resource_models.items():
            queryset = resource_model.objects.filter(service_project_link__service__settings__shared=True)
            erred_amount = queryset.filter(state=erred_state).count()
            if erred_amount:
                resources_in_erred_state_overall = resources_in_erred_state_overall + erred_amount
                link = self._get_erred_resource_link(resource_model, erred_amount, erred_state)
                children.append(link)

        if resources_in_erred_state_overall:
            result_module.title = '%s (%s)' % (result_module.title, resources_in_erred_state_overall)
            result_module.children = children
        else:
            result_module.pre_content = _('Nothing found.')

        return result_module

    def __init__(self, **kwargs):
        FluentIndexDashboard.__init__(self, **kwargs)

        self.children.append(modules.LinkList(
            _('Installed components'),
            layout='stacked',
            enabled=False,
            draggable=True,
            deletable=True,
            collapsible=True,
            children=self._get_installed_plugin_info()
        ))
        self.children.append(modules.LinkList(
            _('Quick access'),
            children=self._get_quick_access_info())
        )
        self.children.append(self._get_erred_shared_settings_module())
        self.children.append(self._get_erred_resources_module())


class CustomAppIndexDashboard(FluentAppIndexDashboard):
    def __init__(self, app_title, models, **kwargs):
        super(CustomAppIndexDashboard, self).__init__(app_title, models, **kwargs)
        path = self._get_app_models_path()
        self.children = [modules.ModelList(title=app_title, models=[path])]

    def _get_app_models_path(self):
        return '%s.models.*' % self.app_title.replace(' ', '.', 1).replace(' ', '_').lower()
Ban3/Limnoria
refs/heads/master
plugins/Lart/__init__.py
4
###
# Copyright (c) 2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions, and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions, and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#   * Neither the name of the author of this software nor the name of
#     contributors to this software may be used to endorse or promote products
#     derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###

"""
This plugin keeps a database of larts, and larts with it.
"""

import supybot
import supybot.world as world

# Use this for the version of this plugin.  You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"

# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.strike

# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}

from . import config
from . import plugin
from imp import reload
reload(plugin)  # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded.  Don't forget to import them as well!

if world.testing:
    from . import test

Class = plugin.Class
configure = config.configure

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
markwang2764/sku
refs/heads/master
node_modules/.3.5.0@node-gyp/gyp/gyptest.py
1752
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

__doc__ = """
gyptest.py -- test runner for GYP tests.
"""

import os
import optparse
import shlex  # needed by CommandRunner.execute() for string commands
import subprocess
import sys


class CommandRunner(object):
    """
    Executor class for commands, including "commands" implemented by
    Python functions.
    """
    verbose = True
    active = True

    def __init__(self, dictionary={}):
        self.subst_dictionary(dictionary)

    def subst_dictionary(self, dictionary):
        self._subst_dictionary = dictionary

    def subst(self, string, dictionary=None):
        """
        Substitutes (via the format operator) the values in the specified
        dictionary into the specified command.

        The command can be an (action, string) tuple. In all cases, we
        perform substitution on strings and don't worry if something isn't
        a string. (It's probably a Python function to be executed.)
        """
        if dictionary is None:
            dictionary = self._subst_dictionary
        if dictionary:
            try:
                string = string % dictionary
            except TypeError:
                pass
        return string

    def display(self, command, stdout=None, stderr=None):
        if not self.verbose:
            return
        if type(command) == type(()):
            func = command[0]
            args = command[1:]
            s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
        elif type(command) == type([]):
            # TODO: quote arguments containing spaces
            # TODO: handle meta characters?
            s = ' '.join(command)
        else:
            s = self.subst(command)
        if not s.endswith('\n'):
            s += '\n'
        sys.stdout.write(s)
        sys.stdout.flush()

    def execute(self, command, stdout=None, stderr=None):
        """
        Executes a single command.
        """
        if not self.active:
            return 0
        if type(command) == type(''):
            command = self.subst(command)
            cmdargs = shlex.split(command)
            if cmdargs[0] == 'cd':
                command = (os.chdir,) + tuple(cmdargs[1:])
        if type(command) == type(()):
            func = command[0]
            args = command[1:]
            return func(*args)
        else:
            if stdout is sys.stdout:
                # Same as passing sys.stdout, except python2.4 doesn't fail on it.
                subout = None
            else:
                # Open pipe for anything else so Popen works on python2.4.
                subout = subprocess.PIPE
            if stderr is sys.stderr:
                # Same as passing sys.stderr, except python2.4 doesn't fail on it.
                suberr = None
            elif stderr is None:
                # Merge with stdout if stderr isn't specified.
                suberr = subprocess.STDOUT
            else:
                # Open pipe for anything else so Popen works on python2.4.
                suberr = subprocess.PIPE
            p = subprocess.Popen(command,
                                 shell=(sys.platform == 'win32'),
                                 stdout=subout,
                                 stderr=suberr)
            p.wait()
            if stdout is None:
                self.stdout = p.stdout.read()
            elif stdout is not sys.stdout:
                stdout.write(p.stdout.read())
            if stderr not in (None, sys.stderr):
                stderr.write(p.stderr.read())
            return p.returncode

    def run(self, command, display=None, stdout=None, stderr=None):
        """
        Runs a single command, displaying it first.
        """
        if display is None:
            display = command
        self.display(display)
        return self.execute(command, stdout, stderr)


class Unbuffered(object):
    def __init__(self, fp):
        self.fp = fp

    def write(self, arg):
        self.fp.write(arg)
        self.fp.flush()

    def __getattr__(self, attr):
        return getattr(self.fp, attr)


sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)


def is_test_name(f):
    return f.startswith('gyptest') and f.endswith('.py')


def find_all_gyptest_files(directory):
    result = []
    for root, dirs, files in os.walk(directory):
        if '.svn' in dirs:
            dirs.remove('.svn')
        result.extend([os.path.join(root, f) for f in files if is_test_name(f)])
    result.sort()
    return result


def main(argv=None):
    if argv is None:
        argv = sys.argv

    usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-a", "--all", action="store_true",
                      help="run all tests")
    parser.add_option("-C", "--chdir", action="store", default=None,
                      help="chdir to the specified directory")
    parser.add_option("-f", "--format", action="store", default='',
                      help="run tests with the specified formats")
    parser.add_option("-G", '--gyp_option', action="append", default=[],
                      help="Add -G options to the gyp command line")
    parser.add_option("-l", "--list", action="store_true",
                      help="list available tests and exit")
    parser.add_option("-n", "--no-exec", action="store_true",
                      help="no execute, just print the command line")
    parser.add_option("--passed", action="store_true",
                      help="report passed tests")
    parser.add_option("--path", action="append", default=[],
                      help="additional $PATH directory")
    parser.add_option("-q", "--quiet", action="store_true",
                      help="quiet, don't print test command lines")
    opts, args = parser.parse_args(argv[1:])

    if opts.chdir:
        os.chdir(opts.chdir)

    if opts.path:
        extra_path = [os.path.abspath(p) for p in opts.path]
        extra_path = os.pathsep.join(extra_path)
        os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']

    if not args:
        if not opts.all:
            sys.stderr.write('Specify -a to get all tests.\n')
            return 1
        args = ['test']

    tests = []
    for arg in args:
        if os.path.isdir(arg):
            tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
        else:
            if not is_test_name(os.path.basename(arg)):
                print >>sys.stderr, arg, 'is not a valid gyp test name.'
                sys.exit(1)
            tests.append(arg)

    if opts.list:
        for test in tests:
            print test
        sys.exit(0)

    CommandRunner.verbose = not opts.quiet
    CommandRunner.active = not opts.no_exec
    cr = CommandRunner()

    os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
    if not opts.quiet:
        sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])

    passed = []
    failed = []
    no_result = []

    if opts.format:
        format_list = opts.format.split(',')
    else:
        # TODO: not duplicate this mapping from pylib/gyp/__init__.py
        format_list = {
            'aix5': ['make'],
            'freebsd7': ['make'],
            'freebsd8': ['make'],
            'openbsd5': ['make'],
            'cygwin': ['msvs'],
            'win32': ['msvs', 'ninja'],
            'linux2': ['make', 'ninja'],
            'linux3': ['make', 'ninja'],
            'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
        }[sys.platform]

    for format in format_list:
        os.environ['TESTGYP_FORMAT'] = format
        if not opts.quiet:
            sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)

        gyp_options = []
        for option in opts.gyp_option:
            gyp_options += ['-G', option]
        if gyp_options and not opts.quiet:
            sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)

        for test in tests:
            status = cr.run([sys.executable, test] + gyp_options,
                            stdout=sys.stdout,
                            stderr=sys.stderr)
            if status == 2:
                no_result.append(test)
            elif status:
                failed.append(test)
            else:
                passed.append(test)

    if not opts.quiet:
        def report(description, tests):
            if tests:
                if len(tests) == 1:
                    sys.stdout.write("\n%s the following test:\n" % description)
                else:
                    fmt = "\n%s the following %d tests:\n"
                    sys.stdout.write(fmt % (description, len(tests)))
                sys.stdout.write("\t" + "\n\t".join(tests) + "\n")

        if opts.passed:
            report("Passed", passed)
        report("Failed", failed)
        report("No result from", no_result)

    if failed:
        return 1
    else:
        return 0


if __name__ == "__main__":
    sys.exit(main())
schmunk42/compose
refs/heads/master
tests/__init__.py
20
from __future__ import absolute_import
from __future__ import unicode_literals

import sys

if sys.version_info >= (2, 7):
    import unittest  # NOQA
else:
    import unittest2 as unittest  # NOQA

try:
    from unittest import mock
except ImportError:
    import mock  # NOQA
aurelijusb/arangodb
refs/heads/devel
3rdParty/V8-4.3.61/build/gyp/test/win/gyptest-macro-projectname.py
344
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Make sure macro expansion of $(ProjectName) is handled.
"""

import TestGyp

import sys

if sys.platform == 'win32':
    test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

    CHDIR = 'vs-macros'
    test.run_gyp('projectname.gyp', chdir=CHDIR)
    test.build('projectname.gyp', test.ALL, chdir=CHDIR)
    test.built_file_must_exist('test_expansions_plus_something.exe',
                               chdir=CHDIR)
    test.built_file_must_exist('test_with_product_name_plus_something.exe',
                               chdir=CHDIR)

    test.pass_test()
minhtuancn/odoo
refs/heads/8.0
addons/website_sale/models/product.py
262
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import tools
from openerp.osv import osv, fields


class product_style(osv.Model):
    _name = "product.style"
    _columns = {
        'name': fields.char('Style Name', required=True),
        'html_class': fields.char('HTML Classes'),
    }


class product_pricelist(osv.Model):
    _inherit = "product.pricelist"
    _columns = {
        'code': fields.char('Promotional Code'),
    }


class product_public_category(osv.osv):
    _name = "product.public.category"
    _description = "Public Category"
    _order = "sequence, name"

    _constraints = [
        (osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
    ]

    def name_get(self, cr, uid, ids, context=None):
        res = []
        for cat in self.browse(cr, uid, ids, context=context):
            names = [cat.name]
            pcat = cat.parent_id
            while pcat:
                names.append(pcat.name)
                pcat = pcat.parent_id
            res.append((cat.id, ' / '.join(reversed(names))))
        return res

    def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        res = self.name_get(cr, uid, ids, context=context)
        return dict(res)

    def _get_image(self, cr, uid, ids, name, args, context=None):
        result = dict.fromkeys(ids, False)
        for obj in self.browse(cr, uid, ids, context=context):
            result[obj.id] = tools.image_get_resized_images(obj.image)
        return result

    def _set_image(self, cr, uid, id, name, value, args, context=None):
        return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)

    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
        'parent_id': fields.many2one('product.public.category', 'Parent Category', select=True),
        'child_id': fields.one2many('product.public.category', 'parent_id', string='Children Categories'),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of product categories."),

        # NOTE: there is no 'default image', because by default we don't show
        # thumbnails for categories. However if we have a thumbnail for at least
        # one category, then we display a default image on the other, so that the
        # buttons have consistent styling. In this case, the default image is set
        # by the js code.
        # NOTE2: image: all image fields are base64 encoded and PIL-supported
        'image': fields.binary("Image",
            help="This field holds the image used as image for the category, limited to 1024x1024px."),
        'image_medium': fields.function(_get_image, fnct_inv=_set_image,
            string="Medium-sized image", type="binary", multi="_get_image",
            store={
                'product.public.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Medium-sized image of the category. It is automatically "
                 "resized as a 128x128px image, with aspect ratio preserved. "
                 "Use this field in form views or some kanban views."),
        'image_small': fields.function(_get_image, fnct_inv=_set_image,
            string="Small-sized image", type="binary", multi="_get_image",
            store={
                'product.public.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
            },
            help="Small-sized image of the category. It is automatically "
                 "resized as a 64x64px image, with aspect ratio preserved. "
                 "Use this field anywhere a small image is required."),
    }


class product_template(osv.Model):
    _inherit = ["product.template", "website.seo.metadata"]
    _order = 'website_published desc, website_sequence desc, name'
    _name = 'product.template'
    _mail_post_access = 'read'

    def _website_url(self, cr, uid, ids, field_name, arg, context=None):
        res = dict.fromkeys(ids, '')
        for product in self.browse(cr, uid, ids, context=context):
            res[product.id] = "/shop/product/%s" % (product.id,)
        return res

    _columns = {
        # TODO FIXME tde: when website_mail/mail_thread.py inheritance work -> this field won't be necessary
        'website_message_ids': fields.one2many(
            'mail.message', 'res_id',
            domain=lambda self: [
                '&', ('model', '=', self._name), ('type', '=', 'comment')
            ],
            string='Website Comments',
        ),
        'website_published': fields.boolean('Available in the website', copy=False),
        'website_description': fields.html('Description for the website', translate=True),
        'alternative_product_ids': fields.many2many('product.template', 'product_alternative_rel', 'src_id', 'dest_id',
            string='Alternative Products', help='Appear on the product page'),
        'accessory_product_ids': fields.many2many('product.product', 'product_accessory_rel', 'src_id', 'dest_id',
            string='Accessory Products', help='Appear on the shopping cart'),
        'website_size_x': fields.integer('Size X'),
        'website_size_y': fields.integer('Size Y'),
        'website_style_ids': fields.many2many('product.style', string='Styles'),
        'website_sequence': fields.integer('Sequence', help="Determine the display order in the Website E-commerce"),
        'website_url': fields.function(_website_url, string="Website url", type="char"),
        'public_categ_ids': fields.many2many('product.public.category', string='Public Category',
            help="Those categories are used to group similar products for e-commerce."),
    }

    def _defaults_website_sequence(self, cr, uid, *l, **kwargs):
        cr.execute('SELECT MAX(website_sequence)+1 FROM product_template')
        next_sequence = cr.fetchone()[0] or 0
        return next_sequence

    _defaults = {
        'website_size_x': 1,
        'website_size_y': 1,
        'website_sequence': _defaults_website_sequence,
        'website_published': False,
    }

    def set_sequence_top(self, cr, uid, ids, context=None):
        cr.execute('SELECT MAX(website_sequence) FROM product_template')
        max_sequence = cr.fetchone()[0] or 0
        return self.write(cr, uid, ids, {'website_sequence': max_sequence + 1}, context=context)

    def set_sequence_bottom(self, cr, uid, ids, context=None):
        cr.execute('SELECT MIN(website_sequence) FROM product_template')
        min_sequence = cr.fetchone()[0] or 0
        return self.write(cr, uid, ids, {'website_sequence': min_sequence - 1}, context=context)

    def set_sequence_up(self, cr, uid, ids, context=None):
        product = self.browse(cr, uid, ids[0], context=context)
        cr.execute("""  SELECT id, website_sequence FROM product_template
                        WHERE website_sequence > %s AND website_published = %s
                        ORDER BY website_sequence ASC LIMIT 1""" % (product.website_sequence, product.website_published))
        prev = cr.fetchone()
        if prev:
            self.write(cr, uid, [prev[0]], {'website_sequence': product.website_sequence}, context=context)
            return self.write(cr, uid, [ids[0]], {'website_sequence': prev[1]}, context=context)
        else:
            return self.set_sequence_top(cr, uid, ids, context=context)

    def set_sequence_down(self, cr, uid, ids, context=None):
        product = self.browse(cr, uid, ids[0], context=context)
        cr.execute("""  SELECT id, website_sequence FROM product_template
                        WHERE website_sequence < %s AND website_published = %s
                        ORDER BY website_sequence DESC LIMIT 1""" % (product.website_sequence, product.website_published))
        next = cr.fetchone()
        if next:
            self.write(cr, uid, [next[0]], {'website_sequence': product.website_sequence}, context=context)
            return self.write(cr, uid, [ids[0]], {'website_sequence': next[1]}, context=context)
        else:
            return self.set_sequence_bottom(cr, uid, ids, context=context)


class product_product(osv.Model):
    _inherit = "product.product"

    def _website_url(self, cr, uid, ids, field_name, arg, context=None):
        res = {}
        for product in self.browse(cr, uid, ids, context=context):
            res[product.id] = "/shop/product/%s" % (product.product_tmpl_id.id,)
        return res

    _columns = {
        'website_url': fields.function(_website_url, string="Website url", type="char"),
    }


class product_attribute(osv.Model):
    _inherit = "product.attribute"
    _columns = {
        'type': fields.selection([('radio', 'Radio'), ('select', 'Select'), ('color', 'Color'), ('hidden', 'Hidden')], string="Type"),
    }
    _defaults = {
        'type': lambda *a: 'radio',
    }


class product_attribute_value(osv.Model):
    _inherit = "product.attribute.value"
    _columns = {
        'color': fields.char("HTML Color Index",
            help="Here you can set a specific HTML color index (e.g. #ff0000) to display the color on the website if the attribute type is 'Color'."),
    }
danielvdende/VizServe
refs/heads/master
util/messages.py
1
# This file contains default error messages to be used.

notFound = {
    "status": "error",
    "type": "404 Not found",
    "message": "Not found error"
}

serverError = {
    "status": "error",
    "type": "500 Server error",
    "message": "Unknown internal server error"
}

badRequest = {
    "status": "error",
    "type": "400 Bad request",
    "message": "Bad request, please check your syntax and try again"
}
yawd/django-sphinxdoc
refs/heads/master
sphinxdoc/models.py
2
# encoding: utf-8
"""
Models for django-sphinxdoc.
"""
from django.db import models

from sphinxdoc.validators import validate_isdir


class Project(models.Model):
    """
    Represents a Sphinx project. Each ``Project`` has a name, a slug and a
    path to the root directory of a Sphinx project (where Sphinx’
    ``conf.py`` is located).
    """
    name = models.CharField(max_length=100)
    slug = models.SlugField(unique=True,
            help_text=u'Used in the URL for the project. Must be unique.')
    path = models.CharField(max_length=255, validators=[validate_isdir],
            help_text=u'Directory that contains Sphinx’ <tt>conf.py</tt>.')

    def __unicode__(self):
        return self.name

    @models.permalink
    def get_absolute_url(self):
        return ('doc-index', (), {'slug': self.slug})


class Document(models.Model):
    """
    Represents a JSON encoded Sphinx document. The attributes ``title`` and
    ``body`` duplicate the corresponding keys in ``content`` and are used
    for the Haystack search.
    """
    project = models.ForeignKey(Project)
    path = models.CharField(max_length=255)
    content = models.TextField()
    title = models.CharField(max_length=255)
    body = models.TextField(blank=True)

    def __unicode__(self):
        return self.path

    @models.permalink
    def get_absolute_url(self):
        return ('doc-detail', (), {
            'slug': self.project.slug,
            'path': self.path,
        })
RAPD/RAPD
refs/heads/master
src/old_agents/subcontractors/xdsme/new/xdsme-0.4.9/XOconv/pycgtypes/vec4.py
12
####################################################################
# vec4 - 4-dimensional vector
#
# Copyright (C) 2002, Matthias Baas ([email protected])
#
# You may distribute under the terms of the BSD license, as
# specified in the file license.txt.
####################################################################

import types, math


# vec4
class vec4:
    """Four-dimensional vector.

    This class represents a 4D vector.
    """

    def __init__(self, *args):
        """Constructor.

        There are several possibilities how to initialize a vector:

        v = vec4()        -> v = <0,0,0,0>
        v = vec4(a)       -> v = <a,a,a,a>
        v = vec4(x,y)     -> v = <x,y,0,0>
        v = vec4(x,y,z)   -> v = <x,y,z,0>
        v = vec4(x,y,z,w) -> v = <x,y,z,w>

        Note that specifying just one value sets all four components to
        that value.

        Additionally you can wrap those values in a list or a tuple or
        specify them as a string:

        v = vec4([1,2,3]) -> v = <1,2,3,0>
        v = vec4("4,5")   -> v = <4,5,0,0>
        """
        if len(args) == 0:
            self.x, self.y, self.z, self.w = (0.0, 0.0, 0.0, 0.0)
        elif len(args) == 1:
            T = type(args[0])
            # scalar
            if T == types.FloatType or T == types.IntType or T == types.LongType:
                self.x, self.y, self.z, self.w = (args[0], args[0], args[0], args[0])
            # vec4
            elif isinstance(args[0], vec4):
                self.x, self.y, self.z, self.w = args[0]
            # Tuple/List
            elif T == types.TupleType or T == types.ListType:
                if len(args[0]) == 0:
                    self.x = self.y = self.z = self.w = 0.0
                elif len(args[0]) == 1:
                    self.x = self.y = self.z = args[0][0]
                    self.w = 0.0
                elif len(args[0]) == 2:
                    self.x, self.y = args[0]
                    self.z = 0.0
                    self.w = 0.0
                elif len(args[0]) == 3:
                    self.x, self.y, self.z = args[0]
                    self.w = 0.0
                elif len(args[0]) == 4:
                    self.x, self.y, self.z, self.w = args[0]
                else:
                    raise TypeError, "vec4() takes at most 4 arguments"
            # String
            elif T == types.StringType:
                s = args[0].replace(",", " ").replace("  ", " ").strip().split(" ")
                if s == [""]:
                    s = []
                f = map(lambda x: float(x), s)
                dummy = vec4(f)
                self.x, self.y, self.z, self.w = dummy
            # error
            else:
                raise TypeError, "vec4() arg can't be converted to vec4"
        elif len(args) == 2:
            self.x, self.y = args
            self.z, self.w = (0.0, 0.0)
        elif len(args) == 3:
            self.x, self.y, self.z = args
            self.w = 0.0
        elif len(args) == 4:
            self.x, self.y, self.z, self.w = args
        else:
            raise TypeError, "vec4() takes at most 4 arguments"

    def __repr__(self):
        return 'vec4(' + `self.x` + ', ' + `self.y` + ', ' + `self.z` + ', ' + `self.w` + ')'

    def __str__(self):
        fmt = "%1.4f"
        return '(' + fmt % self.x + ', ' + fmt % self.y + ', ' + fmt % self.z + ', ' + fmt % self.w + ')'

    def __eq__(self, other):
        """== operator

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> b=vec4(-0.3, 0.75, 0.5, 0.6)
        >>> c=vec4(-0.3, 0.75, 0.5, 0.6)
        >>> print a==b
        0
        >>> print b==c
        1
        >>> print a==None
        0
        """
        if isinstance(other, vec4):
            # compare all four components, w included
            return self.x == other.x and self.y == other.y and \
                   self.z == other.z and self.w == other.w
        else:
            return 0

    def __ne__(self, other):
        """!= operator

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> b=vec4(-0.3, 0.75, 0.5, 0.6)
        >>> c=vec4(-0.3, 0.75, 0.5, 0.6)
        >>> print a!=b
        1
        >>> print b!=c
        0
        >>> print a!=None
        1
        """
        if isinstance(other, vec4):
            # compare all four components, w included
            return self.x != other.x or self.y != other.y or \
                   self.z != other.z or self.w != other.w
        else:
            return 1

    def __add__(self, other):
        """Vector addition.

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> b=vec4(-0.3, 0.75, 0.5, 0.3)
        >>> print a+b
        (0.7000, 1.2500, -1.3000, 0.5000)
        """
        if isinstance(other, vec4):
            return vec4(self.x+other.x, self.y+other.y, self.z+other.z, self.w+other.w)
        else:
            raise TypeError, "unsupported operand type for +"

    def __sub__(self, other):
        """Vector subtraction.

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> b=vec4(-0.3, 0.75, 0.5, 0.3)
        >>> print a-b
        (1.3000, -0.2500, -2.3000, -0.1000)
        """
        if isinstance(other, vec4):
            return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)
        else:
            raise TypeError, "unsupported operand type for -"

    def __mul__(self, other):
        """Multiplication with a scalar or dot product.

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> b=vec4(-0.3, 0.75, 0.5, 0.3)
        >>> print a*2.0
        (2.0000, 1.0000, -3.6000, 0.4000)
        >>> print 2.0*a
        (2.0000, 1.0000, -3.6000, 0.4000)
        >>> print a*b
        -0.765
        """
        T = type(other)
        # vec4*scalar
        if T == types.FloatType or T == types.IntType or T == types.LongType:
            return vec4(self.x*other, self.y*other, self.z*other, self.w*other)
        # vec4*vec4
        if isinstance(other, vec4):
            return self.x*other.x + self.y*other.y + self.z*other.z + self.w*other.w
        # unsupported
        else:
            # Try to delegate the operation to the other operand
            if getattr(other, "__rmul__", None) != None:
                return other.__rmul__(self)
            else:
                raise TypeError, "unsupported operand type for *"

    __rmul__ = __mul__

    def __div__(self, other):
        """Division by scalar

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> print a/2.0
        (0.5000, 0.2500, -0.9000, 0.1000)
        """
        T = type(other)
        # vec4/scalar
        if T == types.FloatType or T == types.IntType or T == types.LongType:
            return vec4(self.x/other, self.y/other, self.z/other, self.w/other)
        # unsupported
        else:
            raise TypeError, "unsupported operand type for /"

    def __mod__(self, other):
        """Modulo (component wise)

        >>> a=vec4(3.0, 2.5, -1.8, 0.2)
        >>> print a%2.0
        (1.0000, 0.5000, 0.2000, 0.2000)
        """
        T = type(other)
        # vec4%scalar
        if T == types.FloatType or T == types.IntType or T == types.LongType:
            return vec4(self.x%other, self.y%other, self.z%other, self.w%other)
        # unsupported
        else:
            raise TypeError, "unsupported operand type for %"

    def __iadd__(self, other):
        """Inline vector addition.

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> b=vec4(-0.3, 0.75, 0.5, 0.3)
        >>> a+=b
        >>> print a
        (0.7000, 1.2500, -1.3000, 0.5000)
        """
        if isinstance(other, vec4):
            self.x += other.x
            self.y += other.y
            self.z += other.z
            self.w += other.w
            return self
        else:
            raise TypeError, "unsupported operand type for +="

    def __isub__(self, other):
        """Inline vector subtraction.

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> b=vec4(-0.3, 0.75, 0.5, 0.3)
        >>> a-=b
        >>> print a
        (1.3000, -0.2500, -2.3000, -0.1000)
        """
        if isinstance(other, vec4):
            self.x -= other.x
            self.y -= other.y
            self.z -= other.z
            self.w -= other.w
            return self
        else:
            raise TypeError, "unsupported operand type for -="

    def __imul__(self, other):
        """Inline multiplication (only with scalar)

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> a*=2.0
        >>> print a
        (2.0000, 1.0000, -3.6000, 0.4000)
        """
        T = type(other)
        # vec4*=scalar
        if T == types.FloatType or T == types.IntType or T == types.LongType:
            self.x *= other
            self.y *= other
            self.z *= other
            self.w *= other
            return self
        else:
            raise TypeError, "unsupported operand type for *="

    def __idiv__(self, other):
        """Inline division with scalar

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> a/=2.0
        >>> print a
        (0.5000, 0.2500, -0.9000, 0.1000)
        """
        T = type(other)
        # vec4/=scalar
        if T == types.FloatType or T == types.IntType or T == types.LongType:
            self.x /= other
            self.y /= other
            self.z /= other
            self.w /= other
            return self
        else:
            raise TypeError, "unsupported operand type for /="

    def __imod__(self, other):
        """Inline modulo

        >>> a=vec4(3.0, 2.5, -1.8, 0.2)
        >>> a%=2.0
        >>> print a
        (1.0000, 0.5000, 0.2000, 0.2000)
        """
        T = type(other)
        # vec4%=scalar
        if T == types.FloatType or T == types.IntType or T == types.LongType:
            self.x %= other
            self.y %= other
            self.z %= other
            self.w %= other
            return self
        else:
            raise TypeError, "unsupported operand type for %="

    def __neg__(self):
        """Negation

        >>> a=vec4(3.0, 2.5, -1.8, 0.2)
        >>> print -a
        (-3.0000, -2.5000, 1.8000, -0.2000)
        """
        return vec4(-self.x, -self.y, -self.z, -self.w)

    def __pos__(self):
        """
        >>> a=vec4(3.0, 2.5, -1.8, 0.2)
        >>> print +a
        (3.0000, 2.5000, -1.8000, 0.2000)
        """
        return vec4(+self.x, +self.y, +self.z, +self.w)

    def __abs__(self):
        """Return the length of the vector.

        abs(v) is equivalent to v.length().

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> print abs(a)
        2.12837966538
        """
        return math.sqrt(self*self)

    def __len__(self):
        """Length of the sequence (always 4)"""
        return 4

    def __getitem__(self, key):
        """Return a component by index (0-based)

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> print a[0]
        1.0
        >>> print a[1]
        0.5
        >>> print a[2]
        -1.8
        >>> print a[3]
        0.2
        """
        T = type(key)
        if T != types.IntType and T != types.LongType:
            raise TypeError, "index must be integer"

        if key == 0:
            return self.x
        elif key == 1:
            return self.y
        elif key == 2:
            return self.z
        elif key == 3:
            return self.w
        else:
            raise IndexError, "index out of range"

    def __setitem__(self, key, value):
        """Set a component by index (0-based)

        >>> a=vec4()
        >>> a[0]=1.5; a[1]=0.7; a[2]=-0.3; a[3]=0.2
        >>> print a
        (1.5000, 0.7000, -0.3000, 0.2000)
        """
        T = type(key)
        if T != types.IntType and T != types.LongType:
            raise TypeError, "index must be integer"

        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value
        elif key == 2:
            self.z = value
        elif key == 3:
            self.w = value
        else:
            raise IndexError, "index out of range"

    def length(self):
        """Return the length of the vector.

        v.length() is equivalent to abs(v).

        >>> a=vec4(1.0, 0.5, -1.8, 0.2)
        >>> print a.length()
        2.12837966538
        """
        return math.sqrt(self*self)

    def normalize(self):
        """Return normalized vector.

        >>> a=vec4(1.0, 0.5, -1.8, 1.2)
        >>> print a.normalize()
        (0.4107, 0.2053, -0.7392, 0.4928)
        """
        nlen = 1.0/math.sqrt(self*self)
        return vec4(self.x*nlen, self.y*nlen, self.z*nlen, self.w*nlen)


######################################################################

def _test():
    import doctest, vec4
    failed, total = doctest.testmod(vec4)
    print "%d/%d failed" % (failed, total)

if __name__ == "__main__":
    _test()
vmindru/ansible
refs/heads/devel
test/units/modules/network/f5/test_bigiq_application_fasthttp.py
21
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import json
import pytest
import sys

if sys.version_info < (2, 7):
    pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")

from ansible.module_utils.basic import AnsibleModule

try:
    from library.modules.bigiq_application_fasthttp import ApiParameters
    from library.modules.bigiq_application_fasthttp import ModuleParameters
    from library.modules.bigiq_application_fasthttp import ModuleManager
    from library.modules.bigiq_application_fasthttp import ArgumentSpec

    # In Ansible 2.8, Ansible changed import paths.
    from test.units.compat import unittest
    from test.units.compat.mock import Mock
    from test.units.compat.mock import patch

    from test.units.modules.utils import set_module_args
except ImportError:
    from ansible.modules.network.f5.bigiq_application_fasthttp import ApiParameters
    from ansible.modules.network.f5.bigiq_application_fasthttp import ModuleParameters
    from ansible.modules.network.f5.bigiq_application_fasthttp import ModuleManager
    from ansible.modules.network.f5.bigiq_application_fasthttp import ArgumentSpec

    # Ansible 2.8 imports
    from units.compat import unittest
    from units.compat.mock import Mock
    from units.compat.mock import patch

    from units.modules.utils import set_module_args

fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except Exception:
        pass

    fixture_data[path] = data
    return data


class TestParameters(unittest.TestCase):
    def test_module_parameters(self):
        args = dict(
            name='foo',
            description='my description',
            service_environment='bar',
            servers=[
                dict(
                    address='1.2.3.4',
                    port=8080
                ),
                dict(
                    address='5.6.7.8',
                    port=8000
                )
            ],
            inbound_virtual=dict(
                address='2.2.2.2',
                netmask='255.255.255.255',
                port=80
            )
        )

        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.config_set_name == 'foo'
        assert p.sub_path == 'foo'
        assert p.http_profile == 'profile_http'
        assert p.service_environment == 'bar'
        assert len(p.servers) == 2
        assert 'address' in p.servers[0]
        assert 'port' in p.servers[0]
        assert 'address' in p.servers[1]
        assert 'port' in p.servers[1]
        assert p.servers[0]['address'] == '1.2.3.4'
        assert p.servers[0]['port'] == 8080
        assert p.servers[1]['address'] == '5.6.7.8'
        assert p.servers[1]['port'] == 8000
        assert 'address' in p.inbound_virtual
        assert 'netmask' in p.inbound_virtual
        assert 'port' in p.inbound_virtual
        assert p.inbound_virtual['address'] == '2.2.2.2'
        assert p.inbound_virtual['netmask'] == '255.255.255.255'
        assert p.inbound_virtual['port'] == 80


class TestManager(unittest.TestCase):
    def setUp(self):
        self.spec = ArgumentSpec()
        self.patcher1 = patch('time.sleep')
        self.patcher1.start()

    def tearDown(self):
        self.patcher1.stop()

    def test_create(self, *args):
        set_module_args(dict(
            name='foo',
            description='my description',
            service_environment='bar',
            servers=[
                dict(
                    address='1.2.3.4',
                    port=8080
                ),
                dict(
                    address='5.6.7.8',
                    port=8000
                )
            ],
            inbound_virtual=dict(
                address='2.2.2.2',
                netmask='255.255.255.255',
                port=80
            ),
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.has_no_service_environment = Mock(return_value=False)
        mm.wait_for_apply_template_task = Mock(return_value=True)
        mm.create_on_device = Mock(return_value=True)
        mm.exists = Mock(side_effect=[False, True])

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['description'] == 'my description'
osantana/pactum
refs/heads/master
samples/orders_api.py
1
import pactum
from pactum import fields, verbs


class SKUField(fields.Field):
    extensions = {'openapi.type': 'string'}


class MoneyField(fields.DecimalField):
    extensions = {'openapi.type': 'float'}
    precision = 2


class ItemResource(pactum.Resource):
    fields = [
        fields.IntegerField(name="id"),
        fields.PositiveIntegerField(name="quantity"),
        SKUField(name="sku"),
        fields.StringField(name="description"),
        MoneyField(name="price", precision=2),
        MoneyField(name="total", precision=2),
    ]


class ItemListResource(pactum.ListResource):
    resource = ItemResource()


class OrderResource(pactum.Resource):
    fields = [
        fields.StringField(name="code"),
        fields.TimestampField(name="created_at"),
        fields.ResourceField(name="items", resource=ItemListResource()),
        MoneyField(name="total"),
    ]


order_resource = OrderResource()


class OrderListResource(pactum.ListResource):
    resource = order_resource


class OrderListRoute(pactum.Route):
    path = "/orders"
    actions = [
        pactum.Action(
            request=pactum.Request(verb=verbs.GET),
            responses=[
                pactum.Response(
                    status=200,
                    body=OrderListResource()
                )
            ],
            description='List Orders'
        )
    ]
    querystrings = [
        pactum.Querystring(
            name='limit',
            type=fields.IntegerField,
            description='Limits the number of orders in the response'
        ),
    ]


class OrderDetailRoute(pactum.Route):
    path = "/order/{code}"
    actions = [
        pactum.Action(
            request=pactum.Request(verb=verbs.GET),
            responses=[
                pactum.Response(
                    status=200,
                    body=order_resource)
            ],
            description='Retrieve order by code.'
        )
    ]


api = pactum.API(
    name="Orders API",
    versions=[
        pactum.Version(
            name="v1",
            routes=[
                OrderListRoute(),
                OrderDetailRoute(),
            ]
        )
    ]
)
osin-vladimir/ms-thesis-skoltech
refs/heads/master
mxnet-ssd/train_tir.py
1
import argparse
import tools.find_mxnet
import mxnet as mx
import os
import sys
from train.train_tir_net import train_tir_net
import datetime


def parse_args():
    parser = argparse.ArgumentParser(description='Train a Single-shot detection network')

    # data set params
    parser.add_argument('--dataset', dest='dataset', help='which dataset to use',
                        default='kaist', type=str)
    parser.add_argument('--image-set', dest='image_set', help='train set, can be trainval or train',
                        default='train', type=str)
    parser.add_argument('--val-image-set', dest='val_image_set', help='validation set, can be val or test',
                        default='test', type=str)
    parser.add_argument('--dataset-path', dest='dataset_path', help='dataset path',
                        default=os.path.join(os.getcwd(), 'data'), type=str)
    parser.add_argument('--network', dest='network', type=str, default='spectral',
                        help='which network to use')
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=32,
                        help='training batch size')
    parser.add_argument('--resume', dest='resume', type=int, default=-1,
                        help='resume training from epoch n')
    parser.add_argument('--finetune', dest='finetune', type=int, default=-1,
                        help='finetune from epoch n, rename the model before doing this')
    parser.add_argument('--pretrained', dest='pretrained', help='pretrained model prefix',
                        default=os.path.join(os.getcwd(), 'model'), type=str)
    parser.add_argument('--epoch', dest='epoch', help='epoch of pretrained model',
                        default=0, type=int)
    parser.add_argument('--prefix', dest='prefix', help='new model prefix',
                        default=os.path.join(os.getcwd(), 'model'), type=str)
    parser.add_argument('--gpus', dest='gpus', help='GPU devices to train with',
                        default='0', type=str)
    parser.add_argument('--begin-epoch', dest='begin_epoch', help='begin epoch of training',
                        default=0, type=int)
    parser.add_argument('--end-epoch', dest='end_epoch', help='end epoch of training',
                        default=300, type=int)
    parser.add_argument('--frequent', dest='frequent', help='frequency of logging',
                        default=10, type=int)
    parser.add_argument('--data-shape', dest='data_shape', type=int, default=300,
                        help='set image shape')
    parser.add_argument('--lr', dest='learning_rate', type=float, default=0.00001,
                        help='learning rate')
    parser.add_argument('--momentum', dest='momentum', type=float, default=0.9,
                        help='momentum')
    parser.add_argument('--wd', dest='weight_decay', type=float, default=0.000001,
                        help='weight decay')
    parser.add_argument('--lr-epoch', dest='lr_refactor_epoch', type=int, default=25,
                        help='refactor learning rate every N epoch')
    parser.add_argument('--lr-ratio', dest='lr_refactor_ratio', type=float, default=0.8,
                        help='ratio to refactor learning rate')
    parser.add_argument('--log', dest='log_file', type=str, default="train.log",
                        help='save training log to file')
    parser.add_argument('--monitor', dest='monitor', type=int, default=0,
                        help='log network parameters every N iters if larger than 0')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()

    args.dataset_path = args.dataset_path + '/' + args.dataset
    args.pretrained += '/' + args.network + '/' + args.network
    args.prefix += '/' + args.network + '/training_epochs/ssd'
    args.log_file = os.path.join(os.getcwd(), 'model', args.network,
                                 "train-" + str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")) + ".log")

    print
    print "//////////////////////////////////////////////////////////////////////////"
    print 'Parameters : '
    args_dict = vars(args)
    for key, value in args_dict.items():
        print key, ':', value
    print "//////////////////////////////////////////////////////////////////////////"
    print

    if args.dataset == 'kaist':
        mean_rgb = [89.909961557, 83.8302041534, 74.1431794542]
        std_rgb = [65.1171282799, 62.1827802828, 61.1897309395]
        mean_tir = [42.6318449296]
        std_tir = [27.2190767513]

    if args.dataset == 'cvc':
        mean_rgb = []
        std_rgb = []
        mean_tir = []
        std_tir = []

    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]
    ctx = mx.cpu() if not ctx else ctx

    train_tir_net(args.network, args.image_set, args.dataset_path,
                  args.batch_size, args.data_shape, mean_rgb, std_rgb, mean_tir, std_tir,
                  args.resume, args.finetune, args.pretrained,
                  args.epoch, args.prefix, ctx, args.begin_epoch, args.end_epoch,
                  args.frequent, args.learning_rate, args.weight_decay,
                  args.val_image_set, args.lr_refactor_epoch, args.lr_refactor_ratio,
                  args.monitor, args.log_file, args.momentum)
SlimRemix/android_external_chromium_org
refs/heads/lp5.1
third_party/closure_compiler/processor.py
32
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Process Chrome resources (HTML/CSS/JS) to handle <include> and <if> tags."""

from collections import defaultdict
import re
import os


class LineNumber(object):
  """A simple wrapper to hold line information (e.g. file.js:32).

  Args:
    source_file: A file path.
    line_number: The line in |file|.
  """
  def __init__(self, source_file, line_number):
    self.file = source_file
    self.line_number = int(line_number)


class FileCache(object):
  """An in-memory cache to speed up reading the same files over and over.

  Usage:
    FileCache.read(path_to_file)
  """
  _cache = defaultdict(str)

  @classmethod
  def read(cls, source_file):
    """Read a file and return it as a string.

    Args:
      source_file: a file to read and return the contents of.

    Returns:
      |file| as a string.
    """
    abs_file = os.path.abspath(source_file)
    cls._cache[abs_file] = cls._cache[abs_file] or open(abs_file, "r").read()
    return cls._cache[abs_file]


class Processor(object):
  """Processes resource files, inlining the contents of <include> tags,
  removing <if> tags, and retaining original line info.

  For example

    1: /* blah.js */
    2: <if expr="is_win">
    3: <include src="win.js">
    4: </if>

  would be turned into:

    1: /* blah.js */
    2:
    3: /* win.js */
    4: alert('Ew; Windows.');
    5:

  Args:
    source_file: A file to process.

  Attributes:
    contents: Expanded contents after inlining <include>s and stripping <if>s.
    included_files: A list of files that were inlined via <include>.
  """
  _IF_TAGS_REG = "</?if[^>]*?>"
  _INCLUDE_REG = "<include[^>]+src=['\"]([^>]*)['\"]>"

  def __init__(self, source_file):
    self._included_files = set()
    self._index = 0
    self._lines = self._get_file(source_file)

    while self._index < len(self._lines):
      current_line = self._lines[self._index]
      match = re.search(self._INCLUDE_REG, current_line[2])
      if match:
        file_dir = os.path.dirname(current_line[0])
        self._include_file(os.path.join(file_dir, match.group(1)))
      else:
        self._index += 1

    for i, line in enumerate(self._lines):
      self._lines[i] = line[:2] + (re.sub(self._IF_TAGS_REG, "", line[2]),)

    self.contents = "\n".join(l[2] for l in self._lines)

  # Returns a list of tuples in the format: (file, line number, line contents).
  def _get_file(self, source_file):
    lines = FileCache.read(source_file).splitlines()
    return [(source_file, lnum + 1, line) for lnum, line in enumerate(lines)]

  def _include_file(self, source_file):
    self._included_files.add(source_file)
    f = self._get_file(source_file)
    self._lines = self._lines[:self._index] + f + self._lines[self._index + 1:]

  def get_file_from_line(self, line_number):
    """Get the original file and line number for an expanded file's line number.

    Args:
      line_number: A processed file's line number.
    """
    line_number = int(line_number) - 1
    return LineNumber(self._lines[line_number][0],
                      self._lines[line_number][1])

  @property
  def included_files(self):
    """A list of files that were inlined via <include>."""
    return self._included_files
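A short usage sketch of the Processor class above; the two input files are hypothetical, with contents matching the docstring example:

# blah.js (4 lines):            win.js (2 lines):
#   /* blah.js */                 /* win.js */
#   <if expr="is_win">            alert('Ew; Windows.');
#   <include src="win.js">
#   </if>
p = Processor('blah.js')
print(p.contents)               # expanded source with <if>/</if> stripped
print(p.included_files)         # set(['win.js'])
loc = p.get_file_from_line(4)   # expanded line 4 originated in win.js line 2
print('%s:%d' % (loc.file, loc.line_number))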
cbingos/cpro
refs/heads/master
whoosh/lang/morph_en.py
43
""" Contains the variations() function for expanding an English word into multiple variations by programatically adding and removing suffixes. Translated to Python from the ``com.sun.labs.minion.lexmorph.LiteMorph_en`` class of Sun's `Minion search engine <https://minion.dev.java.net/>`_. """ import re from whoosh.compat import xrange, iteritems # Rule exceptions exceptions = [ "a", "abandoner abandon abandons abandoned abandoning abandonings abandoners", "abdomen abdomens", "about", "above", "acid acids acidic acidity acidities", "across", "act acts acted acting actor actors", "ad ads", "add adds added adding addings addition additions adder adders", "advertise advertises advertised advertising advertiser advertisers advertisement advertisements advertisings", "after", "again", "against", "ago", "all", "almost", "along", "already", "also", "although", "alumna alumnae alumnus alumni", "always", "amen amens", "amidships", "amid amidst", "among amongst", "an", "analysis analyses", "and", "another other others", "antenna antennas antennae", "antitheses antithesis", "any", "anyone anybody", "anything", "appendix appendixes appendices", "apropos", "aquarium aquariums aquaria", "argument arguments argue argues argued arguing arguings arguer arguers", "arise arises arose arisen ariser arisers arising arisings", "around", "as", "asbestos", "at", "atlas atlases", "auger augers augered augering augerings augerer augerers", "augment augments augmented augmenting augmentings augmentation augmentations augmenter augmenters", "automata automaton automatons", "automation automating automate automates automated automatic", "avoirdupois", "awake awakes awoke awaked awoken awaker awakers awaking awakings awakening awakenings", "away", "awful awfully awfulness", "axis axes axises", "bacillus bacilli", "bacterium bacteria", "bad worse worst badly badness", "bas", "bases basis", "bases base based basing basings basely baseness basenesses basement basements baseless basic basics", "be am are is was were been being", "bear bears bore borne bearing bearings bearer bearers", "beat beats beaten beating beatings beater beaters", "because", "become becomes became becoming", "beef beefs beeves beefed beefing", "beer beers", "before", "begin begins began begun beginning beginnings beginner beginners", "behalf behalves", "being beings", "bend bends bent bending bendings bender benders", "bereave bereaves bereaved bereft bereaving bereavings bereavement bereavements", "beside besides", "best bests bested besting", "bet bets betting bettor bettors", "betimes", "between", "beyond", "bid bids bade bidden bidding biddings bidder bidders", "bier biers", "bind binds bound binding bindings binder binders", "bit bits", "bite bites bit bitten biting bitings biter biters", "blackfoot blackfeet", "bleed bleeds bled bleeding bleedings bleeder bleeders", "blow blows blew blown blowing blowings blower blowers", "bookshelf bookshelves", "both", "bound bounds bounded bounding boundings bounder bounders boundless", "bourgeois bourgeoisie", "bra bras", "brahman brahmans", "break breaks broke broken breaking breakings breaker breakers", "breed breeds bred breeding breedings breeder breeders", "bring brings brought bringing bringings bringer bringers", "build builds built building buildings builder builders", "bus buses bused bussed busing bussing busings bussings buser busers busser bussers", "buss busses bussed bussing bussings busser bussers", "but", "buy buys bought buying buyings buyer buyers", "by", "calf calves calved calving 
calvings calver calvers", "can cans canned canning cannings canner canners", "can could cannot", "canoes canoe canoed canoeing canoeings canoer canoers", "catch catches caught catching catchings catcher catchers", "cement cements cemented cementing cementings cementer cementers", "cent cents", "center centers centered centering centerings centerless", "child children childless childish childishly", "choose chooses chose chosen choosing choosings chooser choosers", "cling clings clung clinging clingings clinger clingers", "colloquium colloquia colloquiums", "come comes came coming comings comer comers", "comment comments commented commenting commentings commenter commenters", "compendium compendia compendiums", "complement complements complemented complementing complementings complementer complementers complementary", "compliment compliments complimented complimenting complimentings complimenter complimenters complimentary", "concerto concertos concerti", "condiment condiments", "corps", "cortex cortices cortexes cortical", "couscous", "creep creeps crept creeping creepings creeper creepers creepy", "crisis crises", "criterion criteria criterial", "cryptanalysis cryptanalyses", "curriculum curricula curriculums curricular", "datum data", "day days daily", "deal deals dealt dealing dealings dealer dealers", "decrement decrements decremented decrementing decrementings decrementer decrementers decremental", "deer deers", "demented dementia", "desideratum desiderata", "diagnosis diagnoses diagnose diagnosed diagnosing diagnostic", "dialysis dialyses", "dice dices diced dicing dicings dicer dicers", "die dice", "die dies died dying dyings", "dig digs dug digging diggings digger diggers", "dive dives diver divers dove dived diving divings", "divest divests divester divesters divested divesting divestings divestment divestments", "do does did done doing doings doer doers", "document documents documented documenting documentings documenter documenters documentation documentations documentary", "doe does", "dove doves", "downstairs", "dozen", "draw draws drew drawn drawing drawings drawer drawers", "drink drinks drank drunk drinking drinkings drinker drinkers", "drive drives drove driven driving drivings driver drivers driverless", "due dues duly", "during", "e", "each", "eager eagerer eagerest eagerly eagerness eagernesses", "early earlier earliest", "easement easements", "eat eats ate eaten eating eatings eater eaters", "effluvium effluvia", "either", "element elements elementary", "elf elves elfen", "ellipse ellipses elliptic elliptical elliptically", "ellipsis ellipses elliptic elliptical elliptically", "else", "embolus emboli embolic embolism", "emolument emoluments", "emphasis emphases", "employ employs employed employing employer employers employee employees employment employments employable", "enough", "equilibrium equilibria equilibriums", "erratum errata", "ever", "every", "everything", "exotic exotically exoticness exotica", "experiment experiments experimented experimenting experimentings experimenter experimenters experimentation experimental", "extra extras", "fall falls fell fallen falling fallings faller fallers", "far farther farthest", "fee fees feeless", "feed feeds fed feeding feedings feeder feeders", "feel feels felt feeling feelings feeler feelers", "ferment ferments fermented fermenting fermentings fermentation fermentations fermenter fermenters", "few fewer fewest", "fight fights fought fighting fightings fighter fighters", "figment figments", "filament filaments", "find 
finds found finding findings finder finders", "firmament firmaments", "flee flees fled fleeing fleeings", "fling flings flung flinging flingings flinger flingers", "floe floes", "fly flies flew flown flying flyings flier fliers flyer flyers", "focus foci focuses focused focusing focusses focussed focussing focuser focal", "foment foments fomented fomenting fomentings fomenter fomenters", "foot feet", "foot foots footed footing footer footers", "footing footings footer footers", "for", "forbid forbids forbade forbidden forbidding forbiddings forbidder forbidders", "foresee foresaw foreseen foreseeing foreseeings foreseer foreseers", "forest forests forester foresting forestation forestations", "forget forgets forgot forgotten forgetting forgettings forgetter forgetters forgetful", "forsake forsakes forsook forsaken forsaking forsakings forsaker forsakers", "found founds founded founding foundings founder founders", "fragment fragments fragmented fragmenting fragmentings fragmentation fragmentations fragmenter fragmenters", "free frees freer freest freed freeing freely freeness freenesses", "freeze freezes froze frozen freezing freezings freezer freezers", "from", "full fully fuller fullest", "fuller fullers full fulls fulled fulling fullings", "fungus fungi funguses fungal", "gallows", "ganglion ganglia ganglions ganglionic", "garment garments", "gas gasses gassed gassing gassings gasser gassers", "gas gases gasses gaseous gasless", "gel gels gelled gelling gellings geller gellers", "german germans germanic germany German Germans Germanic Germany", "get gets got gotten getting gettings getter getters", "give gives gave given giving givings giver givers", "gladiolus gladioli gladioluses gladiola gladiolas gladiolae", "glans glandes", "gluiness gluey glue glues glued gluing gluings gluer gluers", "go goes went gone going goings goer goers", "godchild godchildren", "good better best goodly goodness goodnesses", "goods", "goose geese", "goose gooses goosed goosing goosings gooser goosers", "grandchild grandchildren", "grind grinds ground grinding grindings grinder grinders", "ground grounds grounded grounding groundings grounder grounders groundless", "grow grows grew grown growing growings grower growers growth", "gum gums gummed gumming gummings gummer gummers", "half halves", "halve halves halved halving halvings halver halvers", "hang hangs hung hanged hanging hangings hanger hangers", "have has had having havings haver havers", "he him his himself", "hear hears heard hearing hearings hearer hearers", "here", "hide hides hid hidden hiding hidings hider hiders", "hippopotamus hippopotami hippopotamuses", "hold holds held holding holdings holder holders", "honorarium honoraria honorariums", "hoof hoofs hooves hoofed hoofing hoofer hoofers", "how", "hum hums hummed humming hummings hummer hummers", "hymen hymens hymenal", "hypotheses hypothesis hypothesize hypothesizes hypothesized hypothesizer hypothesizing hypothetical hypothetically", "i", "if iffy", "impediment impediments", "implement implements implemented implementing implementings implementation implementations implementer implementers", "imply implies implied implying implyings implier impliers", "in inner", "inclement", "increment increments incremented incrementing incrementings incrementer incrementers incremental incrementally", "index indexes indexed indexing indexings indexer indexers", "index indexes indices indexical indexicals", "indoor indoors", "instrument instruments instrumented instrumenting instrumentings instrumenter 
instrumenters instrumentation instrumentations instrumental", "integument integumentary", "into", "it its itself", "java", "july julys", "keep keeps kept keeping keepings keeper keepers", "knife knifes knifed knifing knifings knifer knifers", "knife knives", "know knows knew known knowing knowings knower knowers knowledge", "lament laments lamented lamenting lamentings lamentation lamentations lamenter lamenters lamentable lamentably", "larva larvae larvas larval", "late later latest lately lateness", "latter latterly", "lay lays laid laying layer layers", "layer layers layered layering layerings", "lead leads led leading leadings leader leaders leaderless", "leaf leafs leafed leafing leafings leafer leafers", "leaf leaves leafless", "leave leaves left leaving leavings leaver leavers", "lend lends lent lending lendings lender lenders", "less lesser least", "let lets letting lettings", "lie lies lay lain lying lier liers", "lie lies lied lying liar liars", "life lives lifeless", "light lights lit lighted lighting lightings lightly lighter lighters lightness lightnesses lightless", "likely likelier likeliest", "limen limens", "lineament lineaments", "liniment liniments", "live alive living", "live lives lived living livings", "liver livers", "loaf loafs loafed loafing loafings loafer loafers", "loaf loaves", "logic logics logical logically", "lose loses lost losing loser losers loss losses", "louse lice", "lumen lumens", "make makes made making makings maker makers", "man mans manned manning mannings", "man men", "manly manlier manliest manliness manful manfulness manhood", "manic manically", "manner manners mannered mannerly mannerless mannerful", "many", "matrix matrices matrixes", "may might", "maximum maxima maximums maximal maximize maximizes maximized maximizing", "mean means meant meaning meanings meaningless meaningful", "mean meaner meanest meanly meanness meannesses", "median medians medianly medial", "medium media mediums", "meet meets met meeting meetings", "memorandum memoranda memorandums", "mere merely", "metal metals metallic", "might mighty mightily", "millenium millennia milleniums millennial", "mine mines mined mining minings miner miners", "mine my our ours", "minimum minima minimums minimal", "minus minuses", "miscellaneous miscellanea miscellaneously miscellaneousness miscellany", "molest molests molested molesting molestings molester molesters", "moment moments", "monument monuments monumental", "more most", "mouse mice mouseless", "much", "multiply multiplies multiplier multipliers multiple multiples multiplying multiplyings multiplication multiplications", "mum mums mummed mumming mummings mummer mummers", "must musts", "neither", "nemeses nemesis", "neurosis neuroses neurotic neurotics", "nomen", "none", "nos no noes", "not", "nothing nothings nothingness", "now", "nowadays", "nucleus nuclei nucleuses nuclear", "number numbers numbered numbering numberings numberless", "nutriment nutriments nutrient nutrients nutrition nutritions", "oasis oases", "octopus octopi octopuses", "of", "off", "offer offers offered offering offerings offerer offerers offeror offerors", "often", "oftentimes", "ointment ointments", "omen omens", "on", "once", "only", "ornament ornaments ornamented ornamenting ornamentings ornamentation ornamenter ornamenters ornamental", "outdoor outdoors", "outlay outlays", "outlie outlies outlay outlied outlain outlying outlier outliers", "ovum ova", "ox oxen", "parentheses parenthesis", "parliament parliaments parliamentary", "passerby passer-by passersby 
passers-by", "past pasts", "pay pays paid paying payings payer payers payee payees payment payments", "per", "perhaps", "person persons people", "phenomenon phenomena phenomenal", "pi", "picnic picnics picnicker picnickers picnicked picnicking picnickings", "pigment pigments pigmented pigmenting pigmentings pigmenter pigmenters pigmentation pigmentations", "please pleases pleased pleasing pleasings pleaser pleasers pleasure pleasures pleasuring pleasurings pleasant pleasantly pleasureless pleasureful", "plus pluses plusses", "polyhedra polyhedron polyhedral", "priest priests priestly priestlier priestliest priestliness priestless", "prognosis prognoses", "prostheses prosthesis", "prove proves proved proving provings proofs proof prover provers provable", "psychosis psychoses psychotic psychotics", "qed", "quiz quizzes quizzed quizzing quizzings quizzer quizzers", "raiment", "rather", "re", "real really", "redo redoes redid redone redoing redoings redoer redoers", "regiment regiments regimented regimenting regimenter regimenters regimentation regimental", "rendezvous", "requiz requizzes requizzed requizzing requizzings requizzer requizzers", "ride rides rode ridden riding ridings rider riders rideless", "ring rings rang rung ringing ringings ringer ringers ringless", "rise rises rose risen rising risings riser risers", "rose roses", "rudiment rudiments rudimentary", "rum rums rummed rumming rummings rummer rummers", "run runs ran running runnings runner runners", "sacrament sacraments sacramental", "same sameness", "sans", "saw saws sawed sawn sawing sawings sawyer sawyers", "say says said saying sayings sayer sayers", "scarf scarfs scarves scarfless", "schema schemata schemas", "sediment sediments sedimentary sedimentation sedimentations", "see sees saw seen seeing seeings seer seers", "seek seeks sought seeking seekings seeker seekers", "segment segments segmented segmenting segmentings segmenter segmenters segmentation segmentations", "self selves selfless", "sell sells sold selling sellings seller sellers", "semen", "send sends sent sending sendings sender senders", "sentiment sentiments sentimental", "series", "set sets setting settings", "several severally", "sew sews sewed sewn sewing sewings sewer sewers", "sewer sewers sewerless", "shake shakes shook shaken shaking shakings shaker shakers", "shall should", "shaman shamans", "shave shaves shaved shaven shaving shavings shaver shavers shaveless", "she her hers herself", "sheaf sheaves sheafless", "sheep", "shelf shelves shelved shelfing shelvings shelver shelvers shelfless", "shine shines shined shone shining shinings shiner shiners shineless", "shoe shoes shoed shod shoeing shoeings shoer shoers shoeless", "shoot shoots shot shooting shootings shooter shooters", "shot shots", "show shows showed shown showing showings shower showers", "shower showers showery showerless", "shrink shrinks shrank shrunk shrinking shrinkings shrinker shrinkers shrinkable", "sideways", "simply simple simpler simplest", "since", "sing sings sang sung singing singings singer singers singable", "sink sinks sank sunk sinking sinkings sinker sinkers sinkable", "sit sits sat sitting sittings sitter sitters", "ski skis skied skiing skiings skier skiers skiless skiable", "sky skies", "slay slays slew slain slaying slayings slayer slayers", "sleep sleeps slept sleeping sleepings sleeper sleepers sleepless", "so", "some", "something", "sometime sometimes", "soon", "spa spas", "speak speaks spoke spoken speaking speakings speaker speakers", "species specie", 
"spectrum spectra spectrums", "speed speeds sped speeded speeding speedings speeder speeders", "spend spends spent spending spendings spender spenders spendable", "spin spins spun spinning spinnings spinner spinners", "spoke spokes", "spring springs sprang sprung springing springings springer springers springy springiness", "staff staffs staves staffed staffing staffings staffer staffers", "stand stands stood standing standings", "stasis stases", "steal steals stole stolen stealing stealings stealer stealers", "stick sticks stuck sticking stickings sticker stickers", "stigma stigmata stigmas stigmatize stigmatizes stigmatized stigmatizing", "stimulus stimuli", "sting stings stung stinging stingings stinger stingers", "stink stinks stank stunk stinking stinkings stinker stinkers", "stomach stomachs", "stratum strata stratums", "stride strides strode stridden striding stridings strider striders", "string strings strung stringing stringings stringer stringers stringless", "strive strives strove striven striving strivings striver strivers", "strum strums strummed strumming strummings strummer strummers strummable", "such", "suffer suffers suffered suffering sufferings sufferer sufferers sufferable", "suggest suggests suggested suggesting suggestings suggester suggesters suggestor suggestors suggestive suggestion suggestions suggestible suggestable", "sum sums summed summing summings summer summers", "summer summers summered summering summerings", "supplement supplements supplemented supplementing supplementings supplementation supplementer supplementers supplementary supplemental", "supply supplies supplied supplying supplyings supplier suppliers", "swear swears swore sworn swearing swearings swearer swearers", "sweep sweeps swept sweeping sweepings sweeper sweepers", "swell swells swelled swollen swelling swellings", "swim swims swam swum swimming swimmings swimmer swimmers swimable", "swine", "swing swings swung swinging swingings swinger swingers", "syllabus syllabi syllabuses", "symposium symposia symposiums", "synapse synapses", "synapsis synapses", "synopsis synopses", "synthesis syntheses", "tableau tableaux tableaus", "take takes took taken taking takings taker takers takable", "teach teaches taught teaching teachings teacher teachers teachable", "tear tears tore torn tearing tearings tearer tearers tearable", "tegument teguments", "tell tells told telling tellings teller tellers tellable", "temperament temperaments temperamental temperamentally", "tenement tenements", "the", "there theres", "theses thesis", "they them their theirs themselves", "thief thieves thieving thievings", "think thinks thought thinking thinker thinkers thinkable", "this that these those", "thought thoughts thougtful thoughtless", "throw throws threw thrown throwing throwings thrower throwers throwable", "tic tics", "tie ties tied tying tyings tier tiers tieable tieless", "tier tiers tiered tiering tierings tierer tierers", "to", "toe toes toed toeing toeings toer toers toeless", "together togetherness", "too", "tooth teeth toothless", "topaz topazes", "torment torments tormented tormenting tormentings tormenter tormenters tormentable", "toward towards", "tread treads trod trodden treading treadings treader treaders", "tread treads treadless retread retreads", "true truly trueness", "two twos", "u", "under", "underlay underlays underlaid underlaying underlayings underlayer underlayers", "underlie underlies underlay underlain underlying underlier underliers", "undo undoes undid undone undoing undoings undoer 
undoers undoable", "unrest unrestful", "until", "unto", "up", "upon", "upstairs", "use uses user users used using useful useless", "various variously", "vehement vehemently vehemence", "versus", "very", "visit visits visited visiting visitings visitor visitors", "vortex vortexes vortices", "wake wakes woke waked woken waking wakings waker wakers wakeful wakefulness wakefulnesses wakeable", "wear wears wore worn wearing wearings wearer wearers wearable", "weather weathers weathered weathering weatherly", "weave weaves wove woven weaving weavings weaver weavers weaveable", "weep weeps wept weeping weepings weeper weepers", "wharf wharfs wharves", "where wheres", "whereas whereases", "whether whethers", "while whiles whilst whiled whiling", "whiz whizzes whizzed whizzing whizzings whizzer whizzers", "who whom whos whose whoses", "why whys", "wife wives wifeless", "will wills willed willing willings willful", "will would", "win wins won winning winnings winner winners winnable", "wind winds wound winding windings winder winders windable", "wind winds windy windless", "with", "within", "without", "wolf wolves", "woman women womanless womanly", "wound wounds wounded wounding woundings", "write writes wrote written writing writings writer writers writeable", "yeses yes", "yet yets", "you your yours yourself" ] _exdict = {} for exlist in exceptions: for ex in exlist.split(" "): _exdict[ex] = exlist # Programmatic rules vowels = "aeiouy" cons = "bcdfghjklmnpqrstvwxyz" rules = ( # Words ending in S # (e.g., happiness, business) (r"[%s].*[%s](iness)" % (vowels, cons), "y,ies,ier,iers,iest,ied,ying,yings,ily,inesses,iment,iments,iless,iful"), # (e.g., baseless, shoeless) (r"[%s].*(eless)" % vowels, "e,es,er,ers,est,ed,ing,ings,eing,eings,ely,eness,enesses,ement,ements,eness,enesses,eful"), # (e.g., gutless, hatless, spotless) (r"[%s][%s][bdgklmnprt]?(less)" % (cons, vowels), ",s,&er,&ers,&est,&ed,&ing,&ings,ly,ness,nesses,ment,ments,ful"), # (e.g., thoughtless, worthless) (r"[%s].*?(less)" % vowels, ",s,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,ful"), # (e.g., baseness, toeness) (r"[%s].*(eness)" % vowels, "e,es,er,ers,est,ed,ing,ings,eing,eings,ely,enesses,ement,ements,eless,eful"), # (e.g., bluntness, grayness) (r"[%s].*(ness)" % vowels, ",s,er,ers,est,ed,ing,ings,ly,nesses,ment,ments,less,ful"), # (e.g., albatross, kiss) (r"[%s]ss" % vowels, "es,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"), # (e.g., joyous, fractious, gaseous) (r"[%s].*(ous)" % vowels, "ly,ness"), # (e.g., tries, unties, jollies, beauties) (r"(ies)", "y,ie,yer,yers,ier,iers,iest,ied,ying,yings,yness,iness,ieness,ynesses,inesses,ienesses,iment,iement,iments,iements,yless,iless,ieless,yful,iful,ieful"), # (e.g., crisis, kinesis) (r"[%s].*(sis)" % vowels, "ses,sises,sisness,sisment,sisments,sisless,sisful"), # (e.g., bronchitis, bursitis) (r"[%s].*(is)" % vowels, "es,ness,ment,ments,less,ful"), (r"[%s].*[cs]h(es)" % vowels, ",e,er,ers,est,ed,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ement,ments,ements,less,eless,ful,eful"), # (e.g., tokenizes) // adds British variations (r"[%s].*[%s](izes)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ise,iser,isers,ised,ising,isings,isation,isations"), # (e.g., tokenises) // British variant // ~expertise (r"[%s].*[%s](ises)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ise,iser,isers,ised,ising,isings,isation,isations"), # (e.g., aches, arches) (r"[%s].*[jsxz](es)" % vowels, 
",e,er,ers,est,ed,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ement,ments,ements,less,eless,ful,eful"), # (e.g., judges, abridges) (r"[%s].*dg(es)" % vowels, "e,er,ers,est,ed,ing,ings,ely,eness,enesses,ment,ments,ement,ements,eless,eful"), # (e.g., trees, races, likes, agrees) covers all other -es words (r"e(s)", ",*"), # (e.g., segments, bisegments, cosegments) (r"segment(s)", ",*"), # (e.g., pigments, depigments, repigments) (r"pigment(s)", ",*"), # (e.g., judgments, abridgments) (r"[%s].*dg(ments)" % vowels, "ment,*ments"), # (e.g., merriments, embodiments) -iment in turn will generate y and *y (redo y) (r"[%s].*[%s]iment(s)" % (vowels, cons), ",*"), # (e.g., atonements, entrapments) (r"[%s].*ment(s)" % vowels, ",*"), # (e.g., viewers, meters, traders, transfers) (r"[%s].*er(s)" % vowels, ",*"), # (e.g., unflags) polysyllables (r"[%s].*[%s][%s][bdglmnprt](s)" % (vowels, cons, vowels), ",*"), # (e.g., frogs) monosyllables (r"[%s][%s][bdglmnprt](s)" % (vowels, cons), ",*"), # (e.g., killings, muggings) (r"[%s].*ing(s)" % vowels, ",*"), # (e.g., hulls, tolls) (r"[%s].*ll(s)" % vowels, ",*"), # e.g., boas, polkas, spas) don't generate latin endings (r"a(s)", ",er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"), # (e.g., beads, toads) (r"[%s].*[%s].*(s)" % (vowels, cons), ",*"), # (e.g., boas, zoos) (r"[%s].*[%s](s)" % (cons, vowels), ",er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"), # (e.g., ss, sss, ssss) no vowel (vowel case is already handled above) (r"ss()", ""), # (e.g., cds, lcds, m-16s) no vowel (can be a plural noun, but not verb) (r"[%s].*[%s1234567890](s)" % (cons, cons), ""), # Words ending in E # (e.g., apple, so it doesn't include apply) (r"appl(e)", "es,er,ers,est,ed,ing,ings,ely,eness,enesses,ement,ements,eless,eful"), # (e.g., supple, so it doesn't include supply) (r"suppl(e)", "es,er,ers,est,ed,ing,ings,ely,eness,enesses,ement,ements,eless,eful"), # (e.g., able, abominable, fungible, table, enable, idle, subtle) (r"[%s].*[%s]l(e)" % (vowels, cons), "es,er,ers,est,ed,ing,ings,y,ely,eness,enesses,ement,ements,eless,eful"), # (e.g., bookie, magpie, vie) (r"(ie)", "ies,ier,iers,iest,ied,ying,yings,iely,ieness,ienesses,iement,iements,ieless,ieful"), # (e.g., dye, redye, redeye) (r"ye()", "s,r,rs,st,d,ing,ings,ly,ness,nesses,ment,ments,less,ful"), # (e.g., judge, abridge) (r"[%s].*dg(e)" % vowels, "es,er,ers,est,ed,ing,ings,ely,eness,enesses,ment,ments,less,ful,ement,ements,eless,eful"), # (e.g., true, due, imbue) (r"u(e)", "es,er,ers,est,ed,ing,ings,eing,eings,ly,ely,eness,enesses,ment,ments,less,ful,ement,ements,eless,eful"), # (e.g., tokenize) // adds British variations (r"[%s].*[%s](ize)" % (vowels, cons), "izes,izer,izers,ized,izing,izings,ization,izations,ise,ises,iser,isers,ised,ising,isings,isation,isations"), # (e.g., tokenise) // British variant // ~expertise (r"[%s].*[%s](ise)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ises,iser,isers,ised,ising,isings,isation,isations"), # (e.g., tree, agree, rage, horse, hoarse) (r"[%s].*[%s](e)" % (vowels, cons), "es,er,ers,est,ed,ing,ings,eing,eings,ely,eness,enesses,ement,ements,eless,eful"), # Words ending in -ED # (e.g., agreed, freed, decreed, treed) (r"ree(d)", "ds,der,ders,ded,ding,dings,dly,dness,dnesses,dment,dments,dless,dful,,*"), # (e.g., feed, seed, Xweed) (r"ee(d)", "ds,der,ders,ded,ding,dings,dly,dness,dnesses,dment,dments,dless,dful"), # (e.g., tried) (r"[%s](ied)" % cons, 
"y,ie,ies,ier,iers,iest,ying,yings,ily,yly,iness,yness,inesses,ynesses,iment,iments,iless,iful,yment,yments,yless,yful"), # (e.g., controlled, fulfilled, rebelled) (r"[%s].*[%s].*l(led)" % (vowels, cons), ",s,er,ers,est,ing,ings,ly,ness,nesses,ment,ments,less,ful,&,&s,&er,&ers,&est,&ing,&ings,&y,&ness,&nesses,&ment,&ments,&ful"), # (e.g., pulled, filled, fulled) (r"[%s].*l(led)" % vowels, "&,&s,&er,&ers,&est,&ing,&ings,&y,&ness,&nesses,&ment,&ments,&ful"), # (e.g., hissed, grossed) (r"[%s].*s(sed)" % vowels, "&,&es,&er,&ers,&est,&ing,&ings,&ly,&ness,&nesses,&ment,&ments,&less,&ful"), # (e.g., hugged, trekked) (r"[%s][%s](?P<ed1>[bdgklmnprt])((?P=ed1)ed)", ",s,&er,&ers,&est,&ing,&ings,ly,ness,nesses,ment,ments,less,ful"), # (e.g., tokenize) // adds British variations (r"[%s].*[%s](ized)" % (vowels, cons), "izes,izer,izers,ize,izing,izings,ization,izations,ise,ises,iser,isers,ised,ising,isings,isation,isations"), # (e.g., tokenise) // British variant // ~expertise (r"[%s].*[%s](ized)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ises,iser,isers,ise,ising,isings,isation,isations"), # (e.g., spoiled, tooled, tracked, roasted, atoned, abridged) (r"[%s].*(ed)" % vowels, ",e,s,es,er,ers,est,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ement,ments,ements,less,eless,ful,eful"), # (e.g., bed, sled) words with a single e as the only vowel (r"ed()", "s,&er,&ers,&est,&ed,&ing,&ings,ly,ness,nesses,ment,ments,less,ful"), # Words ending in -ER # (e.g., altimeter, ammeter, odometer, perimeter) (r"meter()", "s,er,ers,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"), # (e.g., agreer, beer, budgeteer, engineer, freer) (r"eer()", "eers,eered,eering,eerings,eerly,eerness,eernesses,eerment,eerments,eerless,eerful,ee,ees,eest,eed,eeing,eeings,eely,eeness,eenesses,eement,eements,eeless,eeful,eerer,eerers,eerest"), # (e.g., acidifier, saltier) (r"[%s].*[%s](ier)" % (vowels, cons), "y,ie,ies,iest,ied,ying,yings,ily,yly,iness,yness,inesses,ynesses,yment,yments,yless,yful,iment,iments,iless,iful,iers,iered,iering,ierings,ierly,ierness,iernesses,ierment,ierments,ierless,ierful,ierer,ierers,ierest"), # (e.g., puller, filler, fuller) (r"[%s].*l(ler)" % vowels, "&,&s,&est,&ed,&ing,&ings,ly,lely,&ness,&nesses,&ment,&ments,&ful,&ers,&ered,&ering,&erings,&erly,&erness,&ernesses,&erments,&erless,&erful"), # (e.g., hisser, grosser) (r"[%s].*s(ser)" % vowels, "&,&es,&est,&ed,&ing,&ings,&ly,&ness,&nesses,&ment,&ments,&less,&ful,&ers,&ered,&ering,&erings,&erly,&erness,&ernesses,&erment,&erments,&erless,&erful"), # (e.g., bigger, trekker, hitter) (r"[%s][%s](?P<er1>[bdgkmnprt])((?P=er1)er)" % (cons, vowels), "s,&est,&ed,&ing,&ings,ly,ness,nesses,ment,ments,less,ful,&ers,&ered,&ering,&erings,&erly,&erness,&ernesses,&erments,&erless,&erful"), # (e.g., tokenize) // adds British variations (r"[%s].*[%s](izer)" % (vowels, cons), "izes,ize,izers,ized,izing,izings,ization,izations,ise,ises,iser,isers,ised,ising,isings,isation,isations"), # (e.g., tokenise) // British variant // ~expertise (r"[%s].*[%s](iser)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ises,ise,isers,ised,ising,isings,isation,isations"), #(e.g., actioner, atoner, icer, trader, accruer, churchgoer, prefer) (r"[%s].*(er)" % vowels, ",e,s,es,est,ed,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ments,less,ful,ement,ements,eless,eful,ers,ered,erred,ering,erring,erings,errings,erly,erness,ernesses,erment,erments,erless,erful,erer,erers,erest,errer,errers,errest"), # Words ending in -EST # (e.g., sliest, happiest, 
wittiest) (r"[%s](iest)" % cons, "y,ies,ier,iers,ied,ying,yings,ily,yly,iness,yness,inesses,ynesses,iment,iments,iless,iful"), # (e.g., fullest) (r"[%s].*l(lest)" % vowels, "&,&s,&er,&ers,&ed,&ing,&ings,ly,&ness,&nesses,&ment,&ments,&ful"), # (e.g., grossest) (r"[%s].*s(sest)" % vowels, "&,&es,&er,&ers,&ed,&ing,&ings,&ly,&ness,&nesses,&ment,&ments,&less,&ful"), # (e.g., biggest) (r"[%s][%s](?P<est1>[bdglmnprst])((?P=est1)est)" % (cons, vowels), ",s,&er,&ers,&ed,&ing,&ings,ly,ness,nesses,ment,ments,less,ful"), # (e.g., basest, archest, rashest) (r"[%s].*([cs]h|[jsxz])(est)" % vowels, "e,es,er,ers,ed,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ments,less,ful,ement,ements,eless,eful,ests,ester,esters,ested,esting,estings,estly,estness,estnesses,estment,estments,estless,estful"), # (e.g., severest, Xinterest, merest) (r"er(est)", "e,es,er,ers,ed,eing,eings,ely,eness,enesses,ement,ements,eless,eful,ests,ester,esters,ested,esting,estings,estly,estness,estnesses,estment,estments,estless,estful"), # (e.g., slickest, coolest, ablest, amplest, protest, quest) (r"[%s].*(est)" % vowels, ",e,s,es,er,ers,ed,ing,ings,ly,ely,ness,eness,nesses,enesses,ment,ments,less,ful,ement,ements,eless,eful,ests,ester,esters,ested,esting,estings,estly,estness,estnesses,estment,estments,estless,estful"), # (e.g., rest, test) (r"est", "s,er,ers,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"), # Words ending in -FUL # (e.g., beautiful, plentiful) (r"[%s].*[%s](iful)" % (vowels, cons), "ifully,ifulness,*y"), # (e.g., hopeful, sorrowful) (r"[%s].*(ful)" % vowels, "fully,fulness,,*"), # Words ending in -ICAL (r"[%s].*(ical)" % vowels, "ic,ics,ically"), # Words ending in -IC (r"[%s].*(ic)" % vowels, "ics,ical,ically"), # Words ending in -ING # (e.g., dying, crying, supplying) (r"[%s](ying)" % cons, "yings,ie,y,ies,ier,iers,iest,ied,iely,yly,ieness,yness,ienesses,ynesses,iment,iments,iless,iful"), # (e.g., pulling, filling, fulling) (r"[%s].*l(ling)" % vowels, ",*,&,&s,&er,&ers,&est,&ed,&ings,&ness,&nesses,&ment,&ments,&ful"), # (e.g., hissing, grossing, processing) (r"[%s].*s(sing)" % vowels, "&,&s,&er,&ers,&est,&ed,&ings,&ly,&ness,&nesses,&ment,&ments,&less,&ful"), # (e.g., hugging, trekking) (r"[%s][%s](?P<ing1>[bdgklmnprt])((?P=ing1)ing)" % (cons, vowels), ",s,&er,&ers,&est,&ed,&ings,ly,ness,nesses,ment,ments,less,ful"), # (e.g., freeing, agreeing) (r"eeing()", "ee,ees,eer,eers,eest,eed,eeings,eely,eeness,eenesses,eement,eements,eeless,eeful"), # (e.g., ageing, aweing) (r"[%s].*(eing)" % vowels, "e,es,er,ers,est,ed,eings,ely,eness,enesses,ement,ements,eless,eful"), # (e.g., toying, playing) (r"[%s].*y(ing)" % vowels, ",s,er,ers,est,ed,ings,ly,ingly,ness,nesses,ment,ments,less,ful"), # (e.g., editing, crediting, expediting, siting, exciting) (r"[%s].*[%s][eio]t(ing)" % (vowels, cons), ",*,*e,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"), # (e.g., robing, siding, doling, translating, flaking) (r"[%s][%s][bdgklmt](ing)" % (cons, vowels), "*e,ings,inger,ingers,ingest,inged,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"), # (e.g., tokenize) // adds British variations (r"[%s].*[%s](izing)" % (vowels, cons), "izes,izer,izers,ized,ize,izings,ization,izations,ise,ises,iser,isers,ised,ising,isings,isation,isations"), # (e.g., tokenise) // British variant // ~expertise (r"[%s].*[%s](ising)" % (vowels, cons), "ize,izes,izer,izers,ized,izing,izings,ization,izations,ises,iser,isers,ised,ise,isings,isation,isations"), # (e.g., icing, aging, achieving, amazing, 
    # housing)
    (r"[%s][cgsvz](ing)" % vowels,
     "*e,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
    # (e.g., dancing, troubling, arguing, bluing, carving)
    (r"[%s][clsuv](ing)" % cons,
     "*e,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
    # (e.g., charging, bulging)
    (r"[%s].*[lr]g(ing)" % vowels,
     "*e,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
    # (e.g., farming, harping, interesting, bedspring, redwing)
    (r"[%s].*[%s][bdfjkmnpqrtwxz](ing)" % (vowels, cons),
     ",*,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
    # (e.g., spoiling, reviling, autoing, egging, hanging, hingeing)
    (r"[%s].*(ing)" % vowels,
     ",*,*e,ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),
    # (e.g., wing, thing) monosyllables
    (r"(ing)",
     "ings,inger,ingers,ingest,inged,inging,ingings,ingly,ingness,ingnesses,ingment,ingments,ingless,ingful"),

    # -LEAF rules omitted

    # Words ending in -MAN
    # (e.g., policewomen, hatchetmen, dolmen)
    (r"(man)",
     "man,mens,mener,meners,menest,mened,mening,menings,menly,menness,mennesses,menless,menful"),

    # Words ending in -MENT
    # (e.g., segment, bisegment, cosegment, pigment, depigment, repigment)
    (r"segment|pigment", "s,ed,ing,ings,er,ers,ly,ness,nesses,less,ful"),
    # (e.g., judgment, abridgment)
    (r"[%s].*dg(ment)" % vowels, "*e"),
    # (e.g., merriment, embodiment)
    (r"[%s].*[%s](iment)" % (vowels, cons), "*y"),
    # (e.g., atonement, entrapment)
    (r"[%s].*[%s](ment)" % (vowels, cons), ",*"),

    # Words ending in -O
    # (e.g., taboo, rodeo)
    (r"[%s]o()" % vowels,
     "s,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
    # (e.g., tomato, bonito)
    (r"[%s].*o()" % vowels,
     "s,es,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),

    # Words ending in -UM
    # (e.g., datum, quantum, tedium, strum, [oil]drum, vacuum)
    (r"[%s].*(um)" % vowels,
     "a,ums,umer,ummer,umers,ummers,umed,ummed,uming,umming,umings,ummings,umness,umments,umless,umful"),

    # Words ending in -Y
    # (e.g., ably, horribly, wobbly)
    (r"[%s].*b(ly)" % vowels,
     "le,les,ler,lers,lest,led,ling,lings,leness,lenesses,lement,lements,leless,leful"),
    # (e.g., happily, dizzily)
    (r"[%s].*[%s](ily)" % (vowels, cons),
     "y,ies,ier,iers,iest,ied,ying,yings,yness,iness,ynesses,inesses,iment,iments,iless,iful"),
    # (e.g., peaceful+ly)
    (r"[%s].*ful(ly)" % vowels, ",*"),
    # (e.g., fully, folly, coolly, fatally, dally)
    (r"[%s].*l(ly)" % vowels,
     ",*,lies,lier,liers,liest,lied,lying,lyings,liness,linesses,liment,liments,liless,liful,*l"),
    # (e.g., monopoly, Xcephaly, holy)
    (r"[%s](ly)" % vowels,
     "lies,lier,liers,liest,lied,lying,lyings,liness,linesses,liment,liments,liless,liful"),
    # (e.g., frequently, comely, deeply, apply, badly)
    (r"[%s].*(ly)" % vowels,
     ",*,lies,lier,liers,liest,lied,lying,lyings,liness,linesses,lyless,lyful"),
    # (e.g., happy, ply, spy, cry)
    (r"[%s](y)" % cons,
     "ies,ier,iers,iest,ied,ying,yings,ily,yness,iness,ynesses,inesses,iment,iments,iless,iful,yment,yments,yless,yful"),
    # (e.g., betray, gay, stay)
    (r"[%s]y()" % vowels,
     "s,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),

    # Root rules
    # (e.g., fix, arch, rash)
    (r"[%s].*(ch|sh|[jxz])()" % vowels,
     "es,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
    # (e.g., unflag, open, besot)
    (r"[%s].*[%s][%s][bdglmnprt]()" % (vowels, cons, vowels),
     "s,er,ers,est,ed,ing,ings,&er,&ers,&est,&ed,&ing,&ings,ly,ness,nesses,ment,ments,less,ful"),
    # (e.g., bed, cop)
    (r"[%s][%s][bdglmnprt]()" % (cons, vowels),
     "s,&er,&ers,&est,&ed,&ing,&ings,ly,ness,nesses,ment,ments,less,ful"),
    # (e.g., schemata, automata)
    (r"[%s].*[%s][%s]ma(ta)" % (vowels, cons, vowels),
     ",s,tas,tum,tums,ton,tons,tic,tical"),
    # (e.g., chordata, data, errata, sonata, toccata)
    (r"[%s].*t(a)" % vowels, "as,ae,um,ums,on,ons,ic,ical"),
    # (e.g., polka, spa, schema, ova, polyhedra)
    (r"[%s].*[%s](a)" % (vowels, cons),
     "as,aed,aing,ae,ata,um,ums,on,ons,al,atic,atical"),
    # (e.g., full)
    (r"[%s].*ll()" % vowels,
     "s,er,ers,est,ed,ing,ings,y,ness,nesses,ment,ments,-less,ful"),
    # (e.g., spoon, rhythm) catch-all; interpolate the vowel class here like
    # every other root rule does
    (r"[%s].*()" % vowels,
     "s,er,ers,est,ed,ing,ings,ly,ness,nesses,ment,ments,less,ful"),
)

# There are a limited number of named groups available in a single
# regular expression, so we'll partition the list of rules into
# smaller chunks.

_partition_size = 20
_partitions = []
for p in xrange(0, len(rules) // _partition_size + 1):
    start = p * _partition_size
    end = (p + 1) * _partition_size
    pattern = "|".join("(?P<_g%s>%s)$" % (i, r[0])
                       for i, r in enumerate(rules[start:end]))
    _partitions.append(re.compile(pattern))


def variations(word):
    """Given an English word, returns a collection of morphological variations
    on the word by algorithmically adding and removing suffixes. The variation
    list may contain non-words (e.g. render -> renderment).

    >>> variations("pull")
    set(['pull', 'pullings', 'pullnesses', 'pullful', 'pullment', 'puller', ... ])
    """

    if word in _exdict:
        return _exdict[word].split(" ")

    for i, p in enumerate(_partitions):
        match = p.search(word)
        if match:
            # Get the named group that matched
            num = int([k for k, v in iteritems(match.groupdict())
                       if v is not None and k.startswith("_g")][0][2:])
            # Get the positional groups for the matched group (all other
            # positional groups are None)
            groups = [g for g in match.groups() if g is not None]
            ending = groups[-1]
            root = word[:0 - len(ending)] if ending else word

            out = set((word,))
            results = rules[i * _partition_size + num][1]
            for result in results.split(","):
                if result.startswith("&"):
                    out.add(root + root[-1] + result[1:])
                elif result.startswith("*"):
                    # set.union() returns a new set without modifying this
                    # one, so update in place to keep the recursive results
                    out |= variations(root + result[1:])
                else:
                    out.add(root + result)
            return out

    return [word]


if __name__ == '__main__':
    import time

    # time.clock() is gone as of Python 3.8; perf_counter() is the
    # portable replacement
    t = time.perf_counter()
    s = variations("rendering")
    print(time.perf_counter() - t)
    print(len(s))
ericpre/hyperspy
refs/heads/RELEASE_next_minor
hyperspy/conftest.py
2
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.

try:
    # Set traits toolkit to work in a headless system
    # Capture error when toolkit is already previously set which typically
    # occurs when building the doc locally
    from traits.etsconfig.api import ETSConfig
    ETSConfig.toolkit = "null"
except ValueError:
    # in case ETSConfig.toolkit was already set previously.
    pass

# pytest-mpl 0.7 already imports pyplot, so setting the matplotlib backend to
# 'agg' as early as we can is useless for testing.
import matplotlib.pyplot as plt
import pytest
import numpy as np
import matplotlib

import hyperspy.api as hs

matplotlib.rcParams['figure.max_open_warning'] = 25
matplotlib.rcParams['interactive'] = False
hs.preferences.Plot.saturated_pixels = 0.0
hs.preferences.Plot.cmap_navigator = 'viridis'
hs.preferences.Plot.cmap_signal = 'viridis'
hs.preferences.Plot.pick_tolerance = 5.0
# Set parallel to False by default, so only
# those tests with parallel=True are run in parallel
hs.preferences.General.parallel = False


@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    doctest_namespace['np'] = np
    doctest_namespace['plt'] = plt
    doctest_namespace['hs'] = hs


@pytest.fixture
def pdb_cmdopt(request):
    return request.config.getoption("--pdb")


def setup_module(mod, pdb_cmdopt):
    if pdb_cmdopt:
        import dask
        dask.set_options(get=dask.local.get_sync)


from matplotlib.testing.conftest import mpl_test_settings


try:
    import pytest_mpl
except ImportError:
    # Register dummy marker to allow running the test suite without pytest-mpl
    def pytest_configure(config):
        config.addinivalue_line(
            "markers",
            "mpl_image_compare: dummy marker registration to allow running "
            "without the pytest-mpl plugin."
        )
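For context, a sketch of what the autouse ``add_np`` fixture above enables: doctests collected anywhere in the package can refer to ``np``, ``plt`` and ``hs`` without importing them. The function below is hypothetical, not part of HyperSpy:

def scaled(signal, factor):
    """Multiply a signal by a constant (hypothetical example).

    >>> s = hs.signals.Signal1D(np.arange(10))  # 'hs' and 'np' are injected
    >>> scaled(s, 2).data.sum()                 # via doctest_namespace
    90
    """
    return signal * factor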
hvy/chainer
refs/heads/master
chainer/function_node.py
3
import collections
import contextlib
import heapq
import inspect
import traceback
import weakref

import six

import chainer
from chainer import _backprop_utils
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function_hook
from chainer.graph_optimizations.static_graph_utilities \
    import static_forward_optimizations
from chainer import utils
from chainer.utils import type_check
from chainer import variable
import chainerx


def _to_variable_with_chainerx_fallback_array(
        chainerx_device, chainerx_array, fallback_array):
    # chainerx_array can be None.
    assert (
        chainerx_array is None
        or chainerx_array.device == chainerx_device.device)
    var = variable.Variable._init_unchecked(
        chainerx_array,
        device=chainerx_device,
        requires_grad=(
            False if chainerx_array is None
            else chainerx_array.is_backprop_required()))
    var._chainerx_fallback_array = fallback_array
    return var


class FunctionNode(object):
    """Function node of the computational graph.

    FunctionNode is a class representing a node in a computational graph. The
    node corresponds to an application of a differentiable function to input
    variables.

    When a differentiable function is applied to :class:`~chainer.Variable`
    objects, it creates an instance of FunctionNode implementation and calls
    its :meth:`apply` method. The :meth:`apply` method basically does the
    following three things.

    1. Adding an edge from the function node to the variable node
       corresponding to each input. The node of each input is extracted by
       :attr:`Variable.node <chainer.Variable.node>`.
    2. Computing the output arrays of the function.
    3. Creating a :class:`~chainer.Variable` object for each output array and
       adding an edge from the node of the variable to the function node.

    The output variables are then returned.

    .. admonition:: Example

        Let ``x`` be an instance of :class:`~chainer.Variable` and ``f`` be
        an instance of :class:`FunctionNode` taking only one argument.
        Then the following code

        >>> import numpy, chainer
        >>> x = chainer.Variable(numpy.zeros(10))
        >>> f = chainer.functions.math.identity.Identity()
        >>> y = f.apply((x,))[0]

        computes a new variable ``y`` and creates backward references. The
        backward references are actually set as per the following diagram::

            x.node <--- f <--- y.node

        If an application of another function ``g`` occurs as

        >>> g = chainer.functions.math.identity.Identity()
        >>> z = g.apply((x,))[0]

        then the graph grows with a branch::

                     |--- f <--- y.node
            x.node <-+
                     |--- g <--- z.node

        Note that the branching is correctly managed on backward computation,
        i.e. the gradients from ``f`` and ``g`` are accumulated to the
        gradient of ``x``.

    Every function-node implementation should provide :meth:`forward` and
    :meth:`backward`. Instead of overriding :meth:`forward`, one can also
    implement :meth:`forward_cpu` and :meth:`forward_gpu` when the
    implementations for CPU and GPU arrays are totally different.

    Note that the input and output variables are inaccessible from
    :meth:`backward` by default. If it needs accesses to these variables, the
    :meth:`forward` method (or its CPU/GPU variants) has to call
    :meth:`retain_inputs` and :meth:`retain_outputs` appropriately. The
    retained input/output variables can be accessed from :meth:`backward` by
    calling :meth:`get_retained_inputs` and :meth:`get_retained_outputs`.

    .. note::

        There are two types of differentiable functions in Chainer (since
        v3). The first type is of a function using a subclass of
        :class:`~chainer.Function`, which is called *old-style differentiable
        function*.
The second type is of a function using a subclass of :class:`FunctionNode`, which is called **new-style differentiable function**. There are several advantages on using the new-style differentiable function. - The new-style differentiable function supports *differentiable backpropagation*. The backpropagated gradients computed through the new-style differentiable functions themselves support further backpropagations so that the automatic higher-order differentiation is available. - The backpropagation of the new-style differentiable function can be more computationally efficient because the interface allows an implementation to omit the computation of unneeded input gradients. Note that the new-style differentiable function is the standard way of defining a function node of the computational graph in Chainer; old- style differentiable functions are implemented as wrappers of the new- style differentiable functions. Attributes: ~FunctionNode.inputs: A tuple of the input :class:`~chainer.variable.VariableNode` objects. ~FunctionNode.outputs: A tuple of weak references to the output :class:`~chainer.variable.VariableNode` objects. ~FunctionNode.rank (int): An ordinal following the topological order of the computational graph. ~FunctionNode.stack: Stack trace retrieved at the forward computation. The stack trace is available only in the debug mode. .. versionadded:: 3.0.0 """ inputs = None outputs = None _input_layouts = None _output_layouts = None _output_count = None rank = 0 stack = None _input_indexes_to_retain = None _output_indexes_to_retain = None _retained_output_data = None _local_function_hooks = None _supports_static_optimizations = False # True if the function node is operating on ChainerX arrays and it falls # back to NumPy/CuPy implementation. _is_chainerx_fallback_mode = False # chainerx.Device instance if _is_chainerx_fallback_mode == True chainerx_device = None _chainerx_retained_inputs = None _chainerx_retained_outputs = None lazy_grad_sum = False is_elementwise = False @property def local_function_hooks(self): """Ordered dictionary of registered function hooks. Contrary to ``chainer.thread_local.function_hooks``, which registers its elements to all functions, Function hooks in this property is specific to this function. """ if self._local_function_hooks is None: self._local_function_hooks = collections.OrderedDict() return self._local_function_hooks @property def _n_local_function_hooks(self): return (0 if self._local_function_hooks is None else len(self._local_function_hooks)) @property def label(self): """Short text that represents the function. The default implementation returns its type name. Each function should override it to give more information. """ return self.__class__.__name__ @property def output_data(self): """A tuple of the retained output arrays. This property is mainly used by :class:`Function`. Users basically do not have to use this property; use :meth:`get_retained_outputs` instead. 
""" if self._is_chainerx_fallback_mode: retained_output_data = [ None if var is None else var.array for var in self._chainerx_retained_outputs] else: if self._retained_output_data is None: raise RuntimeError('retained output data is gone') retained_output_data = self._retained_output_data out_data = [None] * self._output_count for index, data in six.moves.zip(self._output_indexes_to_retain, retained_output_data): out_data[index] = data return tuple(out_data) @property def _impl_name(self): return self.__class__.__name__ def __call__(self, *args, **kwargs): if self.__class__.__module__.startswith('chainer.'): msg = '''\ Chainer's built-in function class object ({}) which is derived from \ chainer.FunctionNode has been called as if it were a callable. \ Use FunctionNode.apply() method instead. Furthermore, it's not recommended that you use built-in function classes \ directly; use corresponding function aliases (those with snake_case name, \ such as F.convolution_nd) instead.\ '''.format(self.__class__.__name__) else: msg = '''\ A function class object ({}) which is derived from \ chainer.FunctionNode has been called as if it were a callable. \ Use apply() method instead.\ '''.format(self.__class__.__name__) raise RuntimeError(msg) def apply(self, inputs): """Computes output variables and grows the computational graph. Basic behavior is expressed in the documentation of :class:`FunctionNode`. .. note:: If the :data:`~Variable.data` attributes of the input variables exist on a GPU device, that device is made current before calling :meth:`forward`, so implementers do not need to take care of device selection in most cases. Args: inputs: Tuple of input variables. Each element can be either :class:`~chainer.Variable` or :ref:`ndarray`. If the element is an ndarray, it is automatically wrapped with :class:`~chainer.Variable`. Returns: A tuple of output :class:`~chainer.Variable` objects. """ chainerx_in_data = None chainerx_device = None is_chainerx, in_data = _extract_apply_in_data(inputs) utils._check_arrays_forward_compatible(in_data, self.label) if is_chainerx: # Try ChainerX C++ implementation. # If it's supported, the output arrays are wrapped with Variables # and returned. # If not supported, FunctionNode.forward_chainerx should return # Fallback. # In that case the input arrays are converted to numpy.ndarray # or cupy.ndarray (depending on the ChainerX backend) and # forward computation falls back to the conventional # FunctionNode.forward() implementaion. outputs = self.forward_chainerx(in_data) if outputs is not chainer.Fallback: # Supported. 
Wrap with variables and return assert isinstance(outputs, tuple) return tuple([ variable.Variable._init_unchecked( y, requires_grad=y.is_backprop_required(), is_chainerx_array=True) for y in outputs]) # Fall back to FunctionNode.forward() chainerx_in_data, in_data, chainerx_device = ( self._chainerx_apply_fallback_preprocess(in_data, inputs)) self._is_chainerx_fallback_mode = True self.chainerx_device = chainerx_device is_debug = chainer.is_debug() if is_debug: # Keep stack trace for debug self.stack = traceback.extract_stack() input_vars = [chainer.as_variable(x) for x in inputs] self._input_layouts = tuple([x.layout for x in input_vars]) if configuration.config.type_check: self._check_data_type_forward(in_data) self.check_layout_forward(input_vars) # Call preprocess hooks hooks = chainer.get_function_hooks() if self._n_local_function_hooks > 0: hooks = collections.OrderedDict(hooks) hooks.update(self.local_function_hooks) hooks = hooks.values() # avoid six for performance for hook in hooks: hook.forward_preprocess(self, in_data) # Forward propagation with chainer.using_device(backend.get_device_from_array(*in_data)): self._input_indexes_to_retain = None self._output_indexes_to_retain = None if chainer.config.schedule_func is not None: outputs = static_forward_optimizations(self, in_data) elif self._is_chainerx_fallback_mode: # In ChainerX fallback, __class__ is temporarily replaced with # the fabricated one with automatic attribute fallback. with _chainerx_attribute_fallback(self, chainerx_device): outputs = self.forward(in_data) else: # In normal case, simply run the forward method. outputs = self.forward(in_data) # Check for output array types if not isinstance(outputs, tuple): raise TypeError( 'forward output must be a tuple ({})\n' 'Actual: {}'.format(self.label, type(outputs))) if self.is_elementwise: if not all([y.shape == outputs[0].shape for y in outputs]): raise RuntimeError( 'An elementwise function returned outputs with ' 'different shapes.\n' 'Function: {}\n' 'Input shapes: {}\n' 'Output shapes: {}'.format( self.label, ', '.join(repr(x.shape) for x in in_data), ', '.join(repr(y.shape) for y in outputs), )) if not chainer.is_arrays_compatible(outputs): if not all( isinstance(y, chainer.get_array_types()) for y in outputs): raise TypeError( 'forward output must be a tuple of ndarrays.\n' 'Function: {}\n' 'Actual output types: {}' .format( self.label, tuple(type(y) for y in outputs))) raise TypeError( 'incompatible array types are mixed in the forward output ' '({}).\n' 'Actual: {}'.format( self.label, ', '.join(str(type(x)) for x in outputs))) # If output layouts are not specified, assign the default layouts.
if self.is_elementwise: assert self._output_layouts is None layout = self._input_layouts[0] self._output_layouts = (layout,) * len(outputs) elif self._output_layouts is None: self._output_layouts = (None,) * len(outputs) # Call postprocess hooks for hook in hooks: hook.forward_postprocess(self, in_data) # NaN check of output values if is_debug: for out in outputs: if out is not None and chainer.backend._contains_nan(out): msg = ('NaN is detected on forward computation of ' '{}'.format(self.label)) raise RuntimeError(msg) self._output_count = len(outputs) if self._is_chainerx_fallback_mode: ret = self._chainerx_apply_fallback_postprocess( chainerx_device, chainerx_in_data, inputs, outputs) else: requires_grad = any([x.requires_grad for x in input_vars]) ret = tuple( [variable.Variable( y, requires_grad=requires_grad, layout=layout) for y, layout in zip(outputs, self.output_layouts)]) if configuration.config.enable_backprop: # Topological ordering self.rank = max( [x.rank for x in input_vars]) if input_vars else 0 # Add backward edges for y in ret: y.creator_node = self self.inputs = tuple([x.node for x in input_vars]) # Add forward edges (must be weak references) self.outputs = tuple([weakref.ref(y.node) for y in ret]) if self._input_indexes_to_retain is not None: for index in self._input_indexes_to_retain: input_vars[index].retain_data() if self._output_indexes_to_retain is not None: retained_data = [] for index in self._output_indexes_to_retain: ret[index].retain_data() retained_data.append(outputs[index]) self._retained_output_data = tuple(retained_data) self.lazy_grad_sum = configuration.config.lazy_grad_sum return ret def _check_data_type_forward(self, in_data): in_layouts = self.input_layouts in_shapes = None if any([layout is not None for layout in in_layouts]): in_shapes = tuple([ chainer.memory_layouts._transpose_shape(x.shape, layout, None) for x, layout in zip(in_data, in_layouts)]) in_type = type_check.get_light_types(in_data, shapes=in_shapes) try: with type_check.light_mode: self.check_type_forward(in_type) return except type_check.InvalidType: # Ignore errors on first run pass in_type = type_check.get_types( in_data, 'in_types', False, shapes=in_shapes) with type_check.get_function_check_context(self): self.check_type_forward(in_type) def check_type_forward(self, in_types): """Checks types of input data before forward propagation. This method is called before :meth:`forward` and validates the types of input variables using :ref:`the type checking utilities <type-check-utils>`. Args: in_types (~chainer.utils.type_check.TypeInfoTuple): The type information of input variables for :meth:`forward`. 
""" pass def check_layout_forward(self, inputs): if self.is_elementwise: if not all([x.layout == inputs[0].layout for x in inputs]): raise RuntimeError( 'Inputs with mixed memory layouts were given to ' 'an elementwise function.\n' 'Function: {}\n' 'Input layouts: {}\n'.format( self.label, ', '.join(str(x.layout) for x in inputs), )) else: if not all([x.layout is None for x in inputs]): raise RuntimeError( 'Inputs with non-standard layouts were given to ' 'a function without explicit `check_layout_forward` ' 'implementation.\n' 'Function: {}\n' 'Input layouts: {}\n'.format( self.label, ', '.join(str(x.layout) for x in inputs), )) def _chainerx_apply_fallback_preprocess(self, in_data, inputs): chainerx_in_data = in_data in_data = [] device = None for data, x in six.moves.zip(chainerx_in_data, inputs): if data is None: fallback_data = None else: # Use the cached fallback arrays as inputs if they exist. x_is_variable = isinstance(x, variable.Variable) if x_is_variable and x._chainerx_fallback_array is not None: fallback_data = x._chainerx_fallback_array if device is None: device = x.device else: fallback_data = backend.from_chx(data) if device is None: device = backend.ChainerxDevice(data.device) # Update the fallback cache if possible. if x_is_variable: x._chainerx_fallback_array = fallback_data in_data.append(fallback_data) in_data = tuple(in_data) return chainerx_in_data, in_data, device def _chainerx_apply_fallback_postprocess( self, chainerx_device, chainerx_in_data, inputs, outputs): # TODO(hvy): Take configuration.config.enable_backprop into # account? chainerx_out_data = chainerx_device.send(outputs) # Insert a ChainerX op-node that calls FunctionNode.backward in # backprop. Note that chainerx_out_data may not require gradients. chainerx._core._function_node_forward( self, chainerx_in_data, chainerx_out_data, [] if self._input_indexes_to_retain is None else self._input_indexes_to_retain, [] if self._output_indexes_to_retain is None else self._output_indexes_to_retain) self.inputs = tuple([ None if x is None else variable._ChainerxVariableNodeProps(x) for x in inputs]) ret = tuple([ _to_variable_with_chainerx_fallback_array( chainerx_device, chainerx_out_array, out_array) for chainerx_out_array, out_array in six.moves.zip(chainerx_out_data, outputs)]) return ret def forward_chainerx(self, inputs): """Computes the output arrays from the input ChainerX arrays. This method may check the input arrays and other attributes to see if the computation can be done using ChainerX implementation. If it's not supported, :data:`chainer.Fallback` should be returned instead of output arrays. In that case, computation using conventional Python implementation will be performed. Args: inputs: Tuple of input array(s). Returns: Tuple of output array(s) or :data:`chainer.Fallback`\\ . """ return chainer.Fallback def forward(self, inputs): """Computes the output arrays from the input arrays. It delegates the procedure to :meth:`forward_cpu` or :meth:`forward_gpu` by default. Which of them this method selects is determined by the type of input arrays. Implementations of :class:`FunctionNode` must implement either CPU/GPU methods or this method. Args: inputs: Tuple of input array(s). Returns: Tuple of output array(s). .. warning:: Implementations of :class:`FunctionNode` must take care that the return value must be a tuple even if it returns only one array. 
""" assert len(inputs) > 0 if isinstance(inputs[0], cuda.ndarray): return self.forward_gpu(inputs) return self.forward_cpu(inputs) def forward_cpu(self, inputs): """Computes the output arrays from the input NumPy arrays. Args: inputs: Tuple of input :class:`numpy.ndarray` objects. Returns: Tuple of output arrays. Each element can be NumPy or CuPy arrays. .. warning:: Implementation of :class:`FunctionNode` must take care that the return value must be a tuple even if it returns only one array. """ raise NotImplementedError def forward_gpu(self, inputs): """Computes the output arrays from the input CuPy arrays. Args: inputs: Tuple of input :class:`cupy.ndarray` objects. Returns: Tuple of output arrays. Each element can be NumPy or CuPy arrays. .. warning:: Implementation of :class:`FunctionNode` must take care that the return value must be a tuple even if it returns only one array. """ raise NotImplementedError @property def input_layouts(self): assert self._input_layouts is not None return self._input_layouts @property def output_layouts(self): assert self._output_layouts is not None return self._output_layouts @output_layouts.setter def output_layouts(self, layouts): assert isinstance(layouts, tuple) self._output_layouts = layouts def retain_inputs(self, indexes): """Lets specified input variable nodes keep data arrays. By calling this method from :meth:`forward`, the function node can specify which inputs are required for backprop. The input variables with retained arrays can then be obtained by calling :meth:`get_retained_inputs` from inside :meth:`backward`. Unlike :class:`~chainer.Function`, the function node **DOES NOT** keep input arrays by default. If you want to keep some or all input arrays, do not forget to call this method. Note that **this method must not be called from the outside of** :meth:`forward`. Args: indexes (iterable of int): Indexes of input variables that the function will require for backprop. """ self._input_indexes_to_retain = indexes def retain_outputs(self, indexes): """Lets specified output variable nodes keep data arrays. By calling this method from :meth:`forward`, the function node can specify which outputs are required for backprop. If this method is not called, no output variables will be marked to keep their data array at the point of returning from :meth:`apply`. The output variables with retained arrays can then be obtained by calling :meth:`get_retained_outputs` from inside :meth:`backward`. .. note:: It is recommended to use this method if the function requires some or all output arrays in backprop. The function can also use output arrays just by keeping references to them directly, although it might affect the performance of later function applications on the output variables. Note that **this method must not be called from the outside of** :meth:`forward`. Args: indexes (iterable of int): Indexes of output variables that the function will require for backprop. """ self._output_indexes_to_retain = indexes def backward(self, target_input_indexes, grad_outputs): """Computes gradients w.r.t.\\ specified inputs given output gradients. This method is used to compute one step of the backpropagation corresponding to the forward computation of this function node. Given the gradients w.r.t. output variables, this method computes the gradients w.r.t. specified input variables. Note that this method does not need to compute any input gradients not specified by ``target_input_indices``. 
Unlike :meth:`Function.backward() <chainer.Function.backward>`, gradients are given as :class:`~chainer.Variable` objects and this method itself has to return input gradients as :class:`~chainer.Variable` objects. It enables the function node to return the input gradients with the full computational history, in which case it supports *differentiable backpropagation* or *higher-order differentiation*. The default implementation returns ``None``\\ s, which means the function is not differentiable. Args: target_input_indexes (tuple of int): Sorted indices of the input variables w.r.t. which the gradients are required. It is guaranteed that this tuple contains at least one element. grad_outputs (tuple of :class:`~chainer.Variable`\\ s): Gradients w.r.t. the output variables. If the gradient w.r.t. an output variable is not given, the corresponding element is ``None``. Returns: Tuple of variables that represent the gradients w.r.t. specified input variables. The length of the tuple can be the same as either ``len(target_input_indexes)`` or the number of inputs. In the latter case, the elements not specified by ``target_input_indexes`` will be discarded. .. seealso:: :meth:`backward_accumulate` provides an alternative interface that allows you to implement the backward computation fused with the gradient accumulation. """ return (None,) * len(target_input_indexes) def backward_accumulate(self, target_input_indexes, grad_outputs, grad_inputs): """Computes gradients w.r.t.\\ specified inputs and accumulates them. This method provides a way to fuse the backward computation and the gradient accumulation in the case that multiple functions are applied to the same variable. Users have to override either this method or :meth:`backward`. Implementing :meth:`backward` is often simpler and is recommended if you do not need to provide efficient gradient accumulation. Args: target_input_indexes (tuple of int): Sorted indices of the input variables w.r.t. which the gradients are required. It is guaranteed that this tuple contains at least one element. grad_outputs (tuple of Variable): Gradients w.r.t. the output variables. If the gradient w.r.t. an output variable is not given, the corresponding element is ``None``. grad_inputs (tuple of Variable): Gradients w.r.t. the input variables specified by ``target_input_indexes``. These values are computed by other computation paths. If there is no gradient value existing for the variable, the corresponding element is ``None``. See also the note below. Returns: Tuple of variables that represent the gradients w.r.t. specified input variables. Unlike :meth:`backward`, the length of the tuple **must** be the same as that of ``target_input_indexes``. .. note:: Gradient variables in ``grad_outputs`` are distinct, even if a variable is passed to multiple input arguments of the function. This is an implementation-detail convention to avoid the complication of correctly accumulating gradients in such a case. Usually, only the first position of ``grad_inputs`` corresponding to these input arguments may contain the gradient variable corresponding to that input variable, and other entries are set to ``None``. This is not the case with the ``lazy_grad_sum`` feature. This behavior might be changed in a future version. """ # If backward_accumulate is implemented, it should be equivalent to # the following code using backward(). This code is provided for # convenience, and it's *not* used unless you override it. You don't # have to use backward().
assert isinstance(target_input_indexes, tuple) assert isinstance(grad_outputs, tuple) assert isinstance(grad_inputs, tuple) gxs = self._backward_target_inputs(target_input_indexes, grad_outputs) return tuple([gx if g_input is None else g_input if gx is None else gx + g_input for gx, g_input in six.moves.zip(gxs, grad_inputs)]) def _backward_chainerx(self, target_input_indexes, grad_outputs, retained_inputs, retained_outputs): # Backward wrapper that is called from C++ via a Python binding in case # self.apply was called with chainerx.ndarrays. assert self._is_chainerx_fallback_mode assert len(target_input_indexes) > 0 assert ( (self._input_indexes_to_retain is None and len(retained_inputs) == 0) or (len(self._input_indexes_to_retain) == len(retained_inputs))) assert ( (self._output_indexes_to_retain is None and len(retained_outputs) == 0) or (len(self._output_indexes_to_retain) == len(retained_outputs))) assert all([ a is None or isinstance(a, chainerx.ndarray) for a in grad_outputs]) self._chainerx_retained_inputs = tuple([ None if array is None else variable.Variable( array, requires_grad=array.is_backprop_required()) for array in retained_inputs]) self._chainerx_retained_outputs = tuple([ None if array is None else variable.Variable( array, requires_grad=( False if array is None else array.is_backprop_required())) for array in retained_outputs]) device = backend.get_device_from_array( *(retained_inputs + retained_outputs + grad_outputs)) with chainer.using_device(device): gxs = self._backward_target_inputs( tuple(target_input_indexes), tuple([ None if gy is None else chainer.Variable( gy, requires_grad=gy.is_backprop_required()) for gy in grad_outputs])) gx_arrs = [gx._data[0] for gx in gxs] assert all([isinstance(gx, chainerx.ndarray) for gx in gx_arrs]) return gx_arrs def _backward_target_inputs(self, target_input_indexes, grad_outputs): # Filters out input gradients that are not required and returns the # rest. assert all([ gy is None or yl == gy.layout for yl, gy in zip(self.output_layouts, grad_outputs)]) gxs = self.backward(target_input_indexes, grad_outputs) len_gxs = len(gxs) if len_gxs == len(self.inputs): gxs = tuple([gxs[i] for i in target_input_indexes]) else: assert len_gxs == len(target_input_indexes) return gxs def _get_error_message(self, message): lines = [ message, ' function={} ({})'.format(self._impl_name, self.label) ] if self.inputs: for i, input in enumerate(self.inputs): lines.append( ' input {}: shape={} dtype={}'.format( i, input.shape, input.dtype)) if self.outputs: for i, output_ref in enumerate(self.outputs): output = output_ref() if output is None: lines.append( ' output {}: not available'.format(i)) else: lines.append( ' output {}: shape={} dtype={}'.format( i, output.shape, output.dtype)) return '\n'.join(lines) def get_retained_inputs(self): """Returns a tuple of retained input variables. This method is used to retrieve the input variables retained in :meth:`forward`. Returns: A tuple of retained input variables, if available. Otherwise, an empty tuple is returned. """ if self._is_chainerx_fallback_mode: return self._chainerx_retained_inputs if self._input_indexes_to_retain is None or self.inputs is None: return () retained_inputs = [] for index in self._input_indexes_to_retain: input = self.inputs[index] if input.data is None: retained_inputs.append(None) else: retained_inputs.append(input.get_variable()) return tuple(retained_inputs) def get_retained_outputs(self): """Returns a tuple of retained output variables.
This method is used to retrieve the output variables retained in :meth:`forward`. Returns: A tuple of retained output variables, if available. Otherwise, an empty tuple is returned. .. note:: This method does a tricky thing to support the case of an output node being garbage-collected before this method is called; in this case, this method creates a fresh variable node that acts as an output node of the function node. """ if self._is_chainerx_fallback_mode: return self._chainerx_retained_outputs if self._output_indexes_to_retain is None or self.outputs is None: return () # TODO(hvy): It should be safe to remove this check. if self._retained_output_data is None: raise ValueError(self._get_error_message( 'retain_outputs is not called in forward.')) ret = [] outputs = self.outputs new_outputs = list(outputs) outputs_modified = False for index, data in six.moves.zip(self._output_indexes_to_retain, self._retained_output_data): output = outputs[index]() if output is None: # The output node is garbage collected, so create a fresh # Variable object. output_var = variable.Variable(data) output_var.creator_node = self new_outputs[index] = weakref.ref(output_var.node) outputs_modified = True else: output_var = output.get_variable() if output_var.raw_array is None: ret.append(None) else: ret.append(output_var) if outputs_modified: self.outputs = tuple(new_outputs) return tuple(ret) def unchain(self): """Purges in/out nodes and this function node itself from the graph.""" if self._is_chainerx_fallback_mode: raise NotImplementedError( 'Unchaining is not yet supported in ChainerX fallback mode.') for y in self.outputs: y_ref = y() if y_ref is not None: y_ref.unchain() self.inputs = None self.outputs = None def add_hook(self, hook, name=None): """Registers a function hook. Args: hook (~chainer.FunctionHook): Function hook to be registered. name (str): Name of the function hook. The name must be unique among function hooks registered to this function. If ``None``, the default name of the function hook is used. """ if not isinstance(hook, function_hook.FunctionHook): raise TypeError('Hook must be of type FunctionHook') if name is None: name = hook.name hooks = self.local_function_hooks if name in hooks: raise KeyError('Hook %s already exists' % name) hooks[name] = hook hook.added(self) def delete_hook(self, name): """Unregisters the function hook. Args: name (str): The name of the function hook to be unregistered. """ if name in self.local_function_hooks: self.local_function_hooks[name].deleted(self) del self.local_function_hooks[name] else: raise KeyError('Hook %s does not exist' % name) def grad(outputs, inputs, grad_outputs=None, grad_inputs=None, set_grad=False, retain_grad=False, enable_double_backprop=False, loss_scale=None): """Computes the gradient of output variables w.r.t.\\ the input variables. This function implements the backpropagation algorithm. While :meth:`Variable.backward` also implements backprop, this function selects the smallest set of paths in the computational graph needed to compute the gradients w.r.t. inputs. The error is backpropagated only through these selected paths, which may reduce the overall computational cost. This function also differs from :meth:`Variable.backward` in the way it returns the gradients; it directly returns the gradient variables as a list instead of setting gradients to the :attr:`Variable.grad_var` attribute of the original variable. This means users do not need to clear the gradient w.r.t. each variable before computing the gradient using this function.
If the ``set_grad`` option is set to ``True``, the computed gradient is also stored in the :attr:`Variable.grad_var` attribute of each variable, in which case any original value of :attr:`Variable.grad_var` will be updated even if it had already been set. Args: outputs (tuple or list of :class:`~chainer.Variable`): A sequence of output variables from which backprop starts. inputs (tuple or list of :class:`~chainer.Variable`): A sequence of input variables each of which this function computes the gradient w.r.t. grad_outputs (tuple or list of :class:`~chainer.Variable` or None): A sequence of variables that gives the initial value of each output gradient. If an element is set to ``None``, an array filled with 1 is used. If this argument itself is ``None``, it is treated as a sequence of ``None``\\ s. grad_inputs (tuple or list of :class:`~chainer.Variable` or None): A sequence of variables that gives the initial value of each input gradient. The gradients computed by the backprop algorithm are accumulated to them (not in-place). If an element is set to ``None``, the gradient is not accumulated to this value. If this argument itself is ``None``, it is treated as a sequence of ``None``\\ s. set_grad (bool): If it is ``True``, the :attr:`Variable.grad_var` attribute of each input variable is set to the corresponding computed gradient variable. retain_grad (bool): If it is ``True``, the gradients w.r.t. all the intermediate variables are stored in the :attr:`Variable.grad_var` attribute. In this case, the ``set_grad`` option is ignored. enable_double_backprop (bool): If it is ``True``, the computed gradients can be further backpropagated. Enabling it may increase the memory consumption (and possibly the computational time) to remember the intermediate gradient values for the second backpropagation. loss_scale (float): Loss scaling factor. Loss scaling is a useful technique to mitigate the vanishing gradient issue that tends to happen when a low precision data type like float16 is used during training. If you set a loss scaling factor, gradients of loss values are multiplied by the factor before backprop starts. The factor is propagated to all gradients in the computational graph along the backprop. The gradients of parameters are divided by the factor just before the parameters are updated. Returns: A list of gradient variables w.r.t. the inputs. """ if not isinstance(outputs, (tuple, list)): raise TypeError( 'outputs must be a tuple or a list, not {}.'.format(type(outputs))) if not isinstance(inputs, (tuple, list)): raise TypeError( 'inputs must be a tuple or a list, not {}.'.format(type(inputs))) if grad_outputs is not None: if not isinstance(grad_outputs, (tuple, list)): raise TypeError( 'grad_outputs must be a tuple or a list or None, not {}.' .format(type(grad_outputs))) if len(outputs) != len(grad_outputs): raise ValueError( 'grad_outputs must be of the same length as outputs.\n' 'len(outputs) = {}, len(grad_outputs) = {}' .format(len(outputs), len(grad_outputs))) if grad_inputs is not None: if not isinstance(grad_inputs, (tuple, list)): raise TypeError( 'grad_inputs must be a tuple or a list or None, not {}.'
.format(type(grad_inputs))) if len(inputs) != len(grad_inputs): raise ValueError( 'grad_inputs must be of the same length as inputs.\n' 'len(inputs) = {}, len(grad_inputs) = {}' .format(len(inputs), len(grad_inputs))) # Check if all the inputs are chainerx arrays; if so, this relies # on the chainerx.grad function n_chx_inputs = sum([False if x is None else x._has_chainerx_array for x in inputs]) if n_chx_inputs == len(inputs): # Need to access the arrays to invoke the chainer grad function if grad_outputs: grad_outputs_chx = [x._data[0] for x in grad_outputs] else: grad_outputs_chx = [] outputs_chx = [x._data[0] for x in outputs] inputs_chx = [x._data[0] for x in inputs] # pybind has issues when converting opt<int> -> opt<float> if loss_scale is not None: loss_scale = float(loss_scale) grads = chainerx.grad(outputs_chx, inputs_chx, backprop_id=None, enable_double_backprop=enable_double_backprop, set_grad=set_grad, retain_grad=retain_grad, grad_outputs=grad_outputs_chx, loss_scale=loss_scale) if grad_inputs: grads = [g+gi._data[0] for g, gi in zip(grads, grad_inputs)] return [variable.Variable(g, requires_grad=g.is_backprop_required()) for g in grads] elif n_chx_inputs > 0: raise TypeError( 'Mixing chainerx and non-chainerx variables is not allowed') for v in outputs: # Raise error here if v is created by Function.backward. # In such a case, we don't know the exact inputs of the creator. v.node._check_old_style_gradient() # The implementation consists of three steps. # 1. Backward enumeration: all the nodes reachable backward from the output # nodes are enumerated. The forward direction links are collected in # this step. Note that the variable nodes whose requires_grad is False # are ignored and their creators are not searched. candidate_funcs = [v.creator_node for v in outputs if v.creator_node is not None] visited_funcs = set() forward_graph = collections.defaultdict(list) while candidate_funcs: func = candidate_funcs.pop() if func in visited_funcs: continue visited_funcs.add(func) for x in func.inputs: # Raise error here if x is created by Function.backward. # In such a case, we don't know the exact inputs of the creator. x._check_old_style_gradient() if not x.requires_grad: continue forward_graph[x].append(func) creator = x.creator_node if creator is not None and creator not in visited_funcs: candidate_funcs.append(creator) # 2. Forward enumeration: all the nodes in the subgraph reachable from the # input nodes are enumerated. The extracted (sub-)subgraph is the union # of all paths that backpropagation will visit. candidate_vars = [x.node for x in inputs] visited_funcs = set() grad_required = set() while candidate_vars: x = candidate_vars.pop() grad_required.add(x) for func in forward_graph[x]: if func in visited_funcs: continue visited_funcs.add(func) for y_ref in func.outputs: y = y_ref() if y is not None and y in forward_graph: candidate_vars.append(y) # 3. Backpropagation: the backpropagation is executed along the # (sub-)subgraph. It uses the topological order of the subgraph which is # induced by the reversed order of function applications ("rank"). grads = _backprop_utils.GradTable() # Initialize the gradient mapping.
if grad_outputs is None: grad_outputs = (None,) * len(outputs) for y, gy in zip(outputs, grad_outputs): if gy is None: with chainer.using_device(y.device): gy_data = y.device.xp.ones_like(y.array) gy = variable.Variable(gy_data, requires_grad=False) if loss_scale is not None: gy.data *= loss_scale grads[y.node] = gy if grad_inputs is not None: for x, gx in zip(inputs, grad_inputs): if gx is not None: grads[x.node] = gx # Backprop implementation. It edits grads which will only contain the # gradients w.r.t. the inputs. with chainer.using_config('enable_backprop', enable_double_backprop): ret_dict = _backprop( outputs, inputs, grad_required, retain_grad, grads, loss_scale) # Extract the gradients w.r.t. the inputs and return them. ret = [ret_dict[x.node] for x in inputs] if set_grad: for x, gx in zip(inputs, ret): x.grad_var = gx return ret def _backprop(outputs, inputs, grad_required, retain_grad, grads, loss_scale): candidate_funcs, push_candidate, pop_candidate = _get_ordered_func_heap() for y in outputs: creator = y.creator_node if creator is not None: push_candidate(creator) input_nodes = set(x.node for x in inputs) ret_dict = {} is_debug = chainer.is_debug() base_hooks = chainer.get_function_hooks().values() while candidate_funcs: func = pop_candidate() # Collect the gradients w.r.t. the outputs ys = [y() for y in func.outputs] # access via weak ref gys = tuple([grads.pop(y) if y is not None and y.creator_node is not None else None for y in ys]) for node, gy in six.moves.zip(ys, gys): if node is not None: if node in input_nodes: ret_dict[node] = gy if retain_grad: y = node.get_variable_or_none() if y is not None: y.grad_var = gy y._loss_scale = loss_scale # Collect the gradients w.r.t. the inputs input_indexes = [] x_grads = collections.OrderedDict() for i, x in enumerate(func.inputs): if x not in grad_required: continue input_indexes.append(i) if x not in x_grads: x_grads[x] = grads.get_as_list(x) if not input_indexes: continue input_indexes = tuple(input_indexes) # Do backward # Call pre-backward hooks if func._n_local_function_hooks != 0: local_hooks = collections.OrderedDict(chainer.get_function_hooks()) local_hooks.update(func.local_function_hooks) hooks = local_hooks.values() # avoid six for performance else: hooks = base_hooks in_data = [x.data for x in func.inputs] out_grad_data = [None if g is None else g.data for g in gys] with chainer.using_device(backend.get_device_from_array(*in_data)): for hook in hooks: hook.backward_preprocess( func, tuple(in_data), tuple(out_grad_data)) _backprop_utils.backprop_step(func, input_indexes, gys, x_grads, is_debug) # Call post-backward hooks for hook in hooks: hook.backward_postprocess( func, tuple(in_data), tuple(out_grad_data)) # Update grads for node, g in x_grads.items(): if not g: # gradient == None continue creator = node.creator_node if creator is not None: push_candidate(creator) for x in input_nodes: if x not in ret_dict: ret_dict[x] = grads.pop(x) return ret_dict def _extract_apply_in_data(inputs): # Extracts arrays from FunctionNode.apply() inputs. # # A flag that indicates whether inputs are chainerx arrays is also # returned. # # Each object in `inputs` may be `Variable` or an array. # If it's a `Variable` and its underlying array is a chainerx array, # `Variable._data[0]` (which is backproppable in contrast to # `Variable.array`) is returned. # # If at least one of the arrays is a ChainerX array, all other # arrays need to be ChainerX arrays. 
if not inputs: return False, () if chainerx.is_available(): has_chainerx_array = False # Unwrap arrays arrays = [] for x in inputs: if isinstance(x, variable.Variable): arrays.append(x._data[0]) if x._has_chainerx_array: has_chainerx_array = True else: # x is ndarray arrays.append(x) if not has_chainerx_array: if isinstance(x, chainerx.ndarray): has_chainerx_array = True return has_chainerx_array, tuple(arrays) else: return False, tuple([ x.raw_array if isinstance(x, variable.Variable) else x for x in inputs]) def _get_ordered_func_heap(): heap = [] visited_funcs = set() def push_heap(func): if func not in visited_funcs: # Negate since heapq is min-heap # The second element is used to make each item unique ordered_func = -func.rank, len(visited_funcs), func visited_funcs.add(func) heapq.heappush(heap, ordered_func) def pop_heap(): _, _, func = heapq.heappop(heap) return func return heap, push_heap, pop_heap def _make_chainerx_attribute_fallback_class(obj, device): # Creates a fabricated class based on a concrete class # (either FunctionNode or Function), # equipped with the automatic attribute fallback. This is enabled # during FunctionNode.forward(), Function.forward() and # Function.backward(). # # In the fallback mechanism, when an array with the fallback ndarray # type (e.g. numpy.ndarray for ChainerX native devices) is assigned # as an attribute, it's automatically converted to a ChainerX ndarray # with the corresponding ChainerX device and stored in that form. # Conversely, when an attribute with ChainerX ndarray type is queried, # it's converted to the fallback ndarray before being returned. # That way, concrete function implementations can use attributes # as ndarray storage, without converting from/to ChainerX manually. # # Note that it works only if the attribute has an ndarray type. If the # array is wrapped in a tuple, for example, no automatic conversion # will take place. fallback_device = device.fallback_device sup = super(obj.__class__, obj) # Cache to avoid converting the same arrays multiple times fallback_array_cache = {} # self.__getattribute__ for fallback arrays def getattribute(self, name): value = sup.__getattribute__(name) if isinstance(value, chainerx.ndarray): fallback_arr = fallback_array_cache.get(name) if fallback_arr is None: fallback_arr = backend.from_chx(value) fallback_array_cache[name] = fallback_arr return fallback_arr return value # self.__setattr__ for fallback arrays def setattr(self, name, value): if isinstance(value, fallback_device.xp.ndarray): fallback_array_cache[name] = value sup.__setattr__(name, backend.to_chx(value)) return sup.__setattr__(name, value) # Return a fabricated FunctionNode class new_class = type( obj.__class__.__name__, inspect.getmro(obj.__class__), { '__getattribute__': getattribute, '__setattr__': setattr, }) return new_class @contextlib.contextmanager def _chainerx_attribute_fallback(obj, chainerx_device): old_class = obj.__class__ obj.__class__ = _make_chainerx_attribute_fallback_class( obj, chainerx_device) try: yield finally: obj.__class__ = old_class
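# ---------------------------------------------------------------------------
# Usage sketch (appended for illustration; not part of the original module).
# It shows a minimal new-style differentiable function built on the
# FunctionNode API documented above. ``MulConstant`` and ``mul_constant`` are
# hypothetical example names; only public Chainer APIs (FunctionNode.apply,
# chainer.grad) are assumed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy
    import chainer

    class MulConstant(chainer.FunctionNode):
        """Element-wise multiplication by a constant: y = c * x."""

        def __init__(self, c):
            self.c = c

        def forward(self, inputs):
            # ``inputs`` is a tuple of arrays and a tuple must be returned.
            # Nothing is retained because backward() needs no input array.
            x, = inputs
            return self.c * x,

        def backward(self, target_input_indexes, grad_outputs):
            # ``grad_outputs`` holds Variables; returning Variables keeps the
            # graph differentiable, so higher-order gradients also work.
            gy, = grad_outputs
            return self.c * gy,

    def mul_constant(x, c):
        y, = MulConstant(c).apply((x,))  # apply() grows the computational graph
        return y

    x = chainer.Variable(numpy.array([1., 2.], dtype=numpy.float32))
    y = mul_constant(x, 3.0)
    gx, = chainer.grad([y], [x], enable_double_backprop=True)
    print(gx.array)  # -> [3. 3.]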
fyffyt/scikit-learn
refs/heads/master
sklearn/_build_utils.py
280
""" Utilities useful during the build. """ # author: Andy Mueller, Gael Varoquaux # license: BSD from numpy.distutils.system_info import get_info def get_blas_info(): def atlas_not_found(blas_info_): def_macros = blas_info.get('define_macros', []) for x in def_macros: if x[0] == "NO_ATLAS_INFO": # if x[1] != 1 we should have lapack # how do we do that now? return True if x[0] == "ATLAS_INFO": if "None" in x[1]: # this one turned up on FreeBSD return True return False blas_info = get_info('blas_opt', 0) if (not blas_info) or atlas_not_found(blas_info): cblas_libs = ['cblas'] blas_info.pop('libraries', None) else: cblas_libs = blas_info.pop('libraries', []) return cblas_libs, blas_info
lckung/spark-ec2
refs/heads/branch-1.5
launch-script/lib/boto-2.34.0/tests/integration/cognito/__init__.py
112
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import boto from tests.compat import unittest class CognitoTest(unittest.TestCase): def setUp(self): self.cognito_identity = boto.connect_cognito_identity() self.cognito_sync = boto.connect_cognito_sync() self.identity_pool_name = 'myIdentityPool' response = self.cognito_identity.create_identity_pool( identity_pool_name=self.identity_pool_name, allow_unauthenticated_identities=False ) self.identity_pool_id = response['IdentityPoolId'] def tearDown(self): self.cognito_identity.delete_identity_pool( identity_pool_id=self.identity_pool_id )
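# ---------------------------------------------------------------------------
# Usage sketch (appended for illustration; not part of the original file):
# a test that a subclass could add on top of the setUp/tearDown above.
# ``CognitoDescribeTest`` is a hypothetical name; it assumes the boto 2.34
# Layer-1 call describe_identity_pool and the AWS response keys shown.
# ---------------------------------------------------------------------------
class CognitoDescribeTest(CognitoTest):
    def test_describe_identity_pool(self):
        # The pool created in setUp should be retrievable by its id.
        response = self.cognito_identity.describe_identity_pool(
            identity_pool_id=self.identity_pool_id)
        self.assertEqual(response['IdentityPoolId'], self.identity_pool_id)
        self.assertEqual(response['IdentityPoolName'],
                         self.identity_pool_name)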
mozillazg/Unidecode
refs/heads/master
unidecode/x05e.py
250
data = ( 'Za ', # 0x00 'Bi ', # 0x01 'Shi ', # 0x02 'Bu ', # 0x03 'Ding ', # 0x04 'Shuai ', # 0x05 'Fan ', # 0x06 'Nie ', # 0x07 'Shi ', # 0x08 'Fen ', # 0x09 'Pa ', # 0x0a 'Zhi ', # 0x0b 'Xi ', # 0x0c 'Hu ', # 0x0d 'Dan ', # 0x0e 'Wei ', # 0x0f 'Zhang ', # 0x10 'Tang ', # 0x11 'Dai ', # 0x12 'Ma ', # 0x13 'Pei ', # 0x14 'Pa ', # 0x15 'Tie ', # 0x16 'Fu ', # 0x17 'Lian ', # 0x18 'Zhi ', # 0x19 'Zhou ', # 0x1a 'Bo ', # 0x1b 'Zhi ', # 0x1c 'Di ', # 0x1d 'Mo ', # 0x1e 'Yi ', # 0x1f 'Yi ', # 0x20 'Ping ', # 0x21 'Qia ', # 0x22 'Juan ', # 0x23 'Ru ', # 0x24 'Shuai ', # 0x25 'Dai ', # 0x26 'Zheng ', # 0x27 'Shui ', # 0x28 'Qiao ', # 0x29 'Zhen ', # 0x2a 'Shi ', # 0x2b 'Qun ', # 0x2c 'Xi ', # 0x2d 'Bang ', # 0x2e 'Dai ', # 0x2f 'Gui ', # 0x30 'Chou ', # 0x31 'Ping ', # 0x32 'Zhang ', # 0x33 'Sha ', # 0x34 'Wan ', # 0x35 'Dai ', # 0x36 'Wei ', # 0x37 'Chang ', # 0x38 'Sha ', # 0x39 'Qi ', # 0x3a 'Ze ', # 0x3b 'Guo ', # 0x3c 'Mao ', # 0x3d 'Du ', # 0x3e 'Hou ', # 0x3f 'Zheng ', # 0x40 'Xu ', # 0x41 'Mi ', # 0x42 'Wei ', # 0x43 'Wo ', # 0x44 'Fu ', # 0x45 'Yi ', # 0x46 'Bang ', # 0x47 'Ping ', # 0x48 'Tazuna ', # 0x49 'Gong ', # 0x4a 'Pan ', # 0x4b 'Huang ', # 0x4c 'Dao ', # 0x4d 'Mi ', # 0x4e 'Jia ', # 0x4f 'Teng ', # 0x50 'Hui ', # 0x51 'Zhong ', # 0x52 'Shan ', # 0x53 'Man ', # 0x54 'Mu ', # 0x55 'Biao ', # 0x56 'Guo ', # 0x57 'Ze ', # 0x58 'Mu ', # 0x59 'Bang ', # 0x5a 'Zhang ', # 0x5b 'Jiong ', # 0x5c 'Chan ', # 0x5d 'Fu ', # 0x5e 'Zhi ', # 0x5f 'Hu ', # 0x60 'Fan ', # 0x61 'Chuang ', # 0x62 'Bi ', # 0x63 'Hei ', # 0x64 '[?] ', # 0x65 'Mi ', # 0x66 'Qiao ', # 0x67 'Chan ', # 0x68 'Fen ', # 0x69 'Meng ', # 0x6a 'Bang ', # 0x6b 'Chou ', # 0x6c 'Mie ', # 0x6d 'Chu ', # 0x6e 'Jie ', # 0x6f 'Xian ', # 0x70 'Lan ', # 0x71 'Gan ', # 0x72 'Ping ', # 0x73 'Nian ', # 0x74 'Qian ', # 0x75 'Bing ', # 0x76 'Bing ', # 0x77 'Xing ', # 0x78 'Gan ', # 0x79 'Yao ', # 0x7a 'Huan ', # 0x7b 'You ', # 0x7c 'You ', # 0x7d 'Ji ', # 0x7e 'Yan ', # 0x7f 'Pi ', # 0x80 'Ting ', # 0x81 'Ze ', # 0x82 'Guang ', # 0x83 'Zhuang ', # 0x84 'Mo ', # 0x85 'Qing ', # 0x86 'Bi ', # 0x87 'Qin ', # 0x88 'Dun ', # 0x89 'Chuang ', # 0x8a 'Gui ', # 0x8b 'Ya ', # 0x8c 'Bai ', # 0x8d 'Jie ', # 0x8e 'Xu ', # 0x8f 'Lu ', # 0x90 'Wu ', # 0x91 '[?] 
', # 0x92 'Ku ', # 0x93 'Ying ', # 0x94 'Di ', # 0x95 'Pao ', # 0x96 'Dian ', # 0x97 'Ya ', # 0x98 'Miao ', # 0x99 'Geng ', # 0x9a 'Ci ', # 0x9b 'Fu ', # 0x9c 'Tong ', # 0x9d 'Pang ', # 0x9e 'Fei ', # 0x9f 'Xiang ', # 0xa0 'Yi ', # 0xa1 'Zhi ', # 0xa2 'Tiao ', # 0xa3 'Zhi ', # 0xa4 'Xiu ', # 0xa5 'Du ', # 0xa6 'Zuo ', # 0xa7 'Xiao ', # 0xa8 'Tu ', # 0xa9 'Gui ', # 0xaa 'Ku ', # 0xab 'Pang ', # 0xac 'Ting ', # 0xad 'You ', # 0xae 'Bu ', # 0xaf 'Ding ', # 0xb0 'Cheng ', # 0xb1 'Lai ', # 0xb2 'Bei ', # 0xb3 'Ji ', # 0xb4 'An ', # 0xb5 'Shu ', # 0xb6 'Kang ', # 0xb7 'Yong ', # 0xb8 'Tuo ', # 0xb9 'Song ', # 0xba 'Shu ', # 0xbb 'Qing ', # 0xbc 'Yu ', # 0xbd 'Yu ', # 0xbe 'Miao ', # 0xbf 'Sou ', # 0xc0 'Ce ', # 0xc1 'Xiang ', # 0xc2 'Fei ', # 0xc3 'Jiu ', # 0xc4 'He ', # 0xc5 'Hui ', # 0xc6 'Liu ', # 0xc7 'Sha ', # 0xc8 'Lian ', # 0xc9 'Lang ', # 0xca 'Sou ', # 0xcb 'Jian ', # 0xcc 'Pou ', # 0xcd 'Qing ', # 0xce 'Jiu ', # 0xcf 'Jiu ', # 0xd0 'Qin ', # 0xd1 'Ao ', # 0xd2 'Kuo ', # 0xd3 'Lou ', # 0xd4 'Yin ', # 0xd5 'Liao ', # 0xd6 'Dai ', # 0xd7 'Lu ', # 0xd8 'Yi ', # 0xd9 'Chu ', # 0xda 'Chan ', # 0xdb 'Tu ', # 0xdc 'Si ', # 0xdd 'Xin ', # 0xde 'Miao ', # 0xdf 'Chang ', # 0xe0 'Wu ', # 0xe1 'Fei ', # 0xe2 'Guang ', # 0xe3 'Koc ', # 0xe4 'Kuai ', # 0xe5 'Bi ', # 0xe6 'Qiang ', # 0xe7 'Xie ', # 0xe8 'Lin ', # 0xe9 'Lin ', # 0xea 'Liao ', # 0xeb 'Lu ', # 0xec '[?] ', # 0xed 'Ying ', # 0xee 'Xian ', # 0xef 'Ting ', # 0xf0 'Yong ', # 0xf1 'Li ', # 0xf2 'Ting ', # 0xf3 'Yin ', # 0xf4 'Xun ', # 0xf5 'Yan ', # 0xf6 'Ting ', # 0xf7 'Di ', # 0xf8 'Po ', # 0xf9 'Jian ', # 0xfa 'Hui ', # 0xfb 'Nai ', # 0xfc 'Hui ', # 0xfd 'Gong ', # 0xfe 'Nian ', # 0xff )
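# ---------------------------------------------------------------------------
# Usage sketch (appended for illustration; not part of the original module):
# how Unidecode consumes a table like this one. The module covers code points
# U+5E00..U+5EFF; the high byte of a code point selects the x5e module and
# the low byte indexes into ``data``.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    codepoint = 0x5e3d               # U+5E3D, within this table's range
    assert (codepoint >> 8) == 0x5e  # high byte -> this module (x5e)
    print(data[codepoint & 0xff])    # low byte -> 'Mao '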
0k/OpenUpgrade
refs/heads/8.0
openerp/report/render/rml2html/__init__.py
381
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from rml2html import parseString # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
ShadowMyst/creativechain-core
refs/heads/master
qa/rpc-tests/rest.py
50
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test REST interface # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from struct import * from io import BytesIO from codecs import encode import http.client import urllib.parse def deser_uint256(f): r = 0 for i in range(8): t = unpack(b"<I", f.read(4))[0] r += t << (i * 32) return r #allows simple http get calls def http_get_call(host, port, path, response_object = 0): conn = http.client.HTTPConnection(host, port) conn.request('GET', path) if response_object: return conn.getresponse() return conn.getresponse().read().decode('utf-8') #allows simple http post calls with a request body def http_post_call(host, port, path, requestdata = '', response_object = 0): conn = http.client.HTTPConnection(host, port) conn.request('POST', path, requestdata) if response_object: return conn.getresponse() return conn.getresponse().read() class RESTTest (BitcoinTestFramework): FORMAT_SEPARATOR = "." def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 3 def setup_network(self, split=False): self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) self.is_network_split=False self.sync_all() def run_test(self): url = urllib.parse.urlparse(self.nodes[0].url) print("Mining blocks...") self.nodes[0].generate(1) self.sync_all() self.nodes[2].generate(100) self.sync_all() assert_equal(self.nodes[0].getbalance(), 50) txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() self.nodes[2].generate(1) self.sync_all() bb_hash = self.nodes[0].getbestblockhash() assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1 # load the latest 0.1 tx over the REST API json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json") json_obj = json.loads(json_string) vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then) # get n of 0.1 outpoint n = 0 for vout in json_obj['vout']: if vout['value'] == 0.1: n = vout['n'] ###################################### # GETUTXOS: query an unspent outpoint # ###################################### json_request = '/checkmempool/'+txid+'-'+str(n) json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) #check chainTip response assert_equal(json_obj['chaintipHash'], bb_hash) #make sure there is one utxo assert_equal(len(json_obj['utxos']), 1) assert_equal(json_obj['utxos'][0]['value'], 0.1) ################################################ # GETUTXOS: now query an already spent outpoint # ################################################ json_request = '/checkmempool/'+vintx+'-0' json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) #check chainTip response assert_equal(json_obj['chaintipHash'], bb_hash) #make sure there is no utxo in the response because this outpoint has been spent assert_equal(len(json_obj['utxos']), 0) #check bitmap assert_equal(json_obj['bitmap'], "0") ################################################## # GETUTXOS: now check both with the same request #
################################################## json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0' json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(len(json_obj['utxos']), 1) assert_equal(json_obj['bitmap'], "10") #test binary response bb_hash = self.nodes[0].getbestblockhash() binaryRequest = b'\x01\x02' binaryRequest += hex_str_to_bytes(txid) binaryRequest += pack("i", n) binaryRequest += hex_str_to_bytes(vintx) binaryRequest += pack("i", 0) bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest) output = BytesIO() output.write(bin_response) output.seek(0) chainHeight = unpack("i", output.read(4))[0] hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64) assert_equal(bb_hash, hashFromBinResponse) #check if getutxos' chaintip during calculation was fine assert_equal(chainHeight, 102) #chain height must be 102 ############################ # GETUTXOS: mempool checks # ############################ # do a tx and don't sync txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json") json_obj = json.loads(json_string) vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then) # get n of 0.1 outpoint n = 0 for vout in json_obj['vout']: if vout['value'] == 0.1: n = vout['n'] json_request = '/'+txid+'-'+str(n) json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(len(json_obj['utxos']), 0) #there should be no outpoint because the tx is only in the mempool and /checkmempool was not used json_request = '/checkmempool/'+txid+'-'+str(n) json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because the tx has just been added to the mempool #do some invalid requests json_request = '{"checkmempool' response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True) assert_equal(response.status, 400) #must be a 400 because we send an invalid json request json_request = '{"checkmempool' response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True) assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True) assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request #test limits json_request = '/checkmempool/' for x in range(0, 20): json_request += txid+'-'+str(n)+'/' json_request = json_request.rstrip("/") response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True) assert_equal(response.status, 400) #must be a 400 because we are exceeding the limits json_request = '/checkmempool/' for x in range(0, 15): json_request += txid+'-'+str(n)+'/' json_request = json_request.rstrip("/") response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True) assert_equal(response.status, 200) #must be a 200 because we are within the limits self.nodes[0].generate(1) #generate block
to not affect upcoming tests self.sync_all() ################ # /rest/block/ # ################ # check binary format response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True) assert_equal(response.status, 200) assert_greater_than(int(response.getheader('content-length')), 80) response_str = response.read() # compare with block header response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True) assert_equal(response_header.status, 200) assert_equal(int(response_header.getheader('content-length')), 80) response_header_str = response_header.read() assert_equal(response_str[0:80], response_header_str) # check block hex format response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True) assert_equal(response_hex.status, 200) assert_greater_than(int(response_hex.getheader('content-length')), 160) response_hex_str = response_hex.read() assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160]) # compare with hex block header response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True) assert_equal(response_header_hex.status, 200) assert_greater_than(int(response_header_hex.getheader('content-length')), 160) response_header_hex_str = response_header_hex.read() assert_equal(response_hex_str[0:160], response_header_hex_str[0:160]) assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160]) # check json format block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json') block_json_obj = json.loads(block_json_string) assert_equal(block_json_obj['hash'], bb_hash) # compare with json block header response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True) assert_equal(response_header_json.status, 200) response_header_json_str = response_header_json.read().decode('utf-8') json_obj = json.loads(response_header_json_str, parse_float=Decimal) assert_equal(len(json_obj), 1) #ensure that there is one header in the json response assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same #compare with normal RPC block response rpc_block_json = self.nodes[0].getblock(bb_hash) assert_equal(json_obj[0]['hash'], rpc_block_json['hash']) assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations']) assert_equal(json_obj[0]['height'], rpc_block_json['height']) assert_equal(json_obj[0]['version'], rpc_block_json['version']) assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot']) assert_equal(json_obj[0]['time'], rpc_block_json['time']) assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce']) assert_equal(json_obj[0]['bits'], rpc_block_json['bits']) assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty']) assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork']) assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash']) #see if we can get 5 headers in one response self.nodes[1].generate(5) self.sync_all() response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True) assert_equal(response_header_json.status, 200) response_header_json_str = response_header_json.read().decode('utf-8') json_obj = json.loads(response_header_json_str) assert_equal(len(json_obj), 5) #now we should 
have 5 header objects # do tx test tx_hash = block_json_obj['tx'][0]['txid'] json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json") json_obj = json.loads(json_string) assert_equal(json_obj['txid'], tx_hash) # check hex format response hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True) assert_equal(hex_string.status, 200) assert_greater_than(int(response.getheader('content-length')), 10) # check block tx details # let's make 3 tx and mine them on node 1 txs = [] txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)) txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)) txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)) self.sync_all() # check that there are exactly 3 transactions in the TX memory pool before generating the block json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) assert_equal(json_obj['size'], 3) # the size of the memory pool should be greater than 3x ~100 bytes assert_greater_than(json_obj['bytes'], 300) # check that there are our submitted transactions in the TX memory pool json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) for tx in txs: assert_equal(tx in json_obj, True) # now mine the transactions newblockhash = self.nodes[1].generate(1) self.sync_all() #check if the 3 tx show up in the new block json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) for tx in json_obj['tx']: if not 'coinbase' in tx['vin'][0]: #exclude coinbase assert_equal(tx['txid'] in txs, True) #check the same but without tx details json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json') json_obj = json.loads(json_string) for tx in txs: assert_equal(tx in json_obj['tx'], True) #test rest bestblock bb_hash = self.nodes[0].getbestblockhash() json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json') json_obj = json.loads(json_string) assert_equal(json_obj['bestblockhash'], bb_hash) if __name__ == '__main__': RESTTest ().main ()
40223121/2015cd_midterm
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/logging/handlers.py
736
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose and without fee is hereby granted, # provided that the above copyright notice appear in all copies and that # both that copyright notice and this permission notice appear in # supporting documentation, and that the name of Vinay Sajip # not be used in advertising or publicity pertaining to distribution # of the software without specific, written prior permission. # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ Additional handlers for the logging package for Python. The core package is based on PEP 282 and comments thereto in comp.lang.python. Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved. To use, simply 'import logging.handlers' and log away! """ import errno, logging, socket, os, pickle, struct, time, re from codecs import BOM_UTF8 from stat import ST_DEV, ST_INO, ST_MTIME import queue try: import threading except ImportError: #pragma: no cover threading = None # # Some constants... # DEFAULT_TCP_LOGGING_PORT = 9020 DEFAULT_UDP_LOGGING_PORT = 9021 DEFAULT_HTTP_LOGGING_PORT = 9022 DEFAULT_SOAP_LOGGING_PORT = 9023 SYSLOG_UDP_PORT = 514 SYSLOG_TCP_PORT = 514 _MIDNIGHT = 24 * 60 * 60 # number of seconds in a day class BaseRotatingHandler(logging.FileHandler): """ Base class for handlers that rotate log files at a certain point. Not meant to be instantiated directly. Instead, use RotatingFileHandler or TimedRotatingFileHandler. """ def __init__(self, filename, mode, encoding=None, delay=False): """ Use the specified filename for streamed logging """ logging.FileHandler.__init__(self, filename, mode, encoding, delay) self.mode = mode self.encoding = encoding self.namer = None self.rotator = None def emit(self, record): """ Emit a record. Output the record to the file, catering for rollover as described in doRollover(). """ try: if self.shouldRollover(record): self.doRollover() logging.FileHandler.emit(self, record) except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) def rotation_filename(self, default_name): """ Modify the filename of a log file when rotating. This is provided so that a custom filename can be provided. The default implementation calls the 'namer' attribute of the handler, if it's callable, passing the default name to it. If the attribute isn't callable (the default is None), the name is returned unchanged. :param default_name: The default name for the log file. """ if not callable(self.namer): result = default_name else: result = self.namer(default_name) return result def rotate(self, source, dest): """ When rotating, rotate the current log. The default implementation calls the 'rotator' attribute of the handler, if it's callable, passing the source and dest arguments to it. If the attribute isn't callable (the default is None), the source is simply renamed to the destination. :param source: The source filename. This is normally the base filename, e.g. 'test.log' :param dest: The destination filename. This is normally what the source is rotated to, e.g. 
'test.log.1'. """ if not callable(self.rotator): # Issue 18940: A file may not have been created if delay is True. if os.path.exists(source): os.rename(source, dest) else: self.rotator(source, dest) class RotatingFileHandler(BaseRotatingHandler): """ Handler for logging to a set of files, which switches from one file to the next when the current file reaches a certain size. """ def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False): """ Open the specified file and use it as the stream for logging. By default, the file grows indefinitely. You can specify particular values of maxBytes and backupCount to allow the file to rollover at a predetermined size. Rollover occurs whenever the current log file is nearly maxBytes in length. If backupCount is >= 1, the system will successively create new files with the same pathname as the base file, but with extensions ".1", ".2" etc. appended to it. For example, with a backupCount of 5 and a base file name of "app.log", you would get "app.log", "app.log.1", "app.log.2", ... through to "app.log.5". The file being written to is always "app.log" - when it gets filled up, it is closed and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. exist, then they are renamed to "app.log.2", "app.log.3" etc. respectively. If maxBytes is zero, rollover never occurs. """ # If rotation/rollover is wanted, it doesn't make sense to use another # mode. If for example 'w' were specified, then if there were multiple # runs of the calling application, the logs from previous runs would be # lost if the 'w' is respected, because the log file would be truncated # on each run. if maxBytes > 0: mode = 'a' BaseRotatingHandler.__init__(self, filename, mode, encoding, delay) self.maxBytes = maxBytes self.backupCount = backupCount def doRollover(self): """ Do a rollover, as described in __init__(). """ if self.stream: self.stream.close() self.stream = None if self.backupCount > 0: for i in range(self.backupCount - 1, 0, -1): sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i)) dfn = self.rotation_filename("%s.%d" % (self.baseFilename, i + 1)) if os.path.exists(sfn): if os.path.exists(dfn): os.remove(dfn) os.rename(sfn, dfn) dfn = self.rotation_filename(self.baseFilename + ".1") if os.path.exists(dfn): os.remove(dfn) self.rotate(self.baseFilename, dfn) if not self.delay: self.stream = self._open() def shouldRollover(self, record): """ Determine if rollover should occur. Basically, see if the supplied record would cause the file to exceed the size limit we have. """ if self.stream is None: # delay was set... self.stream = self._open() if self.maxBytes > 0: # are we rolling over? msg = "%s\n" % self.format(record) self.stream.seek(0, 2) #due to non-posix-compliant Windows feature if self.stream.tell() + len(msg) >= self.maxBytes: return 1 return 0 class TimedRotatingFileHandler(BaseRotatingHandler): """ Handler for logging to a file, rotating the log file at certain timed intervals. If backupCount is > 0, when rollover is done, no more than backupCount files are kept - the oldest ones are deleted. """ def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False): BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay) self.when = when.upper() self.backupCount = backupCount self.utc = utc # Calculate the real rollover interval, which is just the number of # seconds between rollovers. Also set the filename suffix used when # a rollover occurs. 
Current 'when' events supported: # S - Seconds # M - Minutes # H - Hours # D - Days # midnight - roll over at midnight # W{0-6} - roll over on a certain day; 0 - Monday # # Case of the 'when' specifier is not important; lower or upper case # will work. if self.when == 'S': self.interval = 1 # one second self.suffix = "%Y-%m-%d_%H-%M-%S" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$" elif self.when == 'M': self.interval = 60 # one minute self.suffix = "%Y-%m-%d_%H-%M" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$" elif self.when == 'H': self.interval = 60 * 60 # one hour self.suffix = "%Y-%m-%d_%H" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$" elif self.when == 'D' or self.when == 'MIDNIGHT': self.interval = 60 * 60 * 24 # one day self.suffix = "%Y-%m-%d" self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" elif self.when.startswith('W'): self.interval = 60 * 60 * 24 * 7 # one week if len(self.when) != 2: raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when) if self.when[1] < '0' or self.when[1] > '6': raise ValueError("Invalid day specified for weekly rollover: %s" % self.when) self.dayOfWeek = int(self.when[1]) self.suffix = "%Y-%m-%d" self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" else: raise ValueError("Invalid rollover interval specified: %s" % self.when) self.extMatch = re.compile(self.extMatch, re.ASCII) self.interval = self.interval * interval # multiply by units requested if os.path.exists(filename): t = os.stat(filename)[ST_MTIME] else: t = int(time.time()) self.rolloverAt = self.computeRollover(t) def computeRollover(self, currentTime): """ Work out the rollover time based on the specified time. """ result = currentTime + self.interval # If we are rolling over at midnight or weekly, then the interval is already known. # What we need to figure out is WHEN the next interval is. In other words, # if you are rolling over at midnight, then your base interval is 1 day, # but you want to start that one day clock at midnight, not now. So, we # have to fudge the rolloverAt value in order to trigger the first rollover # at the right time. After that, the regular interval will take care of # the rest. Note that this code doesn't care about leap seconds. :) if self.when == 'MIDNIGHT' or self.when.startswith('W'): # This could be done with less code, but I wanted it to be clear if self.utc: t = time.gmtime(currentTime) else: t = time.localtime(currentTime) currentHour = t[3] currentMinute = t[4] currentSecond = t[5] # r is the number of seconds left between now and midnight r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 + currentSecond) result = currentTime + r # If we are rolling over on a certain day, add in the number of days until # the next rollover, but offset by 1 since we just calculated the time # until the next day starts. There are three cases: # Case 1) The day to rollover is today; in this case, do nothing # Case 2) The day to rollover is further in the interval (i.e., today is # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to # next rollover is simply 6 - 2 - 1, or 3. # Case 3) The day to rollover is behind us in the interval (i.e., today # is day 5 (Saturday) and rollover is on day 3 (Thursday). # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the # number of days left in the current week (1) plus the number # of days in the next week until the rollover day (3). # The calculations described in 2) and 3) above need to have a day added. 
# This is because the above time calculation takes us to midnight on this # day, i.e. the start of the next day. if self.when.startswith('W'): day = t[6] # 0 is Monday if day != self.dayOfWeek: if day < self.dayOfWeek: daysToWait = self.dayOfWeek - day else: daysToWait = 6 - day + self.dayOfWeek + 1 newRolloverAt = result + (daysToWait * (60 * 60 * 24)) if not self.utc: dstNow = t[-1] dstAtRollover = time.localtime(newRolloverAt)[-1] if dstNow != dstAtRollover: if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour addend = -3600 else: # DST bows out before next rollover, so we need to add an hour addend = 3600 newRolloverAt += addend result = newRolloverAt return result def shouldRollover(self, record): """ Determine if rollover should occur. record is not used, as we are just comparing times, but it is needed so the method signatures are the same """ t = int(time.time()) if t >= self.rolloverAt: return 1 return 0 def getFilesToDelete(self): """ Determine the files to delete when rolling over. More specific than the earlier method, which just used glob.glob(). """ dirName, baseName = os.path.split(self.baseFilename) fileNames = os.listdir(dirName) result = [] prefix = baseName + "." plen = len(prefix) for fileName in fileNames: if fileName[:plen] == prefix: suffix = fileName[plen:] if self.extMatch.match(suffix): result.append(os.path.join(dirName, fileName)) result.sort() if len(result) < self.backupCount: result = [] else: result = result[:len(result) - self.backupCount] return result def doRollover(self): """ do a rollover; in this case, a date/time stamp is appended to the filename when the rollover happens. However, you want the file to be named for the start of the interval, not the current time. If there is a backup count, then we have to get a list of matching filenames, sort them and remove the one with the oldest suffix. """ if self.stream: self.stream.close() self.stream = None # get the time that this sequence started at and make it a TimeTuple currentTime = int(time.time()) dstNow = time.localtime(currentTime)[-1] t = self.rolloverAt - self.interval if self.utc: timeTuple = time.gmtime(t) else: timeTuple = time.localtime(t) dstThen = timeTuple[-1] if dstNow != dstThen: if dstNow: addend = 3600 else: addend = -3600 timeTuple = time.localtime(t + addend) dfn = self.rotation_filename(self.baseFilename + "." + time.strftime(self.suffix, timeTuple)) if os.path.exists(dfn): os.remove(dfn) self.rotate(self.baseFilename, dfn) if self.backupCount > 0: for s in self.getFilesToDelete(): os.remove(s) if not self.delay: self.stream = self._open() newRolloverAt = self.computeRollover(currentTime) while newRolloverAt <= currentTime: newRolloverAt = newRolloverAt + self.interval #If DST changes and midnight or weekly rollover, adjust for this. if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc: dstAtRollover = time.localtime(newRolloverAt)[-1] if dstNow != dstAtRollover: if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour addend = -3600 else: # DST bows out before next rollover, so we need to add an hour addend = 3600 newRolloverAt += addend self.rolloverAt = newRolloverAt class WatchedFileHandler(logging.FileHandler): """ A handler for logging to a file, which watches the file to see if it has changed while in use. This can happen because of usage of programs such as newsyslog and logrotate which perform log file rotation. 
This handler, intended for use under Unix, watches the file to see if it has changed since the last emit. (A file has changed if its device or inode have changed.) If it has changed, the old file stream is closed, and the file opened to get a new stream. This handler is not appropriate for use under Windows, because under Windows open files cannot be moved or renamed - logging opens the files with exclusive locks - and so there is no need for such a handler. Furthermore, ST_INO is not supported under Windows; stat always returns zero for this value. This handler is based on a suggestion and patch by Chad J. Schroeder. """ def __init__(self, filename, mode='a', encoding=None, delay=False): logging.FileHandler.__init__(self, filename, mode, encoding, delay) self.dev, self.ino = -1, -1 self._statstream() def _statstream(self): if self.stream: sres = os.fstat(self.stream.fileno()) self.dev, self.ino = sres[ST_DEV], sres[ST_INO] def emit(self, record): """ Emit a record. First check if the underlying file has changed, and if it has, close the old stream and reopen the file to get the current stream. """ # Reduce the chance of race conditions by stat'ing by path only # once and then fstat'ing our new fd if we opened a new log stream. # See issue #14632: Thanks to John Mulligan for the problem report # and patch. try: # stat the file by path, checking for existence sres = os.stat(self.baseFilename) except OSError as err: if err.errno == errno.ENOENT: sres = None else: raise # compare file system stat with that of our stream file handle if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino: if self.stream is not None: # we have an open file handle, clean it up self.stream.flush() self.stream.close() # open a new file handle and get new stat info from that fd self.stream = self._open() self._statstream() logging.FileHandler.emit(self, record) class SocketHandler(logging.Handler): """ A handler class which writes logging records, in pickle format, to a streaming socket. The socket is kept open across logging calls. If the peer resets it, an attempt is made to reconnect on the next call. The pickle which is sent is that of the LogRecord's attribute dictionary (__dict__), so that the receiver does not need to have the logging module installed in order to process the logging event. To unpickle the record at the receiving end into a LogRecord, use the makeLogRecord function. """ def __init__(self, host, port): """ Initializes the handler with a specific host address and port. When the attribute *closeOnError* is set to True - if a socket error occurs, the socket is silently closed and then reopened on the next logging call. """ logging.Handler.__init__(self) self.host = host self.port = port self.sock = None self.closeOnError = False self.retryTime = None # # Exponential backoff parameters. # self.retryStart = 1.0 self.retryMax = 30.0 self.retryFactor = 2.0 def makeSocket(self, timeout=1): """ A factory method which allows subclasses to define the precise type of socket they want. """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if hasattr(s, 'settimeout'): s.settimeout(timeout) try: s.connect((self.host, self.port)) return s except socket.error: s.close() raise def createSocket(self): """ Try to create a socket, using an exponential backoff with a max retry time. Thanks to Robert Olson for the original patch (SF #815911) which has been slightly refactored. 
""" now = time.time() # Either retryTime is None, in which case this # is the first time back after a disconnect, or # we've waited long enough. if self.retryTime is None: attempt = True else: attempt = (now >= self.retryTime) if attempt: try: self.sock = self.makeSocket() self.retryTime = None # next time, no delay before trying except socket.error: #Creation failed, so set the retry time and return. if self.retryTime is None: self.retryPeriod = self.retryStart else: self.retryPeriod = self.retryPeriod * self.retryFactor if self.retryPeriod > self.retryMax: self.retryPeriod = self.retryMax self.retryTime = now + self.retryPeriod def send(self, s): """ Send a pickled string to the socket. This function allows for partial sends which can happen when the network is busy. """ if self.sock is None: self.createSocket() #self.sock can be None either because we haven't reached the retry #time yet, or because we have reached the retry time and retried, #but are still unable to connect. if self.sock: try: if hasattr(self.sock, "sendall"): self.sock.sendall(s) else: #pragma: no cover sentsofar = 0 left = len(s) while left > 0: sent = self.sock.send(s[sentsofar:]) sentsofar = sentsofar + sent left = left - sent except socket.error: #pragma: no cover self.sock.close() self.sock = None # so we can call createSocket next time def makePickle(self, record): """ Pickles the record in binary format with a length prefix, and returns it ready for transmission across the socket. """ ei = record.exc_info if ei: # just to get traceback text into record.exc_text ... dummy = self.format(record) # See issue #14436: If msg or args are objects, they may not be # available on the receiving end. So we convert the msg % args # to a string, save it as msg and zap the args. d = dict(record.__dict__) d['msg'] = record.getMessage() d['args'] = None d['exc_info'] = None s = pickle.dumps(d, 1) slen = struct.pack(">L", len(s)) return slen + s def handleError(self, record): """ Handle an error during logging. An error has occurred during logging. Most likely cause - connection lost. Close the socket so that we can retry on the next event. """ if self.closeOnError and self.sock: self.sock.close() self.sock = None #try to reconnect next time else: logging.Handler.handleError(self, record) def emit(self, record): """ Emit a record. Pickles the record and writes it to the socket in binary format. If there is an error with the socket, silently drop the packet. If there was a problem with the socket, re-establishes the socket. """ try: s = self.makePickle(record) self.send(s) except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) def close(self): """ Closes the socket. """ self.acquire() try: if self.sock: self.sock.close() self.sock = None logging.Handler.close(self) finally: self.release() class DatagramHandler(SocketHandler): """ A handler class which writes logging records, in pickle format, to a datagram socket. The pickle which is sent is that of the LogRecord's attribute dictionary (__dict__), so that the receiver does not need to have the logging module installed in order to process the logging event. To unpickle the record at the receiving end into a LogRecord, use the makeLogRecord function. """ def __init__(self, host, port): """ Initializes the handler with a specific host address and port. """ SocketHandler.__init__(self, host, port) self.closeOnError = False def makeSocket(self): """ The factory method of SocketHandler is here overridden to create a UDP socket (SOCK_DGRAM). 
""" s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return s def send(self, s): """ Send a pickled string to a socket. This function no longer allows for partial sends which can happen when the network is busy - UDP does not guarantee delivery and can deliver packets out of sequence. """ if self.sock is None: self.createSocket() self.sock.sendto(s, (self.host, self.port)) class SysLogHandler(logging.Handler): """ A handler class which sends formatted logging records to a syslog server. Based on Sam Rushing's syslog module: http://www.nightmare.com/squirl/python-ext/misc/syslog.py Contributed by Nicolas Untz (after which minor refactoring changes have been made). """ # from <linux/sys/syslog.h>: # ====================================================================== # priorities/facilities are encoded into a single 32-bit quantity, where # the bottom 3 bits are the priority (0-7) and the top 28 bits are the # facility (0-big number). Both the priorities and the facilities map # roughly one-to-one to strings in the syslogd(8) source code. This # mapping is included in this file. # # priorities (these are ordered) LOG_EMERG = 0 # system is unusable LOG_ALERT = 1 # action must be taken immediately LOG_CRIT = 2 # critical conditions LOG_ERR = 3 # error conditions LOG_WARNING = 4 # warning conditions LOG_NOTICE = 5 # normal but significant condition LOG_INFO = 6 # informational LOG_DEBUG = 7 # debug-level messages # facility codes LOG_KERN = 0 # kernel messages LOG_USER = 1 # random user-level messages LOG_MAIL = 2 # mail system LOG_DAEMON = 3 # system daemons LOG_AUTH = 4 # security/authorization messages LOG_SYSLOG = 5 # messages generated internally by syslogd LOG_LPR = 6 # line printer subsystem LOG_NEWS = 7 # network news subsystem LOG_UUCP = 8 # UUCP subsystem LOG_CRON = 9 # clock daemon LOG_AUTHPRIV = 10 # security/authorization messages (private) LOG_FTP = 11 # FTP daemon # other codes through 15 reserved for system use LOG_LOCAL0 = 16 # reserved for local use LOG_LOCAL1 = 17 # reserved for local use LOG_LOCAL2 = 18 # reserved for local use LOG_LOCAL3 = 19 # reserved for local use LOG_LOCAL4 = 20 # reserved for local use LOG_LOCAL5 = 21 # reserved for local use LOG_LOCAL6 = 22 # reserved for local use LOG_LOCAL7 = 23 # reserved for local use priority_names = { "alert": LOG_ALERT, "crit": LOG_CRIT, "critical": LOG_CRIT, "debug": LOG_DEBUG, "emerg": LOG_EMERG, "err": LOG_ERR, "error": LOG_ERR, # DEPRECATED "info": LOG_INFO, "notice": LOG_NOTICE, "panic": LOG_EMERG, # DEPRECATED "warn": LOG_WARNING, # DEPRECATED "warning": LOG_WARNING, } facility_names = { "auth": LOG_AUTH, "authpriv": LOG_AUTHPRIV, "cron": LOG_CRON, "daemon": LOG_DAEMON, "ftp": LOG_FTP, "kern": LOG_KERN, "lpr": LOG_LPR, "mail": LOG_MAIL, "news": LOG_NEWS, "security": LOG_AUTH, # DEPRECATED "syslog": LOG_SYSLOG, "user": LOG_USER, "uucp": LOG_UUCP, "local0": LOG_LOCAL0, "local1": LOG_LOCAL1, "local2": LOG_LOCAL2, "local3": LOG_LOCAL3, "local4": LOG_LOCAL4, "local5": LOG_LOCAL5, "local6": LOG_LOCAL6, "local7": LOG_LOCAL7, } #The map below appears to be trivially lowercasing the key. However, #there's more to it than meets the eye - in some locales, lowercasing #gives unexpected results. See SF #1524081: in the Turkish locale, #"INFO".lower() != "info" priority_map = { "DEBUG" : "debug", "INFO" : "info", "WARNING" : "warning", "ERROR" : "error", "CRITICAL" : "critical" } def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER, socktype=None): """ Initialize a handler. 
If address is specified as a string, a UNIX socket is used. To log to a local syslogd, "SysLogHandler(address="/dev/log")" can be used. If facility is not specified, LOG_USER is used. """ logging.Handler.__init__(self) self.address = address self.facility = facility self.socktype = socktype if isinstance(address, str): self.unixsocket = True self._connect_unixsocket(address) else: self.unixsocket = False if socktype is None: socktype = socket.SOCK_DGRAM self.socket = socket.socket(socket.AF_INET, socktype) if socktype == socket.SOCK_STREAM: self.socket.connect(address) self.socktype = socktype self.formatter = None def _connect_unixsocket(self, address): use_socktype = self.socktype if use_socktype is None: use_socktype = socket.SOCK_DGRAM self.socket = socket.socket(socket.AF_UNIX, use_socktype) try: self.socket.connect(address) # it worked, so set self.socktype to the used type self.socktype = use_socktype except socket.error: self.socket.close() if self.socktype is not None: # user didn't specify falling back, so fail raise use_socktype = socket.SOCK_STREAM self.socket = socket.socket(socket.AF_UNIX, use_socktype) try: self.socket.connect(address) # it worked, so set self.socktype to the used type self.socktype = use_socktype except socket.error: self.socket.close() raise def encodePriority(self, facility, priority): """ Encode the facility and priority. You can pass in strings or integers - if strings are passed, the facility_names and priority_names mapping dictionaries are used to convert them to integers. """ if isinstance(facility, str): facility = self.facility_names[facility] if isinstance(priority, str): priority = self.priority_names[priority] return (facility << 3) | priority def close (self): """ Closes the socket. """ self.acquire() try: self.socket.close() logging.Handler.close(self) finally: self.release() def mapPriority(self, levelName): """ Map a logging level name to a key in the priority_names map. This is useful in two scenarios: when custom levels are being used, and in the case where you can't do a straightforward mapping by lowercasing the logging level name because of locale- specific issues (see SF #1524081). """ return self.priority_map.get(levelName, "warning") ident = '' # prepended to all messages append_nul = True # some old syslog daemons expect a NUL terminator def emit(self, record): """ Emit a record. The record is formatted, and then sent to the syslog server. If exception information is present, it is NOT sent to the server. """ msg = self.format(record) if self.ident: msg = self.ident + msg if self.append_nul: msg += '\000' """ We need to convert record level to lowercase, maybe this will change in the future. """ prio = '<%d>' % self.encodePriority(self.facility, self.mapPriority(record.levelname)) prio = prio.encode('utf-8') # Message is a string. Convert to bytes as required by RFC 5424 msg = msg.encode('utf-8') msg = prio + msg try: if self.unixsocket: try: self.socket.send(msg) except socket.error: self.socket.close() self._connect_unixsocket(self.address) self.socket.send(msg) elif self.socktype == socket.SOCK_DGRAM: self.socket.sendto(msg, self.address) else: self.socket.sendall(msg) except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) class SMTPHandler(logging.Handler): """ A handler class which sends an SMTP email for each logging event. """ def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None, secure=None, timeout=5.0): """ Initialize the handler. 
Initialize the instance with the from and to addresses and subject line of the email. To specify a non-standard SMTP port, use the (host, port) tuple format for the mailhost argument. To specify authentication credentials, supply a (username, password) tuple for the credentials argument. To specify the use of a secure protocol (TLS), pass in a tuple for the secure argument. This will only be used when authentication credentials are supplied. The tuple will be either an empty tuple, or a single-value tuple with the name of a keyfile, or a 2-value tuple with the names of the keyfile and certificate file. (This tuple is passed to the `starttls` method). A timeout in seconds can be specified for the SMTP connection (the default is one second). """ logging.Handler.__init__(self) if isinstance(mailhost, tuple): self.mailhost, self.mailport = mailhost else: self.mailhost, self.mailport = mailhost, None if isinstance(credentials, tuple): self.username, self.password = credentials else: self.username = None self.fromaddr = fromaddr if isinstance(toaddrs, str): toaddrs = [toaddrs] self.toaddrs = toaddrs self.subject = subject self.secure = secure self.timeout = timeout def getSubject(self, record): """ Determine the subject for the email. If you want to specify a subject line which is record-dependent, override this method. """ return self.subject def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.quit() except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) class NTEventLogHandler(logging.Handler): """ A handler class which sends events to the NT Event Log. Adds a registry entry for the specified application name. If no dllname is provided, win32service.pyd (which contains some basic message placeholders) is used. Note that use of these placeholders will make your event logs big, as the entire message source is held in the log. If you want slimmer logs, you have to pass in the name of your own DLL which contains the message definitions you want to use in the event log. 
""" def __init__(self, appname, dllname=None, logtype="Application"): logging.Handler.__init__(self) try: import win32evtlogutil, win32evtlog self.appname = appname self._welu = win32evtlogutil if not dllname: dllname = os.path.split(self._welu.__file__) dllname = os.path.split(dllname[0]) dllname = os.path.join(dllname[0], r'win32service.pyd') self.dllname = dllname self.logtype = logtype self._welu.AddSourceToRegistry(appname, dllname, logtype) self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE self.typemap = { logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, } except ImportError: print("The Python Win32 extensions for NT (service, event "\ "logging) appear not to be available.") self._welu = None def getMessageID(self, record): """ Return the message ID for the event record. If you are using your own messages, you could do this by having the msg passed to the logger being an ID rather than a formatting string. Then, in here, you could use a dictionary lookup to get the message ID. This version returns 1, which is the base message ID in win32service.pyd. """ return 1 def getEventCategory(self, record): """ Return the event category for the record. Override this if you want to specify your own categories. This version returns 0. """ return 0 def getEventType(self, record): """ Return the event type for the record. Override this if you want to specify your own types. This version does a mapping using the handler's typemap attribute, which is set up in __init__() to a dictionary which contains mappings for DEBUG, INFO, WARNING, ERROR and CRITICAL. If you are using your own levels you will either need to override this method or place a suitable dictionary in the handler's typemap attribute. """ return self.typemap.get(record.levelno, self.deftype) def emit(self, record): """ Emit a record. Determine the message ID, event category and event type. Then log the message in the NT event log. """ if self._welu: try: id = self.getMessageID(record) cat = self.getEventCategory(record) type = self.getEventType(record) msg = self.format(record) self._welu.ReportEvent(self.appname, id, cat, type, [msg]) except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) def close(self): """ Clean up this handler. You can remove the application name from the registry as a source of event log entries. However, if you do this, you will not be able to see the events as you intended in the Event Log Viewer - it needs to be able to access the registry to get the DLL name. """ #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) logging.Handler.close(self) class HTTPHandler(logging.Handler): """ A class which sends records to a Web server, using either GET or POST semantics. """ def __init__(self, host, url, method="GET", secure=False, credentials=None): """ Initialize the instance with the host, the request URL, and the method ("GET" or "POST") """ logging.Handler.__init__(self) method = method.upper() if method not in ["GET", "POST"]: raise ValueError("method must be GET or POST") self.host = host self.url = url self.method = method self.secure = secure self.credentials = credentials def mapLogRecord(self, record): """ Default implementation of mapping the log record into a dict that is sent as the CGI data. Overwrite in your class. Contributed by Franz Glasner. 
""" return record.__dict__ def emit(self, record): """ Emit a record. Send the record to the Web server as a percent-encoded dictionary """ try: import http.client, urllib.parse host = self.host if self.secure: h = http.client.HTTPSConnection(host) else: h = http.client.HTTPConnection(host) url = self.url data = urllib.parse.urlencode(self.mapLogRecord(record)) if self.method == "GET": if (url.find('?') >= 0): sep = '&' else: sep = '?' url = url + "%c%s" % (sep, data) h.putrequest(self.method, url) # support multiple hosts on one IP address... # need to strip optional :port from host, if present i = host.find(":") if i >= 0: host = host[:i] h.putheader("Host", host) if self.method == "POST": h.putheader("Content-type", "application/x-www-form-urlencoded") h.putheader("Content-length", str(len(data))) if self.credentials: import base64 s = ('u%s:%s' % self.credentials).encode('utf-8') s = 'Basic ' + base64.b64encode(s).strip() h.putheader('Authorization', s) h.endheaders() if self.method == "POST": h.send(data.encode('utf-8')) h.getresponse() #can't do anything with the result except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) class BufferingHandler(logging.Handler): """ A handler class which buffers logging records in memory. Whenever each record is added to the buffer, a check is made to see if the buffer should be flushed. If it should, then flush() is expected to do what's needed. """ def __init__(self, capacity): """ Initialize the handler with the buffer size. """ logging.Handler.__init__(self) self.capacity = capacity self.buffer = [] def shouldFlush(self, record): """ Should the handler flush its buffer? Returns true if the buffer is up to capacity. This method can be overridden to implement custom flushing strategies. """ return (len(self.buffer) >= self.capacity) def emit(self, record): """ Emit a record. Append the record. If shouldFlush() tells us to, call flush() to process the buffer. """ self.buffer.append(record) if self.shouldFlush(record): self.flush() def flush(self): """ Override to implement custom flushing behaviour. This version just zaps the buffer to empty. """ self.acquire() try: self.buffer = [] finally: self.release() def close(self): """ Close the handler. This version just flushes and chains to the parent class' close(). """ self.flush() logging.Handler.close(self) class MemoryHandler(BufferingHandler): """ A handler class which buffers logging records in memory, periodically flushing them to a target handler. Flushing occurs whenever the buffer is full, or when an event of a certain severity or greater is seen. """ def __init__(self, capacity, flushLevel=logging.ERROR, target=None): """ Initialize the handler with the buffer size, the level at which flushing should occur and an optional target. Note that without a target being set either here or via setTarget(), a MemoryHandler is no use to anyone! """ BufferingHandler.__init__(self, capacity) self.flushLevel = flushLevel self.target = target def shouldFlush(self, record): """ Check for buffer full or a record at the flushLevel or higher. """ return (len(self.buffer) >= self.capacity) or \ (record.levelno >= self.flushLevel) def setTarget(self, target): """ Set the target handler for this handler. """ self.target = target def flush(self): """ For a MemoryHandler, flushing means just sending the buffered records to the target, if there is one. Override if you want different behaviour. The record buffer is also cleared by this operation. 
""" self.acquire() try: if self.target: for record in self.buffer: self.target.handle(record) self.buffer = [] finally: self.release() def close(self): """ Flush, set the target to None and lose the buffer. """ self.flush() self.acquire() try: self.target = None BufferingHandler.close(self) finally: self.release() class QueueHandler(logging.Handler): """ This handler sends events to a queue. Typically, it would be used together with a multiprocessing Queue to centralise logging to file in one process (in a multi-process application), so as to avoid file write contention between processes. This code is new in Python 3.2, but this class can be copy pasted into user code for use with earlier Python versions. """ def __init__(self, queue): """ Initialise an instance, using the passed queue. """ logging.Handler.__init__(self) self.queue = queue def enqueue(self, record): """ Enqueue a record. The base implementation uses put_nowait. You may want to override this method if you want to use blocking, timeouts or custom queue implementations. """ self.queue.put_nowait(record) def prepare(self, record): """ Prepares a record for queuing. The object returned by this method is enqueued. The base implementation formats the record to merge the message and arguments, and removes unpickleable items from the record in-place. You might want to override this method if you want to convert the record to a dict or JSON string, or send a modified copy of the record while leaving the original intact. """ # The format operation gets traceback text into record.exc_text # (if there's exception data), and also puts the message into # record.message. We can then use this to replace the original # msg + args, as these might be unpickleable. We also zap the # exc_info attribute, as it's no longer needed and, if not None, # will typically not be pickleable. self.format(record) record.msg = record.message record.args = None record.exc_info = None return record def emit(self, record): """ Emit a record. Writes the LogRecord to the queue, preparing it for pickling first. """ try: self.enqueue(self.prepare(record)) except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) if threading: class QueueListener(object): """ This class implements an internal threaded listener which watches for LogRecords being added to a queue, removes them and passes them to a list of handlers for processing. """ _sentinel = None def __init__(self, queue, *handlers): """ Initialise an instance with the specified queue and handlers. """ self.queue = queue self.handlers = handlers self._stop = threading.Event() self._thread = None def dequeue(self, block): """ Dequeue a record and return it, optionally blocking. The base implementation uses get. You may want to override this method if you want to use timeouts or work with custom queue implementations. """ return self.queue.get(block) def start(self): """ Start the listener. This starts up a background thread to monitor the queue for LogRecords to process. """ self._thread = t = threading.Thread(target=self._monitor) t.setDaemon(True) t.start() def prepare(self , record): """ Prepare a record for handling. This method just returns the passed-in record. You may want to override this method if you need to do any custom marshalling or manipulation of the record before passing it to the handlers. """ return record def handle(self, record): """ Handle a record. This just loops through the handlers offering them the record to handle. 
""" record = self.prepare(record) for handler in self.handlers: handler.handle(record) def _monitor(self): """ Monitor the queue for records, and ask the handler to deal with them. This method runs on a separate, internal thread. The thread will terminate if it sees a sentinel object in the queue. """ q = self.queue has_task_done = hasattr(q, 'task_done') while not self._stop.isSet(): try: record = self.dequeue(True) if record is self._sentinel: break self.handle(record) if has_task_done: q.task_done() except queue.Empty: pass # There might still be records in the queue. while True: try: record = self.dequeue(False) if record is self._sentinel: break self.handle(record) if has_task_done: q.task_done() except queue.Empty: break def enqueue_sentinel(self): """ This is used to enqueue the sentinel record. The base implementation uses put_nowait. You may want to override this method if you want to use timeouts or work with custom queue implementations. """ self.queue.put_nowait(self._sentinel) def stop(self): """ Stop the listener. This asks the thread to terminate, and then waits for it to do so. Note that if you don't call this before your application exits, there may be some records still left on the queue, which won't be processed. """ self._stop.set() self.enqueue_sentinel() self._thread.join() self._thread = None
jiwanlimbu/aura
refs/heads/master
keystone/common/sql/expand_repo/versions/004_reset_password_created_at.py
40
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def upgrade(migrate_engine):
    pass
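The script above is deliberately a no-op: it only reserves expand version 004. For illustration, a hypothetical non-trivial expand step under sqlalchemy-migrate might add a nullable column so the schema change stays additive during a rolling upgrade; the table and column names below are invented and not part of the original migration.

import sqlalchemy as sql
from migrate import changeset  # noqa: activates Table.create_column


def upgrade(migrate_engine):
    # Hypothetical additive change; keystone's real script at this
    # version intentionally does nothing.
    meta = sql.MetaData()
    meta.bind = migrate_engine
    user = sql.Table('user', meta, autoload=True)
    created_at = sql.Column('created_at', sql.DateTime(), nullable=True)
    user.create_column(created_at)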
techdragon/django
refs/heads/master
tests/expressions/models.py
261
""" Tests for F() query expression syntax. """ from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Employee(models.Model): firstname = models.CharField(max_length=50) lastname = models.CharField(max_length=50) salary = models.IntegerField(blank=True, null=True) def __str__(self): return '%s %s' % (self.firstname, self.lastname) @python_2_unicode_compatible class Company(models.Model): name = models.CharField(max_length=100) num_employees = models.PositiveIntegerField() num_chairs = models.PositiveIntegerField() ceo = models.ForeignKey( Employee, models.CASCADE, related_name='company_ceo_set') point_of_contact = models.ForeignKey( Employee, models.SET_NULL, related_name='company_point_of_contact_set', null=True) def __str__(self): return self.name @python_2_unicode_compatible class Number(models.Model): integer = models.BigIntegerField(db_column='the_integer') float = models.FloatField(null=True, db_column='the_float') def __str__(self): return '%i, %.3f' % (self.integer, self.float) class Experiment(models.Model): name = models.CharField(max_length=24) assigned = models.DateField() completed = models.DateField() estimated_time = models.DurationField() start = models.DateTimeField() end = models.DateTimeField() class Meta: ordering = ('name',) def duration(self): return self.end - self.start @python_2_unicode_compatible class Time(models.Model): time = models.TimeField(null=True) def __str__(self): return "%s" % self.time @python_2_unicode_compatible class UUID(models.Model): uuid = models.UUIDField(null=True) def __str__(self): return "%s" % self.uuid
koobonil/Boss2D
refs/heads/master
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/distributions/python/ops/deterministic.py
64
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Deterministic distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import distribution __all__ = [ "Deterministic", "VectorDeterministic", ] @six.add_metaclass(abc.ABCMeta) class _BaseDeterministic(distribution.Distribution): """Base class for Deterministic distributions.""" def __init__(self, loc, atol=None, rtol=None, is_vector=False, validate_args=False, allow_nan_stats=True, name="_BaseDeterministic"): """Initialize a batch of `_BaseDeterministic` distributions. The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf` computations, e.g. due to floating-point error. ``` pmf(x; loc) = 1, if Abs(x - loc) <= atol + rtol * Abs(loc), = 0, otherwise. ``` Args: loc: Numeric `Tensor`. The point (or batch of points) on which this distribution is supported. atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable shape. The absolute tolerance for comparing closeness to `loc`. Default is `0`. rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable shape. The relative tolerance for comparing closeness to `loc`. Default is `0`. is_vector: Python `bool`. If `True`, this is for `VectorDeterministic`, else `Deterministic`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: ValueError: If `loc` is a scalar. """ parameters = locals() with ops.name_scope(name, values=[loc, atol, rtol]): loc = ops.convert_to_tensor(loc, name="loc") if is_vector and validate_args: msg = "Argument loc must be at least rank 1." 
if loc.get_shape().ndims is not None: if loc.get_shape().ndims < 1: raise ValueError(msg) else: loc = control_flow_ops.with_dependencies( [check_ops.assert_rank_at_least(loc, 1, message=msg)], loc) self._loc = loc super(_BaseDeterministic, self).__init__( dtype=self._loc.dtype, reparameterization_type=distribution.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._loc], name=name) self._atol = self._get_tol(atol) self._rtol = self._get_tol(rtol) # Avoid using the large broadcast with self.loc if possible. if rtol is None: self._slack = self.atol else: self._slack = self.atol + self.rtol * math_ops.abs(self.loc) def _get_tol(self, tol): if tol is None: return ops.convert_to_tensor(0, dtype=self.loc.dtype) tol = ops.convert_to_tensor(tol, dtype=self.loc.dtype) if self.validate_args: tol = control_flow_ops.with_dependencies([ check_ops.assert_non_negative( tol, message="Argument 'tol' must be non-negative") ], tol) return tol @property def loc(self): """Point (or batch of points) at which this distribution is supported.""" return self._loc @property def atol(self): """Absolute tolerance for comparing points to `self.loc`.""" return self._atol @property def rtol(self): """Relative tolerance for comparing points to `self.loc`.""" return self._rtol def _mean(self): return array_ops.identity(self.loc) def _variance(self): return array_ops.zeros_like(self.loc) def _mode(self): return self.mean() def _sample_n(self, n, seed=None): # pylint: disable=unused-arg n_static = tensor_util.constant_value(ops.convert_to_tensor(n)) if n_static is not None and self.loc.get_shape().ndims is not None: ones = [1] * self.loc.get_shape().ndims multiples = [n_static] + ones else: ones = array_ops.ones_like(array_ops.shape(self.loc)) multiples = array_ops.concat(([n], ones), axis=0) return array_ops.tile(self.loc[array_ops.newaxis, ...], multiples=multiples) class Deterministic(_BaseDeterministic): """Scalar `Deterministic` distribution on the real line. The scalar `Deterministic` distribution is parameterized by a [batch] point `loc` on the real line. The distribution is supported at this point only, and corresponds to a random variable that is constant, equal to `loc`. See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution). #### Mathematical Details The probability mass function (pmf) and cumulative distribution function (cdf) are ```none pmf(x; loc) = 1, if x == loc, else 0 cdf(x; loc) = 1, if x >= loc, else 0 ``` #### Examples ```python # Initialize a single Deterministic supported at zero. constant = tf.contrib.distributions.Deterministic(0.) constant.prob(0.) ==> 1. constant.prob(2.) ==> 0. # Initialize a [2, 2] batch of scalar constants. loc = [[0., 1.], [2., 3.]] x = [[0., 1.1], [1.99, 3.]] constant = tf.contrib.distributions.Deterministic(loc) constant.prob(x) ==> [[1., 0.], [0., 1.]] ``` """ def __init__(self, loc, atol=None, rtol=None, validate_args=False, allow_nan_stats=True, name="Deterministic"): """Initialize a scalar `Deterministic` distribution. The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf` computations, e.g. due to floating-point error. ``` pmf(x; loc) = 1, if Abs(x - loc) <= atol + rtol * Abs(loc), = 0, otherwise. ``` Args: loc: Numeric `Tensor` of shape `[B1, ..., Bb]`, with `b >= 0`. The point (or batch of points) on which this distribution is supported. atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable shape. 
The absolute tolerance for comparing closeness to `loc`. Default is `0`. rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable shape. The relative tolerance for comparing closeness to `loc`. Default is `0`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. """ super(Deterministic, self).__init__( loc, atol=atol, rtol=rtol, validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name) def _batch_shape_tensor(self): return array_ops.shape(self.loc) def _batch_shape(self): return self.loc.get_shape() def _event_shape_tensor(self): return constant_op.constant([], dtype=dtypes.int32) def _event_shape(self): return tensor_shape.scalar() def _prob(self, x): return math_ops.cast( math_ops.abs(x - self.loc) <= self._slack, dtype=self.dtype) def _cdf(self, x): return math_ops.cast(x >= self.loc - self._slack, dtype=self.dtype) class VectorDeterministic(_BaseDeterministic): """Vector `Deterministic` distribution on `R^k`. The `VectorDeterministic` distribution is parameterized by a [batch] point `loc in R^k`. The distribution is supported at this point only, and corresponds to a random variable that is constant, equal to `loc`. See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution). #### Mathematical Details The probability mass function (pmf) is ```none pmf(x; loc) = 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)], = 0, otherwise. ``` #### Examples ```python # Initialize a single VectorDeterministic supported at [0., 2.] in R^2. constant = tf.contrib.distributions.Deterministic([0., 2.]) constant.prob([0., 2.]) ==> 1. constant.prob([0., 3.]) ==> 0. # Initialize a [3] batch of constants on R^2. loc = [[0., 1.], [2., 3.], [4., 5.]] constant = constant_lib.VectorDeterministic(loc) constant.prob([[0., 1.], [1.9, 3.], [3.99, 5.]]) ==> [1., 0., 0.] ``` """ def __init__(self, loc, atol=None, rtol=None, validate_args=False, allow_nan_stats=True, name="VectorDeterministic"): """Initialize a `VectorDeterministic` distribution on `R^k`, for `k >= 0`. Note that there is only one point in `R^0`, the "point" `[]`. So if `k = 0` then `self.prob([]) == 1`. The `atol` and `rtol` parameters allow for some slack in `pmf` computations, e.g. due to floating-point error. ``` pmf(x; loc) = 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)], = 0, otherwise ``` Args: loc: Numeric `Tensor` of shape `[B1, ..., Bb, k]`, with `b >= 0`, `k >= 0` The point (or batch of points) on which this distribution is supported. atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable shape. The absolute tolerance for comparing closeness to `loc`. Default is `0`. rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable shape. The relative tolerance for comparing closeness to `loc`. Default is `0`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. 
        When `True`, statistics (e.g., mean, mode, variance) use the value
        "`NaN`" to indicate the result is undefined. When `False`, an
        exception is raised if one or more of the statistic's batch members
        are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    super(VectorDeterministic, self).__init__(
        loc,
        atol=atol,
        rtol=rtol,
        is_vector=True,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=name)

  def _batch_shape_tensor(self):
    return array_ops.shape(self.loc)[:-1]

  def _batch_shape(self):
    return self.loc.get_shape()[:-1]

  def _event_shape_tensor(self):
    return array_ops.shape(self.loc)[-1]

  def _event_shape(self):
    return self.loc.get_shape()[-1:]

  def _prob(self, x):
    if self.validate_args:
      is_vector_check = check_ops.assert_rank_at_least(x, 1)
      right_vec_space_check = check_ops.assert_equal(
          self.event_shape_tensor(),
          array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1),
          message=
          "Argument 'x' not defined in the same space R^k as this distribution")
      with ops.control_dependencies([is_vector_check]):
        with ops.control_dependencies([right_vec_space_check]):
          x = array_ops.identity(x)
    return math_ops.cast(
        math_ops.reduce_all(math_ops.abs(x - self.loc) <= self._slack, axis=-1),
        dtype=self.dtype)
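A short usage sketch for the distribution classes above, assuming TF 1.x graph mode as in the rest of this file; the loc, atol and sample count are arbitrary illustrative values:

import tensorflow as tf

ds = tf.contrib.distributions

# A batch of two point masses with a small absolute tolerance.
dist = ds.Deterministic(loc=[0., 2.], atol=0.1)

with tf.Session() as sess:
    # |0.05 - 0.| <= 0.1 -> 1.;  |3. - 2.| > 0.1 -> 0.
    print(sess.run(dist.prob([0.05, 3.])))   # [1., 0.]
    # Sampling just tiles loc along a new leading axis: shape [3, 2] here.
    print(sess.run(dist.sample(3)))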
susilehtola/psi4
refs/heads/master
tests/pytests/conftest.py
7
import pytest


def pytest_configure(config):
    # Register marks to avoid warnings in psi4.test()
    # sync with setup.cfg
    config.addinivalue_line("markers", "check_triplet")
    config.addinivalue_line("markers", "dft")
    config.addinivalue_line("markers", "gga")
    config.addinivalue_line("markers", "hf")
    config.addinivalue_line("markers", "hyb_gga")
    config.addinivalue_line("markers", "hyb_gga_lrc")
    config.addinivalue_line("markers", "lda")
    config.addinivalue_line("markers", "long")
    config.addinivalue_line("markers", "mdi")
    config.addinivalue_line("markers", "mp2")
    config.addinivalue_line("markers", "restricted_singlet")
    config.addinivalue_line("markers", "restricted_triplet")
    config.addinivalue_line("markers", "RPA")
    config.addinivalue_line("markers", "scf")
    config.addinivalue_line("markers", """slow: marks tests as slow (deselect with '-m "not slow"')""")
    config.addinivalue_line("markers", "smoke")
    config.addinivalue_line("markers", "solver")
    config.addinivalue_line("markers", "stress")
    config.addinivalue_line("markers", "TDA")
    config.addinivalue_line("markers", "tdscf")
    config.addinivalue_line("markers", "quick")
    config.addinivalue_line("markers", "unittest")
    config.addinivalue_line("markers", "unrestricted")


@pytest.fixture(scope="session", autouse=True)
def set_up_overall(request):
    import psi4

    psi4.set_output_file("pytest_output.dat", False)
    request.addfinalizer(tear_down)


@pytest.fixture(scope="function", autouse=True)
def set_up():
    import psi4

    psi4.core.clean()
    psi4.core.clean_timers()
    psi4.core.clean_options()
    psi4.set_output_file("pytest_output.dat", True)


def tear_down():
    import os
    import glob
    import psi4

    psi4.core.close_outfile()
    patterns = [
        "cavity.*",
        "grid*",
        "pytest_output.*h5",
        "pytest_output.dat",
        "pytest_output.*grad",
        "*pcmsolver.inp",
        "PEDRA.OUT*",
        "timer.dat",
        "FCIDUMP_SCF",
        "FCIDUMP_MP2",
        "*.fchk",
    ]
    pytest_scratches = []
    for pat in patterns:
        pytest_scratches.extend(glob.glob(pat))
    for fl in pytest_scratches:
        try:
            os.unlink(fl)
        except (OSError, PermissionError):
            pass
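A test module consuming this conftest might look like the sketch below; the geometry, method and reference energy are illustrative, and the autouse fixtures above handle state cleanup and output redirection automatically:

import pytest
import psi4


@pytest.mark.smoke
@pytest.mark.quick
def test_scf_water():
    psi4.geometry('''
    O
    H 1 0.96
    H 1 0.96 2 104.5
    ''')
    # RHF/cc-pVDZ water lands near -76.0 hartree; the tolerance is kept
    # loose on purpose since the numbers here are only indicative.
    energy = psi4.energy("scf/cc-pvdz")
    assert energy == pytest.approx(-76.0, abs=0.1)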
Antiun/connector-magento
refs/heads/8.0
magentoerpconnect/related_action.py
11
# -*- coding: utf-8 -*-
##############################################################################
#
#    Author: Guewen Baconnier
#    Copyright 2014 Camptocamp SA
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

"""
Related Actions for Magento:

Related actions are associated with jobs.
When called on a job, they will return an action to the client.
"""

import functools
from openerp import exceptions, _
from openerp.addons.connector import related_action
from .connector import get_environment
from .unit.backend_adapter import GenericAdapter
from .unit.binder import MagentoBinder

unwrap_binding = functools.partial(related_action.unwrap_binding,
                                   binder_class=MagentoBinder)


def link(session, job, backend_id_pos=2, magento_id_pos=3):
    """ Open a Magento URL on the admin page to view/edit the record
    related to the job.
    """
    binding_model = job.args[0]
    # shift one to the left because session is not in job.args
    backend_id = job.args[backend_id_pos - 1]
    magento_id = job.args[magento_id_pos - 1]
    env = get_environment(session, binding_model, backend_id)
    adapter = env.get_connector_unit(GenericAdapter)
    try:
        url = adapter.admin_url(magento_id)
    except ValueError:
        raise exceptions.Warning(
            _('No admin URL configured on the backend or '
              'no admin path is defined for this record.')
        )

    action = {
        'type': 'ir.actions.act_url',
        'target': 'new',
        'url': url,
    }
    return action
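A related action is attached where a job is declared, not in this module. Below is a hedged sketch of how link() or unwrap_binding could be wired onto a job, assuming the job and related_action decorators exposed by the connector framework; treat the import path and signature as illustrative rather than the project's exact code:

from openerp.addons.connector.queue.job import job, related_action


@job
@related_action(action=link)
def export_record(session, model_name, binding_id, fields=None):
    """ Illustrative job: clicking its related action in the UI would
    open the Magento admin URL resolved by link() above. The body is
    omitted because only the decorator wiring is the point here. """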
highweb-project/highweb-webcl-html5spec
refs/heads/highweb-20160310
tools/perf/benchmarks/start_with_ext.py
14
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from core import perf_benchmark

from measurements import startup
import page_sets
from telemetry import benchmark


class _StartWithExt(perf_benchmark.PerfBenchmark):
  """Base benchmark for testing startup with extensions."""
  page_set = page_sets.BlankPageSetWithExtensionProfile
  tag = None

  @classmethod
  def Name(cls):
    return 'start_with_ext.blank_page'

  @classmethod
  def ValueCanBeAddedPredicate(cls, _, is_first_result):
    return not is_first_result

  def SetExtraBrowserOptions(self, options):
    options.disable_default_apps = False

  def CreatePageTest(self, _):
    is_cold = (self.tag == 'cold')
    return startup.Startup(cold=is_cold)


@benchmark.Enabled('has tabs')
@benchmark.Disabled('mac')  # crbug.com/563424
@benchmark.Disabled('win', 'linux', 'reference', 'android')
class StartWithExtCold(_StartWithExt):
  """Measure time to start Chrome cold with extensions."""
  options = {'pageset_repeat': 5}
  tag = 'cold'

  @classmethod
  def Name(cls):
    return 'start_with_ext.cold.blank_page'


@benchmark.Enabled('has tabs')
@benchmark.Disabled('mac')  # crbug.com/563424
@benchmark.Disabled('win', 'linux', 'reference', 'android')
class StartWithExtWarm(_StartWithExt):
  """Measure time to start Chrome warm with extensions."""
  options = {'pageset_repeat': 20}
  tag = 'warm'

  @classmethod
  def Name(cls):
    return 'start_with_ext.warm.blank_page'
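As a quick illustrative check of the hierarchy above (assuming the telemetry imports resolve inside a Chromium checkout), the subclasses vary only the tag, the repeat count, and the benchmark name:

# Illustrative expectations for the benchmark classes defined above.
assert StartWithExtCold.Name() == 'start_with_ext.cold.blank_page'
assert StartWithExtWarm.Name() == 'start_with_ext.warm.blank_page'
assert StartWithExtCold.options['pageset_repeat'] == 5
assert StartWithExtWarm.options['pageset_repeat'] == 20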
Kongsea/tensorflow
refs/heads/master
tensorflow/contrib/seq2seq/python/ops/beam_search_ops.py
124
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Beam Search helper ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.seq2seq.ops import gen_beam_search_ops
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader

_beam_search_ops_so = loader.load_op_library(
    resource_loader.get_path_to_datafile("_beam_search_ops.so"))

gather_tree = gen_beam_search_ops.gather_tree
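The compiled op resolves finished beams by walking parent pointers backwards from the last step. A pure-Python sketch of that backtracking for a single batch entry (an illustration of the idea only, not the TF op itself, which additionally handles sequence lengths and end tokens):

def gather_tree_py(step_ids, parent_ids):
    # step_ids / parent_ids: [time][beam] token ids and parent beam indices.
    time_steps, beam_width = len(step_ids), len(step_ids[0])
    out = [[0] * beam_width for _ in range(time_steps)]
    for beam in range(beam_width):
        parent = beam
        for t in range(time_steps - 1, -1, -1):
            out[t][beam] = step_ids[t][parent]
            parent = parent_ids[t][parent]
    return out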
optiminimalist/pyphoto
refs/heads/master
pyphoto/flickr_uploader.py
1
""" pyphoto.flickr_uploader ~~~~~~~~~~~~~~~~ Uploads photos to flickr. :copyright: (c) 2013 by Michael Luckeneder. """ from __future__ import absolute_import import flickrapi import logging from urllib2 import HTTPError from flickrapi.exceptions import FlickrError from .retrier import retrier from functools import partial LOG = logging.getLogger() class FlickrUploader(object): """represents a flickr instance""" def __init__(self, **kwargs): self.sets = None self.photos = [] self.flickr_key = kwargs.pop('flickr_key') self.flickr_secret = kwargs.pop('flickr_secret') self.flickr_namespace = kwargs.get('flickr_namespace', "pyphoto") self.pyphoto_setid = None # get auth token and update sets, photos self.flickr = self._authorize_flickr() self.update_from_server() def update_from_server(self): """update set sand photos from flickr""" self.sets = self.get_sets() self.photos = self.get_photos() @retrier def get_sets(self): """get a dict of sets :return: dict() """ # get response sets_response = self.flickr.photosets_getList() set_list = list(sets_response)[0] sets = {} # generate set list for photo_set in set_list.iter('photoset'): title = photo_set.find('title').text description = photo_set.find('description').text set_info = {'id': photo_set.attrib['id'], 'date_update': photo_set.attrib['date_update'], 'description': description} sets[title] = set_info return sets @retrier def upload_file(self, filename, guid): """uploads an image with filename and guid""" res = self.flickr.upload(filename=filename, title=guid, description=guid, is_public=0, is_family=0, is_friend=0) # return the flickr id of the photo return res.find('photoid').text def get_photos(self, items_per_page=500): """get a list of photos :return: dict() """ # TODO compress if self.flickr_namespace not in self.sets: return [] # monkey patch for flickrapi problem pages = partial(self.flickr.photosets_getPhotos, photoset_id=self.sets[self.flickr_namespace]["id"], per_page=items_per_page) pages = retrier(pages) num_pages = page = 1 photos = {} while page <= num_pages: LOG.debug("Retrieving page %i of set %s", page, self.flickr_namespace) res = pages(page=page) num_pages = int(res.find('photoset').get('pages')) page += 1 for photo in list(res)[0].iter('photo'): photos[photo.attrib['title']] = photo.attrib # photo_list.append(list(res.find('photo'))) # for photo in res.find('photos'): # if photo.get('title') == guid: # return photo.get('id') # return None # photo_list = list(photos)[0] # photos = {} # for photo in photo_list: # photos[photo.attrib['title']] = photo.attrib return photos def get_photos_for_set(self, photoset_id): """get a list of photos :return: dict() """ # monkey patch for flickrapi problem pages = partial(self.flickr.photosets_getPhotos, photoset_id=photoset_id, per_page=500) pages = retrier(pages) num_pages = page = 1 photos = {} while page <= num_pages: LOG.debug("Retrieving page %i of set %s", page, photoset_id) res = pages(page=page) num_pages = int(res.find('photoset').get('pages')) page += 1 for photo in list(res)[0].iter('photo'): photos[photo.attrib['title']] = photo.attrib # photo_list.append(list(res.find('photo'))) # for photo in res.find('photos'): # if photo.get('title') == guid: # return photo.get('id') # return None # photo_list = list(photos)[0] # photos = {} # for photo in photo_list: # photos[photo.attrib['title']] = photo.attrib return photos @retrier def create_set(self, title, photo_id): """docstring""" if not (title and photo_id): return False res = self.flickr.photosets_create(title=title, 
primary_photo_id=photo_id) self.update_from_server() return res.find('photoset').attrib['id'] @retrier def add_photo_to_set(self, setid, photoid): """docstring""" self.flickr.photosets_addPhoto(photoset_id=setid, photo_id=photoid) return True def get_photo_by_guid(self, guid): """docstring""" if not guid in self.photos: return None return self.photos[guid]["id"] # monkey patch for flickrapi problem # pages = partial(self.flickr.photos_search, user_id="me", per_page=500) # num_pages = page = 1 # while page <= num_pages: # LOG.debug("Retrieving page %i" % (page)) # res = pages(page=page) # num_pages = int(res.find('photos').get('pages')) # page += 1 # for photo in res.find('photos'): # if photo.get('title') == guid: # return photo.get('id') # return None def delete_orphaned_photos(self): """delete photos that were uploaded but don't exist in any set """ LOG.info("deleting orphaned photos") set_photos = [] for k, v in self.sets.items(): if k == self.flickr_namespace: continue set_photos.extend(self.get_photos_for_set(v["id"])) orphaned_photos = [p for p in self.photos if p not in set_photos] for photoid in orphaned_photos: LOG.info("delete photo %s", photoid) self.delete_photo_by_guid(photoid) if len(orphaned_photos) > 0: self.update_from_server() def delete_photo_by_guid(self, guid): """deletes a photo by GUID string""" photo_id = self.get_photo_by_guid(guid) if not photo_id: return None retval = self.flickr.photos_delete(photo_id=photo_id) return retval def _authorize_flickr(self, perms="delete"): """taken from flickrapi source, generates auth token and authorizes everything """ flickr = flickrapi.FlickrAPI(self.flickr_key, self.flickr_secret) (token, frob) = flickr.get_token_part_one(perms=perms) if not token: raw_input("Press ENTER after you authorized this program") token = flickr.get_token_part_two((token, frob)) return flickrapi.FlickrAPI(self.flickr_key, self.flickr_secret, token=token) # def _initialize_main_set(self): def upload_photo(self, guid, setname, photopath): """upload a photo and handle set issues""" photoid = None photo_exists = False if guid not in self.photos: LOG.info(" Uploading Image: %s", guid) photoid = self.upload_file(photopath, guid) else: LOG.info(" Image exists: %s", guid) photo_exists = True photoid = self.get_photo_by_guid(guid) # if pyphoto set doesn't exist yet if self.flickr_namespace not in self.sets.keys(): self.pyphoto_setid = self.create_set( self.flickr_namespace, photoid) elif not photo_exists: if not self.pyphoto_setid: self.pyphoto_setid = self.sets[self.flickr_namespace]["id"] self.add_photo_to_set(self.pyphoto_setid, photoid) if setname not in self.sets.keys(): LOG.debug("Creating new set: %s", setname) setid = self.create_set(setname, photoid) elif not photo_exists: setid = self.sets[setname]["id"] self.add_photo_to_set(setid, photoid)
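A hypothetical usage sketch of the class above (the key, secret, GUID, set name, and path are placeholders; note the constructor triggers the interactive Flickr authorization flow):

uploader = FlickrUploader(flickr_key='KEY', flickr_secret='SECRET',
                          flickr_namespace='pyphoto')
uploader.upload_photo('photo-guid-1', 'Holiday 2013', '/tmp/photo.jpg')
uploader.delete_orphaned_photos()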
advx9600/pjsip-android
refs/heads/master
tests/pjsua/scripts-media-playrec/100_resample_lf_11_8.py
59
# $Id: 100_resample_lf_11_8.py 2052 2008-06-25 18:18:32Z nanang $
#
from inc_cfg import *

# simple test
test_param = TestParam(
    "Resample (large filter) 11 KHZ to 8 KHZ",
    [
        InstanceParam("endpt", "--null-audio --quality 10 --clock-rate 8000 --play-file wavs/input.11.wav --rec-file wavs/tmp.8.wav")
    ]
)
studio666/cjdns
refs/heads/master
node_build/dependencies/libuv/build/gyp/test/actions/gyptest-all.py
243
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies simple actions when using an explicit build target of 'all'.
"""

import glob
import os
import TestGyp

test = TestGyp.TestGyp(workdir='workarea_all')

test.run_gyp('actions.gyp', chdir='src')

test.relocate('src', 'relocate/src')

# Some gyp files use an action that mentions an output but never
# writes it as a means to making the action run on every build.  That
# doesn't mesh well with ninja's semantics.  TODO(evan): figure out
# how to work always-run actions in to ninja.
# Android also can't do this as it doesn't have order-only dependencies.
if test.format in ['ninja', 'android']:
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
else:
  # Test that an "always run" action increases a counter on multiple
  # invocations, and that a dependent action updates in step.
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')

  # The "always run" action only counts to 2, but the dependent target
  # will count forever if it's allowed to run. This verifies that the
  # dependent target only runs when the "always run" action generates
  # new output, not just because the "always run" ran.
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')

expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""

if test.format == 'xcode':
  chdir = 'relocate/src/subdir1'
else:
  chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)

test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")

expect = "Hello from generate_main.py\n"

if test.format == 'xcode':
  chdir = 'relocate/src/subdir3'
else:
  chdir = 'relocate/src'
test.run_built_executable('null_input', chdir=chdir, stdout=expect)


# Clean out files which may have been created if test.ALL was run.
def clean_dep_files():
  for file in (glob.glob('relocate/src/dep_*.txt') +
               glob.glob('relocate/src/deps_all_done_*.txt')):
    if os.path.exists(file):
      os.remove(file)

# Confirm our clean.
clean_dep_files()
test.must_not_exist('relocate/src/dep_1.txt')
test.must_not_exist('relocate/src/deps_all_done_first_123.txt')

# Make sure all deps finish before an action is run on a 'None' target.
# If using the Make builder, add -j to make things more difficult.
arguments = []
if test.format == 'make':
  arguments = ['-j']
test.build('actions.gyp', 'action_with_dependencies_123', chdir='relocate/src',
           arguments=arguments)
test.must_exist('relocate/src/deps_all_done_first_123.txt')

# Try again with a target that has deps in reverse.  Output files from
# previous tests deleted.  Confirm this execution did NOT run the ALL
# target which would mess up our dep tests.
clean_dep_files()
test.build('actions.gyp', 'action_with_dependencies_321', chdir='relocate/src',
           arguments=arguments)
test.must_exist('relocate/src/deps_all_done_first_321.txt')
test.must_not_exist('relocate/src/deps_all_done_first_123.txt')

test.pass_test()
wangjun/pinry
refs/heads/master
setup.py
7
import os
from setuptools import setup, find_packages


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


install_requires = [
    'Django',
    'Pillow',
    'South',
    'requests',
    'django-taggit',
    'django-images',
    'django-braces',
    'django_compressor',
    'django-tastypie==0.9.14',
]

setup(
    name="pinry",
    version="1.3.2",
    author="Pinry Contributors",
    author_email="[email protected]",
    description=("A tiling image board system for people who want to save, "
                 "tag, and share images, videos and webpages."),
    license="Simplified BSD",
    keywords="django tiling board tag share images pictures videos webpages",
    url="http://getpinry.com/",
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    long_description=read('README.rst'),
    tests_require=['mock', 'factory-boy>=1.3,<2.0'],
    install_requires=install_requires,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: BSD License",
        "Framework :: Django",
        "Environment :: Web Environment",
    ],
)
westinedu/wrgroups
refs/heads/master
django/contrib/gis/db/backends/oracle/base.py
623
from django.db.backends.oracle.base import *
from django.db.backends.oracle.base import DatabaseWrapper as OracleDatabaseWrapper
from django.contrib.gis.db.backends.oracle.creation import OracleCreation
from django.contrib.gis.db.backends.oracle.introspection import OracleIntrospection
from django.contrib.gis.db.backends.oracle.operations import OracleOperations


class DatabaseWrapper(OracleDatabaseWrapper):

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.ops = OracleOperations(self)
        self.creation = OracleCreation(self)
        self.introspection = OracleIntrospection(self)
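A minimal sketch of the Django settings entry that would select this GIS-aware wrapper (the connection values are placeholders):

DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.oracle',
        'NAME': 'orcl',
        'USER': 'scott',
        'PASSWORD': 'tiger',
    },
}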
sachintaware/sublime-wakatime
refs/heads/master
packages/wakatime/packages/pygments_py3/pygments/lexers/ecl.py
72
# -*- coding: utf-8 -*-
"""
    pygments.lexers.ecl
    ~~~~~~~~~~~~~~~~~~~

    Lexers for the ECL language.

    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.lexer import RegexLexer, include, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
    Number, Punctuation, Error

__all__ = ['ECLLexer']


class ECLLexer(RegexLexer):
    """
    Lexer for the declarative big-data `ECL
    <http://hpccsystems.com/community/docs/ecl-language-reference/html>`_
    language.

    .. versionadded:: 1.5
    """

    name = 'ECL'
    aliases = ['ecl']
    filenames = ['*.ecl']
    mimetypes = ['application/x-ecl']

    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'root': [
            include('whitespace'),
            include('statements'),
        ],
        'whitespace': [
            (r'\s+', Text),
            (r'\/\/.*', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            include('types'),
            include('keywords'),
            include('functions'),
            include('hash'),
            (r'"', String, 'string'),
            (r'\'', String, 'string'),
            (r'(\d+\.\d*|\.\d+|\d+)e[+-]?\d+[lu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+f)f?', Number.Float),
            (r'0x[0-9a-f]+[lu]*', Number.Hex),
            (r'0[0-7]+[lu]*', Number.Oct),
            (r'\d+[lu]*', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]+', Operator),
            (r'[{}()\[\],.;]', Punctuation),
            (r'[a-z_]\w*', Name),
        ],
        'hash': [
            (r'^#.*$', Comment.Preproc),
        ],
        'types': [
            (r'(RECORD|END)\D', Keyword.Declaration),
            (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|'
             r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|'
             r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)',
             bygroups(Keyword.Type, Text)),
        ],
        'keywords': [
            (words((
                'APPLY', 'ASSERT', 'BUILD', 'BUILDINDEX', 'EVALUATE', 'FAIL',
                'KEYDIFF', 'KEYPATCH', 'LOADXML', 'NOTHOR', 'NOTIFY', 'OUTPUT',
                'PARALLEL', 'SEQUENTIAL', 'SOAPCALL', 'CHECKPOINT', 'DEPRECATED',
                'FAILCODE', 'FAILMESSAGE', 'FAILURE', 'GLOBAL', 'INDEPENDENT',
                'ONWARNING', 'PERSIST', 'PRIORITY', 'RECOVERY', 'STORED', 'SUCCESS',
                'WAIT', 'WHEN'), suffix=r'\b'),
             Keyword.Reserved),
            # These are classed differently, check later
            (words((
                'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST',
                'BETWEEN', 'CASE', 'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT',
                'ENDC++', 'ENDMACRO', 'EXCEPT', 'EXCLUSIVE', 'EXPIRE', 'EXPORT',
                'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL', 'FUNCTION',
                'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN',
                'JOINED', 'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL',
                'LOCALE', 'LOOKUP', 'MACRO', 'MANY', 'MAXCOUNT', 'MAXLENGTH',
                'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE', 'NOROOT',
                'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER',
                'OVERWRITE', 'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH',
                'PIPE', 'QUOTE', 'RELATIONSHIP', 'REPEAT', 'RETURN', 'RIGHT',
                'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW', 'SKIP',
                'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN',
                'TRANSFORM', 'TRIM', 'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED',
                'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD', 'WITHIN', 'XML', 'XPATH',
                '__COMPRESSED__'), suffix=r'\b'),
             Keyword.Reserved),
        ],
        'functions': [
            (words((
                'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN',
                'ATAN2', 'AVE', 'CASE', 'CHOOSE', 'CHOOSEN', 'CHOOSESETS',
                'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS', 'COSH', 'COUNT',
                'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE',
                'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH',
                'ERROR', 'EVALUATE', 'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS',
                'EXP', 'FAILCODE', 'FAILMESSAGE', 'FETCH', 'FROMUNICODE',
                'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32',
                'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX',
                'INTFORMAT', 'ISVALID', 'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH',
                'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP', 'MAP', 'MATCHED',
                'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE', 'MAX',
                'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE',
                'PARSE', 'PIPE', 'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL',
                'RANDOM', 'RANGE', 'RANK', 'RANKED', 'REALFORMAT', 'RECORDOF',
                'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED', 'ROLLUP',
                'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN',
                'SINH', 'SIZEOF', 'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED',
                'STORED', 'SUM', 'TABLE', 'TAN', 'TANH', 'THISNODE', 'TOPN',
                'TOUNICODE', 'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP',
                'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE',
                'XMLENCODE', 'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'),
             Name.Function),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\'', String, '#pop'),
            (r'[^"\']+', String),
        ],
    }
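A short sketch of running the lexer through the standard Pygments pipeline (the ECL snippet itself is made up):

from pygments import highlight
from pygments.formatters import HtmlFormatter

code = "OUTPUT('Hello from ECL');"
print(highlight(code, ECLLexer(), HtmlFormatter()))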
paulthulstrup/moose
refs/heads/master
gui/plug_ins/PeacockApplication.py
8
try:
    from PyQt4 import QtCore, QtGui
    QtCore.Signal = QtCore.pyqtSignal
    QtCore.Slot = QtCore.pyqtSlot
except ImportError:
    try:
        from PySide import QtCore, QtGui
        QtCore.QString = str
    except ImportError:
        raise ImportError("Cannot load either PyQt or PySide")

import MeshInfoFactory
from MeshRenderWidget import *

from InputFileWidget import *
from ExecuteWidget import *
from PostprocessorWidget import *
from ExodusResultRenderWidget import *

class PeacockApplication(object):
    def __init__(self, main_window):
        self.main_window = main_window

    ''' Should create and return a map of "Tab Name" to the associated Tab
        object in the order you want them to show up in Peacock.
        For the main tabs (input_file_widget, execute_widget,
        postprocessor_widget and visualize_widget) it should also set those
        member variables on the main_ui object that is passed in.'''
    def tabs(self, main_ui):
        tabs = []

        main_ui.input_file_widget = InputFileWidget(main_ui.app_path, main_ui.options, main_ui, main_ui.qt_app, main_ui.application)
        main_ui.execute_widget = ExecuteWidget(main_ui.app_path, main_ui.input_file_widget, main_ui.qt_app)
        main_ui.postprocessor_widget = PostprocessorWidget(main_ui.input_file_widget, main_ui.execute_widget)
        main_ui.visualize_widget = ExodusResultRenderWidget(main_ui.input_file_widget, main_ui.execute_widget, main_ui.qt_app, main_ui.application)

        tabs.append(main_ui.input_file_widget)
        tabs.append(main_ui.execute_widget)
        tabs.append(main_ui.postprocessor_widget)
        tabs.append(main_ui.visualize_widget)

        return tabs

    ''' This function is responsible for filling in the valid options for each
        "cpp_type" for parameters.  The return value must be a dictionary...
        where the key is the cpp_type and the value is a set() of options.
        This default implementation works well for most MOOSE based
        applications. '''
    def typeOptions(self):
        input_file_widget = self.main_window.input_file_widget
        tree_widget = input_file_widget.tree_widget

        type_options = {}

        # Variables
        variable_names = tree_widget.getChildNamesOfPath('Variables')
        type_options['std::vector<NonlinearVariableName, std::allocator<NonlinearVariableName> >'] = set()
        type_options['std::vector<NonlinearVariableName>'] = set()
        type_options['NonlinearVariableName'] = set()
        type_options['std::vector<VariableName, std::allocator<VariableName> >'] = set()
        type_options['std::vector<VariableName>'] = set()
        type_options['VariableName'] = set()
        if len(variable_names):
            type_options['std::vector<NonlinearVariableName, std::allocator<NonlinearVariableName> >'] |= set(variable_names)
            type_options['std::vector<NonlinearVariableName>'] |= set(variable_names)
            type_options['NonlinearVariableName'] |= set(variable_names)
            type_options['std::vector<VariableName, std::allocator<VariableName> >'] |= set(variable_names)
            type_options['std::vector<VariableName>'] |= set(variable_names)
            type_options['VariableName'] |= set(variable_names)

        # Aux Vars
        aux_variable_names = tree_widget.getChildNamesOfPath('AuxVariables')
        if len(aux_variable_names):
            type_options['std::vector<AuxVariableName, std::allocator<AuxVariableName> >'] = set(aux_variable_names)
            type_options['std::vector<AuxVariableName>'] = set(aux_variable_names)
            type_options['AuxVariableName'] = set(aux_variable_names)
            type_options['std::vector<VariableName, std::allocator<VariableName> >'] |= set(aux_variable_names)
            type_options['std::vector<VariableName>'] |= set(aux_variable_names)
            type_options['VariableName'] |= set(aux_variable_names)

        # Functions
        function_names = tree_widget.getChildNamesOfPath('Functions')
        if len(function_names):
            type_options['std::vector<FunctionName, std::allocator<FunctionName> >'] = set(function_names)
            type_options['std::vector<FunctionName>'] = set(function_names)
            type_options['FunctionName'] = set(function_names)

        # Postprocessors
        postprocessor_names = tree_widget.getChildNamesOfPath('Postprocessors')
        if len(postprocessor_names):
            type_options['std::vector<PostprocessorName, std::allocator<PostprocessorName> >'] = set(postprocessor_names)
            type_options['std::vector<PostprocessorName>'] = set(postprocessor_names)
            type_options['PostprocessorName'] = set(postprocessor_names)

        # UserObjects
        user_object_names = tree_widget.getChildNamesOfPath('UserObjects')
        if len(user_object_names):
            type_options['std::vector<UserObjectName, std::allocator<UserObjectName> >'] = set(user_object_names)
            type_options['std::vector<UserObjectName>'] = set(user_object_names)
            type_options['UserObjectName'] = set(user_object_names)

        # Markers
        marker_names = tree_widget.getChildNamesOfPath('Adaptivity/Markers')
        if len(marker_names):
            type_options['std::vector<MarkerName, std::allocator<MarkerName> >'] = set(marker_names)
            type_options['std::vector<MarkerName>'] = set(marker_names)
            type_options['MarkerName'] = set(marker_names)

        # Indicators
        indicator_names = tree_widget.getChildNamesOfPath('Adaptivity/Indicators')
        if len(indicator_names):
            type_options['std::vector<IndicatorName, std::allocator<IndicatorName> >'] = set(indicator_names)
            type_options['std::vector<IndicatorName>'] = set(indicator_names)
            type_options['IndicatorName'] = set(indicator_names)

        # MultiApps
        multi_app_names = tree_widget.getChildNamesOfPath('MultiApps')
        if len(multi_app_names):
            type_options['std::vector<MultiAppName, std::allocator<MultiAppName> >'] = set(multi_app_names)
            type_options['std::vector<MultiAppName>'] = set(multi_app_names)
            type_options['MultiAppName'] = set(multi_app_names)

        # Outputs
        output_names = tree_widget.getChildNamesOfPath('Outputs')
        if len(output_names):
            type_options['std::vector<OutputName, std::allocator<OutputName> >'] = set(output_names)
            type_options['std::vector<OutputName>'] = set(output_names)
            type_options['OutputName'] = set(output_names)

        # DiscreteMaterials
        discrete_name = tree_widget.getChildNamesOfPath('Materials')
        if len(discrete_name):
            type_options['std::vector<DiscreteMaterialName, std::allocator<DiscreteMaterialName> >'] = set(discrete_name)
            type_options['std::vector<DiscreteMaterialName>'] = set(discrete_name)
            type_options['DiscreteMaterialName'] = set(discrete_name)

        # Mesh stuff
        mesh_data = tree_widget.getMeshItemData()
        if mesh_data:
            mesh_info = MeshInfoFactory.getMeshInfo(mesh_data)
            if mesh_info:
                type_options['std::vector<BlockName>'] = mesh_info.blockNames()
                type_options['BlockName'] = mesh_info.blockNames()
                type_options['std::vector<BoundaryName, std::allocator<BoundaryName> >'] = mesh_info.sidesetNames()
                type_options['std::vector<BoundaryName>'] = mesh_info.sidesetNames()
                type_options['BoundaryName'] = mesh_info.sidesetNames()
                type_options['std::vector<BoundaryName, std::allocator<BoundaryName> >'].update(mesh_info.nodesetNames())
                type_options['std::vector<BoundaryName>'].update(mesh_info.nodesetNames())
                type_options['BoundaryName'].update(mesh_info.nodesetNames())
                type_options['std::vector<SubdomainName, std::allocator<SubdomainName> >'] = mesh_info.blockNames()
                type_options['std::vector<SubdomainName>'] = mesh_info.blockNames()
                type_options['SubdomainName'] = mesh_info.blockNames()

        return type_options

    ''' This is the graphical view of the mesh that is shown next to the input
        file tree view.  This function should return a QWidget derived
        class.'''
    def meshRenderWidget(self, input_file_widget):
        return MeshRenderWidget(input_file_widget.tree_widget)

    ''' Whether or not to show the meshrenderwidget by default.
        For normal MOOSE based applications this is False and the
        meshrenderwidget is only shown after the Mesh block has been edited.
        But, some applications don't have Mesh blocks...
        The return value is a boolean. '''
    def showMeshRenderWidgetByDefault(self):
        return False

    ''' This function allows you to apply VTK filters to the result before it
        is rendered in the visualize widget.  The incoming object is
        vtkPolyData... you will most likely want to create a VTK filter and
        then attach the output of the result_vtk_object to the input of your
        filter.  The return value of this function MUST be a VTK object that
        provides vtkPolyData on its output port '''
    def filterResult(self, result_vtk_object):
        return result_vtk_object

    def addExodusResultActors(self, appRenderer):
        pass

    def addRelapSave(self, layout):
        pass

    def addNumberHistory(self, command_layout):
        pass
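A hypothetical consumer of typeOptions(): look up the option set for a parameter's raw cpp_type string, with an empty set as the fallback (app stands in for a constructed PeacockApplication):

type_options = app.typeOptions()
variable_options = type_options.get('VariableName', set())
print(sorted(variable_options))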
jos4uke/getSeqFlankBlatHit
refs/heads/master
lib/python2.7/site-packages/pybedtools/scripts/venn_gchart.py
1
#!/usr/bin/env python
"""
Given 3 files, creates a 3-way Venn diagram of intersections using the Google \
Chart API; see :mod:`pybedtools.contrib.venn_maker` for more flexibility.

The values in the diagram assume:

    * unstranded intersections
    * no features that are nested inside larger features
"""

import argparse
import sys
import pybedtools
import urllib
import urllib2


def venn_gchart(a, b, c=None, colors=None, labels=None, size='300x300'):
    """
    a, b, and c are filenames to BED-like files.

    *colors* is a list of 3 hex colors

    *labels* is a list of 3 labels

    *size* is the size in pixels for the PNG
    """
    a = pybedtools.BedTool(a)
    b = pybedtools.BedTool(b)
    if c:
        c = pybedtools.BedTool(c)

    # The order of values is meaningful to the API, see
    # http://code.google.com/apis/chart/docs/gallery/venn_charts.html
    if c:
        vals = [len(a),
                len(b),
                len(c),
                len(a + b),
                len(a + c),
                len(b + c),
                len(a + b + c)]
    else:
        # insert 0 for size of 3rd circle.
        vals = [len(a),
                len(b),
                0,
                len(a + b)]
        if labels:
            labels = labels[:2]

    # API doesn't seem to like large numbers, so get fractions instead, then
    # join make a comma-separated list of values.
    mx = float(max(vals))
    vals = [i / mx for i in vals]
    valstr = ','.join(map(str, vals))

    data = {'cht': 'v',
            'chs': size,
            'chd': 't:' + valstr}

    # Add the optional data, if specified
    if labels:
        data['chdl'] = '|'.join(labels)
    if colors:
        data['chco'] = ','.join(colors)
    return data


def gchart(data, outfn='out.png'):
    """
    Sends *data* to the Google Chart API; *outfn* is the output PNG you
    want to create.
    """
    data = urllib.urlencode(data)
    url = 'https://chart.googleapis.com/chart?'
    # Request and get the PNG
    req = urllib2.Request(url, data)
    print url + data
    response = urllib2.urlopen(req)
    f = open(outfn, 'w')
    f.write(response.read())
    f.close()


def main():
    """Create a 3-way Venn diagram using Google Charts API
    """
    op = argparse.ArgumentParser(description=__doc__, prog=sys.argv[0])
    op.add_argument('-a', help='File to use for the left-most circle')
    op.add_argument('-b', help='File to use for the right-most circle')
    op.add_argument('-c', help='File to use for the bottom circle')
    op.add_argument('--colors',
                    help='Optional comma-separated list of hex '
                         'colors for circles a, b, and c.  E.g. %(default)s',
                    default='00FF00,FF0000,0000FF')
    op.add_argument('--labels',
                    help='Optional comma-separated list of labels for a, b, and c',
                    default='a,b,c')
    op.add_argument('--size', default='300x300',
                    help='Optional size of PNG, in pixels.  Default is '
                         '"%(default)s"')
    op.add_argument('-o', default='out.png',
                    help='Output file to save as, in PNG format')
    op.add_argument('--test', action='store_true',
                    help='run test, overriding all other options.')
    options = op.parse_args()

    reqd_args = ['a', 'b']
    if not options.test:
        for ra in reqd_args:
            if not getattr(options, ra):
                op.print_help()
                sys.stderr.write('Missing required arg "%s"\n' % ra)
                sys.exit(1)

    if options.test:
        # Example data
        pybedtools.bedtool.random.seed(1)
        a = pybedtools.example_bedtool('rmsk.hg18.chr21.small.bed')
        b = pybedtools.example_bedtool('venn.b.bed')
        c = pybedtools.example_bedtool('venn.c.bed')
        options.a = a.fn
        options.b = b.fn
        options.c = c.fn
        options.colors = '00FF00,FF0000,0000FF'
        options.o = 'out.png'
        options.labels = 'a,b,c'

    data = venn_gchart(a=options.a, b=options.b, c=options.c,
                       colors=options.colors.split(','),
                       labels=options.labels.split(','),
                       size=options.size)
    gchart(data, outfn=options.o)


if __name__ == "__main__":
    import doctest
    if doctest.testmod(optionflags=doctest.ELLIPSIS).failed == 0:
        main()
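The same two entry points can be driven programmatically instead of via main(); a hypothetical call with placeholder BED filenames:

data = venn_gchart(a='a.bed', b='b.bed', c='c.bed',
                   colors=['00FF00', 'FF0000', '0000FF'],
                   labels=['a', 'b', 'c'])
gchart(data, outfn='venn.png')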
todaychi/hue
refs/heads/master
desktop/core/ext-py/boto-2.46.1/boto/vpc/vpngateway.py
170
# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

"""
Represents a Vpn Gateway
"""

from boto.ec2.ec2object import TaggedEC2Object


class Attachment(object):

    def __init__(self, connection=None):
        self.vpc_id = None
        self.state = None

    def startElement(self, name, attrs, connection):
        pass

    def endElement(self, name, value, connection):
        if name == 'vpcId':
            self.vpc_id = value
        elif name == 'state':
            self.state = value
        else:
            setattr(self, name, value)


class VpnGateway(TaggedEC2Object):

    def __init__(self, connection=None):
        super(VpnGateway, self).__init__(connection)
        self.id = None
        self.type = None
        self.state = None
        self.availability_zone = None
        self.attachments = []

    def __repr__(self):
        return 'VpnGateway:%s' % self.id

    def startElement(self, name, attrs, connection):
        retval = super(VpnGateway, self).startElement(name, attrs, connection)
        if retval is not None:
            return retval
        if name == 'item':
            att = Attachment()
            self.attachments.append(att)
            return att

    def endElement(self, name, value, connection):
        if name == 'vpnGatewayId':
            self.id = value
        elif name == 'type':
            self.type = value
        elif name == 'state':
            self.state = value
        elif name == 'availabilityZone':
            self.availability_zone = value
        elif name == 'attachments':
            pass
        else:
            setattr(self, name, value)

    def attach(self, vpc_id, dry_run=False):
        return self.connection.attach_vpn_gateway(
            self.id,
            vpc_id,
            dry_run=dry_run
        )
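A hypothetical usage sketch with boto's VPC connection (credentials come from the usual boto configuration; the VPC id is a placeholder):

from boto.vpc import VPCConnection

conn = VPCConnection()
gateways = conn.get_all_vpn_gateways()
if gateways:
    gateways[0].attach('vpc-12345678')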
Clumsy-Kernel-Development/HTC_10_Kernel
refs/heads/master
tools/perf/scripts/python/netdev-times.py
1544
# Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec

buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count

tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count

tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count

tx_free_list = []; # list of packets which is freed

# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME =    0
EINFO_IDX_CONTEXT = 1
EINFO_IDX_CPU =     2
EINFO_IDX_TIME =    3
EINFO_IDX_PID =     4
EINFO_IDX_COMM =    5

# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    return (dst - src) / 1000000.0

# Display a process of transmitting a packet
def print_transmit(hunk):
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))

# Format for displaying rx packet processing
PF_IRQ_ENTRY= "  irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY="  softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= "  napi_poll_exit(+%.3fmsec %s)"
PF_JOINT=     "         |"
PF_WJOINT=    "         |            |"
PF_NET_RECV=  "         |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX=    "         |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= "         |      skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= "         |      kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB=  "         |      consume_skb(+%.3fmsec)"

# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t, event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t, event['comm_t'])
            print PF_JOINT

def trace_begin():
    global show_tx
    global show_rx
    global dev
    global debug

    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.find('dev=', 0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1

def trace_end():
    # order all events in time
    all_event_list.sort(lambda a, b: cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME]))
    # process all events
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print "   dev    len      Qdisc        " \
            "       netdevice             free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)

# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           callchain, irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name)
    all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
                           skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
                  skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
                       skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
                      skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc, dev_name)
    all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
                   skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr)
    all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
                                 skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)

def handle_irq_handler_entry(event_info):
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)

def handle_irq_handler_exit(event_info):
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)

def handle_irq_softirq_raise(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_irq_softirq_entry(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}

def handle_irq_softirq_exit(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)

def handle_napi_poll(event_info):
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)

def handle_netif_rx(event_info):
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
                           'skbaddr':skbaddr, 'skblen':skblen,
                           'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_netif_receive_skb(event_info):
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1

def handle_net_dev_queue(event_info):
    global of_count_tx_queue_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1

def handle_net_dev_xmit(event_info):
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return

def handle_kfree_skb(event_info):
    (name, context, cpu, time, pid, comm,
     skbaddr, protocol, location) = event_info
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return

def handle_consume_skb(event_info):
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return

def handle_skb_copy_datagram_iovec(event_info):
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
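A quick sanity check of the diff_msec() helper defined at the top of the script: a 3,500,000 ns interval is 3.5 msec:

assert diff_msec(1000000000, 1003500000) == 3.5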
pe-suke/ansible
refs/heads/devel
lib/ansible/module_utils/urls.py
125
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Michael DeHaan <[email protected]>, 2012-2013 # Copyright (c), Toshio Kuratomi <[email protected]>, 2015 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The match_hostname function and supporting code is under the terms and # conditions of the Python Software Foundation License. They were taken from # the Python3 standard library and adapted for use in Python2. See comments in the # source for which code precisely is under this License. PSF License text # follows: # # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 # -------------------------------------------- # # 1. This LICENSE AGREEMENT is between the Python Software Foundation # ("PSF"), and the Individual or Organization ("Licensee") accessing and # otherwise using this software ("Python") in source or binary form and # its associated documentation. # # 2. Subject to the terms and conditions of this License Agreement, PSF hereby # grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, # analyze, test, perform and/or display publicly, prepare derivative works, # distribute, and otherwise use Python alone or in any derivative version, # provided, however, that PSF's License Agreement and PSF's notice of copyright, # i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, # 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are # retained in Python alone or in any derivative version prepared by Licensee. # # 3. In the event Licensee prepares a derivative work that is based on # or incorporates Python or any part thereof, and wants to make # the derivative work available to others as provided herein, then # Licensee hereby agrees to include in any such work a brief summary of # the changes made to Python. # # 4. PSF is making Python available to Licensee on an "AS IS" # basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR # IMPLIED. 
BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND # DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS # FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT # INFRINGE ANY THIRD PARTY RIGHTS. # # 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON # FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS # A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, # OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. # # 6. This License Agreement will automatically terminate upon a material # breach of its terms and conditions. # # 7. Nothing in this License Agreement shall be deemed to create any # relationship of agency, partnership, or joint venture between PSF and # Licensee. This License Agreement does not grant permission to use PSF # trademarks or trade name in a trademark sense to endorse or promote # products or services of Licensee, or any third party. # # 8. By copying, installing or otherwise using Python, Licensee # agrees to be bound by the terms and conditions of this License # Agreement. try: import urllib2 HAS_URLLIB2 = True except: HAS_URLLIB2 = False try: import urlparse HAS_URLPARSE = True except: HAS_URLPARSE = False try: import ssl HAS_SSL = True except: HAS_SSL = False try: # SNI Handling needs python2.7.9's SSLContext from ssl import create_default_context, SSLContext HAS_SSLCONTEXT = True except ImportError: HAS_SSLCONTEXT = False # Select a protocol that includes all secure tls protocols # Exclude insecure ssl protocols if possible if HAS_SSL: # If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient PROTOCOL = ssl.PROTOCOL_TLSv1 if not HAS_SSLCONTEXT and HAS_SSL: try: import ctypes, ctypes.util except ImportError: # python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl) pass else: libssl_name = ctypes.util.find_library('ssl') libssl = ctypes.CDLL(libssl_name) for method in ('TLSv1_1_method', 'TLSv1_2_method'): try: libssl[method] # Found something - we'll let openssl autonegotiate and hope # the server has disabled sslv2 and 3. best we can do. PROTOCOL = ssl.PROTOCOL_SSLv23 break except AttributeError: pass del libssl HAS_MATCH_HOSTNAME = True try: from ssl import match_hostname, CertificateError except ImportError: try: from backports.ssl_match_hostname import match_hostname, CertificateError except ImportError: HAS_MATCH_HOSTNAME = False if not HAS_MATCH_HOSTNAME: ### ### The following block of code is under the terms and conditions of the ### Python Software Foundation License ### """The match_hostname() function from Python 3.4, essential when using SSL.""" import re class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False # Ported from python3-syntax: # leftmost, *remainder = dn.split(r'.') parts = dn.split(r'.') leftmost = parts[0] remainder = parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. 
# The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found") ### ### End of Python Software Foundation Licensed code ### HAS_MATCH_HOSTNAME = True import httplib import os import re import sys import socket import platform import tempfile import base64 # This is a dummy cacert provided for Mac OS since you need at least 1 # ca cert, regardless of validity, for Python on Mac OS to use the # keychain functionality in OpenSSL for validating SSL certificates. 
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher DUMMY_CA_CERT = """-----BEGIN CERTIFICATE----- MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9 gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1 4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg= -----END CERTIFICATE----- """ # # Exceptions # class ConnectionError(Exception): """Failed to connect to the server""" pass class ProxyError(ConnectionError): """Failure to connect because of a proxy""" pass class SSLValidationError(ConnectionError): """Failure to connect due to SSL validation failing""" pass class NoSSLError(SSLValidationError): """Needed to connect to an HTTPS url but no ssl library available to verify the certificate""" pass class CustomHTTPSConnection(httplib.HTTPSConnection): def __init__(self, *args, **kwargs): httplib.HTTPSConnection.__init__(self, *args, **kwargs) if HAS_SSLCONTEXT: self.context = create_default_context() if self.cert_file: self.context.load_cert_chain(self.cert_file, self.key_file) def connect(self): "Connect to a host on a given (SSL) port." if hasattr(self, 'source_address'): sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) else: sock = socket.create_connection((self.host, self.port), self.timeout) if self._tunnel_host: self.sock = sock self._tunnel() if HAS_SSLCONTEXT: self.sock = self.context.wrap_socket(sock, server_hostname=self.host) else: self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) class CustomHTTPSHandler(urllib2.HTTPSHandler): def https_open(self, req): return self.do_open(CustomHTTPSConnection, req) https_request = urllib2.AbstractHTTPHandler.do_request_ def generic_urlparse(parts): ''' Returns a dictionary of url parts as parsed by urlparse, but accounts for the fact that older versions of that library do not support named attributes (ie. 
.netloc)
    '''
    generic_parts = dict()
    if hasattr(parts, 'netloc'):
        # urlparse is newer, just read the fields straight
        # from the parts object
        generic_parts['scheme']   = parts.scheme
        generic_parts['netloc']   = parts.netloc
        generic_parts['path']     = parts.path
        generic_parts['params']   = parts.params
        generic_parts['query']    = parts.query
        generic_parts['fragment'] = parts.fragment
        generic_parts['username'] = parts.username
        generic_parts['password'] = parts.password
        generic_parts['hostname'] = parts.hostname
        generic_parts['port']     = parts.port
    else:
        # we have to use indexes, and then parse out
        # the other parts not supported by indexing
        generic_parts['scheme']   = parts[0]
        generic_parts['netloc']   = parts[1]
        generic_parts['path']     = parts[2]
        generic_parts['params']   = parts[3]
        generic_parts['query']    = parts[4]
        generic_parts['fragment'] = parts[5]
        # get the username, password, etc.
        try:
            netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
            # match() returns a match object, not a tuple, so .groups()
            # is needed before the result can be unpacked
            (auth, hostname, port) = netloc_re.match(parts[1]).groups()
            if port:
                # the capture group for the port will include the ':',
                # so remove it and convert the port to an integer
                port = int(port[1:])
            # a URL without an auth section leaves the credentials as None
            username = password = None
            if auth:
                # the capture group above includes the @, so remove it
                # and then split it up based on the first ':' found
                auth = auth[:-1]
                username, password = auth.split(':', 1)
            generic_parts['username'] = username
            generic_parts['password'] = password
            generic_parts['hostname'] = hostname
            generic_parts['port']     = port
        except:
            generic_parts['username'] = None
            generic_parts['password'] = None
            generic_parts['hostname'] = None
            generic_parts['port']     = None
    return generic_parts


class RequestWithMethod(urllib2.Request):
    '''
    Workaround for using DELETE/PUT/etc with urllib2
    Originally contained in library/net_infrastructure/dnsmadeeasy
    '''

    def __init__(self, url, method, data=None, headers={}):
        self._method = method
        urllib2.Request.__init__(self, url, data, headers)

    def get_method(self):
        if self._method:
            return self._method
        else:
            return urllib2.Request.get_method(self)


class SSLValidationHandler(urllib2.BaseHandler):
    '''
    A custom handler class for SSL validation.
Based on: http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python http://techknack.net/python-urllib2-handlers/ ''' CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n" def __init__(self, hostname, port): self.hostname = hostname self.port = port def get_ca_certs(self): # tries to find a valid CA cert in one of the # standard locations for the current distribution ca_certs = [] paths_checked = [] system = platform.system() # build a list of paths to check for .crt/.pem files # based on the platform type paths_checked.append('/etc/ssl/certs') if system == 'Linux': paths_checked.append('/etc/pki/ca-trust/extracted/pem') paths_checked.append('/etc/pki/tls/certs') paths_checked.append('/usr/share/ca-certificates/cacert.org') elif system == 'FreeBSD': paths_checked.append('/usr/local/share/certs') elif system == 'OpenBSD': paths_checked.append('/etc/ssl') elif system == 'NetBSD': ca_certs.append('/etc/openssl/certs') elif system == 'SunOS': paths_checked.append('/opt/local/etc/openssl/certs') # fall back to a user-deployed cert in a standard # location if the OS platform one is not available paths_checked.append('/etc/ansible') tmp_fd, tmp_path = tempfile.mkstemp() # Write the dummy ca cert if we are running on Mac OS X if system == 'Darwin': os.write(tmp_fd, DUMMY_CA_CERT) # Default Homebrew path for OpenSSL certs paths_checked.append('/usr/local/etc/openssl') # for all of the paths, find any .crt or .pem files # and compile them into single temp file for use # in the ssl check to speed up the test for path in paths_checked: if os.path.exists(path) and os.path.isdir(path): dir_contents = os.listdir(path) for f in dir_contents: full_path = os.path.join(path, f) if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'): try: cert_file = open(full_path, 'r') os.write(tmp_fd, cert_file.read()) os.write(tmp_fd, '\n') cert_file.close() except: pass return (tmp_path, paths_checked) def validate_proxy_response(self, response, valid_codes=[200]): ''' make sure we get back a valid code from the proxy ''' try: (http_version, resp_code, msg) = re.match(r'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups() if int(resp_code) not in valid_codes: raise Exception except: raise ProxyError('Connection to proxy failed') def detect_no_proxy(self, url): ''' Detect if the 'no_proxy' environment variable is set and honor those locations. 
''' env_no_proxy = os.environ.get('no_proxy') if env_no_proxy: env_no_proxy = env_no_proxy.split(',') netloc = urlparse.urlparse(url).netloc for host in env_no_proxy: if netloc.endswith(host) or netloc.split(':')[0].endswith(host): # Our requested URL matches something in no_proxy, so don't # use the proxy for this return False return True def _make_context(self, tmp_ca_cert_path): context = create_default_context() context.load_verify_locations(tmp_ca_cert_path) return context def http_request(self, req): tmp_ca_cert_path, paths_checked = self.get_ca_certs() https_proxy = os.environ.get('https_proxy') context = None if HAS_SSLCONTEXT: context = self._make_context(tmp_ca_cert_path) # Detect if 'no_proxy' environment variable is set and if our URL is included use_proxy = self.detect_no_proxy(req.get_full_url()) if not use_proxy: # ignore proxy settings for this host request return req try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if https_proxy: proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy)) s.connect((proxy_parts.get('hostname'), proxy_parts.get('port'))) if proxy_parts.get('scheme') == 'http': s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port)) if proxy_parts.get('username'): credentials = "%s:%s" % (proxy_parts.get('username',''), proxy_parts.get('password','')) s.sendall('Proxy-Authorization: Basic %s\r\n' % credentials.encode('base64').strip()) s.sendall('\r\n') connect_result = s.recv(4096) self.validate_proxy_response(connect_result) if context: ssl_s = context.wrap_socket(s, server_hostname=proxy_parts.get('hostname')) else: ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) else: raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme')) else: s.connect((self.hostname, self.port)) if context: ssl_s = context.wrap_socket(s, server_hostname=self.hostname) else: ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) # close the ssl connection #ssl_s.unwrap() s.close() except (ssl.SSLError, socket.error), e: # fail if we tried all of the certs but none worked if 'connection refused' in str(e).lower(): raise ConnectionError('Failed to connect to %s:%s.' % (self.hostname, self.port)) else: raise SSLValidationError('Failed to validate the SSL certificate for %s:%s.' ' Make sure your managed systems have a valid CA' ' certificate installed. If the website serving the url' ' uses SNI you need python >= 2.7.9 on your managed' ' machine. You can use validate_certs=False if you do' ' not need to confirm the server\s identity but this is' ' unsafe and not recommended' ' Paths checked for this platform: %s' % (self.hostname, self.port, ", ".join(paths_checked)) ) except CertificateError: raise SSLValidationError("SSL Certificate does not belong to %s. 
Make sure the url has a certificate that belongs to it or use validate_certs=False (insecure)" % self.hostname) try: # cleanup the temp file created, don't worry # if it fails for some reason os.remove(tmp_ca_cert_path) except: pass return req https_request = http_request # Rewrite of fetch_url to not require the module environment def open_url(url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None, force_basic_auth=False): ''' Fetches a file from an HTTP/FTP server using urllib2 ''' handlers = [] # FIXME: change the following to use the generic_urlparse function # to remove the indexed references for 'parsed' parsed = urlparse.urlparse(url) if parsed[0] == 'https' and validate_certs: if not HAS_SSL: raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False, however this is unsafe and not recommended') # do the cert validation netloc = parsed[1] if '@' in netloc: netloc = netloc.split('@', 1)[1] if ':' in netloc: hostname, port = netloc.split(':', 1) port = int(port) else: hostname = netloc port = 443 # create the SSL validation handler and # add it to the list of handlers ssl_handler = SSLValidationHandler(hostname, port) handlers.append(ssl_handler) if parsed[0] != 'ftp': username = url_username if username: password = url_password netloc = parsed[1] elif '@' in parsed[1]: credentials, netloc = parsed[1].split('@', 1) if ':' in credentials: username, password = credentials.split(':', 1) else: username = credentials password = '' parsed = list(parsed) parsed[1] = netloc # reconstruct url without credentials url = urlparse.urlunparse(parsed) if username and not force_basic_auth: passman = urllib2.HTTPPasswordMgrWithDefaultRealm() # this creates a password manager passman.add_password(None, netloc, username, password) # because we have put None at the start it will always # use this username/password combination for urls # for which `theurl` is a super-url authhandler = urllib2.HTTPBasicAuthHandler(passman) # create the AuthHandler handlers.append(authhandler) elif username and force_basic_auth: if headers is None: headers = {} headers["Authorization"] = "Basic %s" % base64.b64encode("%s:%s" % (username, password)) if not use_proxy: proxyhandler = urllib2.ProxyHandler({}) handlers.append(proxyhandler) # pre-2.6 versions of python cannot use the custom https # handler, since the socket class is lacking this method if hasattr(socket, 'create_connection'): handlers.append(CustomHTTPSHandler) opener = urllib2.build_opener(*handlers) urllib2.install_opener(opener) if method: if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'): raise ConnectionError('invalid HTTP request method; %s' % method.upper()) request = RequestWithMethod(url, method.upper(), data) else: request = urllib2.Request(url, data) # add the custom agent header, to help prevent issues # with sites that block the default urllib agent string request.add_header('User-agent', http_agent) # if we're ok with getting a 304, set the timestamp in the # header, otherwise make sure we don't get a cached copy if last_mod_time and not force: tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000') request.add_header('If-Modified-Since', tstamp) else: request.add_header('cache-control', 'no-cache') # user defined headers now, which may override things we've set above if headers: if not isinstance(headers, dict): raise 
ValueError("headers provided to fetch_url() must be a dict") for header in headers: request.add_header(header, headers[header]) urlopen_args = [request, None] if sys.version_info >= (2,6,0): # urlopen in python prior to 2.6.0 did not # have a timeout parameter urlopen_args.append(timeout) if HAS_SSLCONTEXT and not validate_certs: # In 2.7.9, the default context validates certificates context = SSLContext(ssl.PROTOCOL_SSLv23) context.options |= ssl.OP_NO_SSLv2 context.options |= ssl.OP_NO_SSLv3 context.verify_mode = ssl.CERT_NONE context.check_hostname = False urlopen_args += (None, None, None, context) r = urllib2.urlopen(*urlopen_args) return r # # Module-related functions # def url_argument_spec(): ''' Creates an argument spec that can be used with any module that will be requesting content via urllib/urllib2 ''' return dict( url = dict(), force = dict(default='no', aliases=['thirsty'], type='bool'), http_agent = dict(default='ansible-httpget'), use_proxy = dict(default='yes', type='bool'), validate_certs = dict(default='yes', type='bool'), url_username = dict(required=False), url_password = dict(required=False), force_basic_auth = dict(required=False, type='bool', default='no'), ) def fetch_url(module, url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10): ''' Fetches a file from an HTTP/FTP server using urllib2. Requires the module environment ''' if not HAS_URLLIB2: module.fail_json(msg='urllib2 is not installed') elif not HAS_URLPARSE: module.fail_json(msg='urlparse is not installed') # Get validate_certs from the module params validate_certs = module.params.get('validate_certs', True) username = module.params.get('url_username', '') password = module.params.get('url_password', '') http_agent = module.params.get('http_agent', None) force_basic_auth = module.params.get('force_basic_auth', '') r = None info = dict(url=url) try: r = open_url(url, data=data, headers=headers, method=method, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, url_username=username, url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth) info.update(r.info()) info['url'] = r.geturl() # The URL goes in too, because of redirects. info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200)) except NoSSLError, e: distribution = get_distribution() if distribution.lower() == 'redhat': module.fail_json(msg='%s. You can also install python-ssl from EPEL' % str(e)) except (ConnectionError, ValueError), e: module.fail_json(msg=str(e)) except urllib2.HTTPError, e: info.update(dict(msg=str(e), status=e.code)) except urllib2.URLError, e: code = int(getattr(e, 'code', -1)) info.update(dict(msg="Request failed: %s" % str(e), status=code)) except socket.error, e: info.update(dict(msg="Connection failure: %s" % str(e), status=-1)) except Exception, e: info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1)) return r, info
bop/foundation
refs/heads/master
lib/python2.7/site-packages/django/contrib/flatpages/views.py
94
from django.contrib.flatpages.models import FlatPage from django.template import loader, RequestContext from django.shortcuts import get_object_or_404 from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect from django.conf import settings from django.core.xheaders import populate_xheaders from django.utils.safestring import mark_safe from django.views.decorators.csrf import csrf_protect DEFAULT_TEMPLATE = 'flatpages/default.html' # This view is called from FlatpageFallbackMiddleware.process_response # when a 404 is raised, which often means CsrfViewMiddleware.process_view # has not been called even if CsrfViewMiddleware is installed. So we need # to use @csrf_protect, in case the template needs {% csrf_token %}. # However, we can't just wrap this view; if no matching flatpage exists, # or a redirect is required for authentication, the 404 needs to be returned # without any CSRF checks. Therefore, we only # CSRF protect the internal implementation. def flatpage(request, url): """ Public interface to the flat page view. Models: `flatpages.flatpages` Templates: Uses the template defined by the ``template_name`` field, or `flatpages/default.html` if template_name is not defined. Context: flatpage `flatpages.flatpages` object """ if not url.startswith('/'): url = '/' + url try: f = get_object_or_404(FlatPage, url__exact=url, sites__id__exact=settings.SITE_ID) except Http404: if not url.endswith('/') and settings.APPEND_SLASH: url += '/' f = get_object_or_404(FlatPage, url__exact=url, sites__id__exact=settings.SITE_ID) return HttpResponsePermanentRedirect('%s/' % request.path) else: raise return render_flatpage(request, f) @csrf_protect def render_flatpage(request, f): """ Internal interface to the flat page view. """ # If registration is required for accessing this page, and the user isn't # logged in, redirect to the login page. if f.registration_required and not request.user.is_authenticated(): from django.contrib.auth.views import redirect_to_login return redirect_to_login(request.path) if f.template_name: t = loader.select_template((f.template_name, DEFAULT_TEMPLATE)) else: t = loader.get_template(DEFAULT_TEMPLATE) # To avoid having to always use the "|safe" filter in flatpage templates, # mark the title and content as already safe (since they are raw HTML # content in the first place). f.title = mark_safe(f.title) f.content = mark_safe(f.content) c = RequestContext(request, { 'flatpage': f, }) response = HttpResponse(t.render(c)) populate_xheaders(request, response, FlatPage, f.id) return response
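A short sketch of how the flatpage() view above is commonly exposed through a URLconf in Django of this vintage (the /pages/ prefix is an illustrative assumption, not part of this file):

# urls.py -- route everything under /pages/ to the flatpage view;
# the captured group is passed to the view as its `url` argument
from django.conf.urls.defaults import patterns, url

urlpatterns = patterns('',
    url(r'^pages/(?P<url>.*)$', 'django.contrib.flatpages.views.flatpage'),
)

Alternatively, adding 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware' to MIDDLEWARE_CLASSES serves flatpages only after all other URL patterns have failed to match, which is the code path that makes the @csrf_protect note above relevant.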
maizy/giiit
refs/heads/master
giiit/models/tree.py
1
# _*_ coding: utf-8 _*_
# Copyright (c) Nikita Kovaliov, maizy.ru, 2013
# See LICENSE.txt for details.

from __future__ import unicode_literals, absolute_import

from collections import namedtuple

STATUS_CODES = {
    ' ': 'unmodified',
    'M': 'modified',
    'A': 'added',
    'D': 'deleted',
    'R': 'renamed',
    'C': 'copied',
    'U': 'unmerged',
    '?': 'untracked',
    '!': 'ignored',
}

Entity = namedtuple('Entity', ['work_tree_status_code', 'work_tree_status',
                               'index_status_code', 'index_status',
                               'path', 'new_path'])
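A small sketch (not part of the original module) of how STATUS_CODES and Entity fit together when parsing one line of `git status --porcelain` output, where the first column is the index status and the second the work-tree status:

def parse_status_line(line):
    # porcelain format: XY <path>[ -> <new_path>]
    index_code, work_tree_code = line[0], line[1]
    rest = line[3:]
    if ' -> ' in rest:
        path, new_path = rest.split(' -> ', 1)
    else:
        path, new_path = rest, None
    return Entity(
        work_tree_status_code=work_tree_code,
        work_tree_status=STATUS_CODES.get(work_tree_code),
        index_status_code=index_code,
        index_status=STATUS_CODES.get(index_code),
        path=path,
        new_path=new_path)

# parse_status_line('M  giiit/models/tree.py') returns an Entity with
# index_status='modified' and work_tree_status='unmodified'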
punkkeks/OctoPrint
refs/heads/master
tests/slicing/__init__.py
47
# coding=utf-8
"""
Unit tests for ``octoprint.slicing``.
"""

from __future__ import absolute_import

__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
pjg101/SickRage
refs/heads/master
lib/twilio/rest/resources/conferences.py
51
from .util import parse_date, normalize_dates from . import InstanceResource, ListResource class Participant(InstanceResource): id_key = "call_sid" def mute(self): """ Mute the participant """ self.update_instance(muted="true") def unmute(self): """ Unmute the participant """ self.update_instance(muted="false") def kick(self): """ Remove the participant from the given conference """ self.delete_instance() class Participants(ListResource): name = "Participants" instance = Participant def list(self, **kwargs): """ Returns a list of :class:`Participant` resources in the given conference :param conference_sid: Conference this participant is part of :param boolean muted: If True, only show participants who are muted """ return self.get_instances(kwargs) def mute(self, call_sid): """ Mute the given participant """ return self.update(call_sid, muted=True) def unmute(self, call_sid): """ Unmute the given participant """ return self.update(call_sid, muted=False) def kick(self, call_sid): """ Remove the participant from the given conference """ return self.delete(call_sid) def delete(self, call_sid): """ Remove the participant from the given conference """ return self.delete_instance(call_sid) def update(self, sid, **kwargs): """ :param sid: Participant identifier :param boolean muted: If true, mute this participant """ return self.update_instance(sid, kwargs) class Conference(InstanceResource): subresources = [ Participants ] class Conferences(ListResource): name = "Conferences" instance = Conference @normalize_dates def list(self, updated_before=None, updated_after=None, created_after=None, created_before=None, updated=None, created=None, **kwargs): """ Return a list of :class:`Conference` resources :param status: Show conferences with this status :param friendly_name: Show conferences with this exact friendly_name :param date updated_after: List conferences updated after this date :param date updated_before: List conferences updated before this date :param date created_after: List conferences created after this date :param date created_before: List conferences created before this date """ kwargs["DateUpdated"] = parse_date(kwargs.get("date_updated", updated)) kwargs["DateCreated"] = parse_date(kwargs.get("date_created", created)) kwargs["DateUpdated<"] = updated_before kwargs["DateUpdated>"] = updated_after kwargs["DateCreated<"] = created_before kwargs["DateCreated>"] = created_after return self.get_instances(kwargs)
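A hedged usage sketch for the resources above, following the twilio-python 3.x client conventions; the account credentials and the status filter are placeholder values:

from twilio.rest import TwilioRestClient

client = TwilioRestClient('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')

# list in-progress conferences, then mute every participant in each;
# Participant.mute() issues an update with muted="true" (see above)
for conference in client.conferences.list(status='in-progress'):
    for participant in conference.participants.list():
        participant.mute()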
shaistaansari/django
refs/heads/master
django/contrib/postgres/lookups.py
199
from django.db.models import Lookup, Transform class PostgresSimpleLookup(Lookup): def as_sql(self, qn, connection): lhs, lhs_params = self.process_lhs(qn, connection) rhs, rhs_params = self.process_rhs(qn, connection) params = lhs_params + rhs_params return '%s %s %s' % (lhs, self.operator, rhs), params class FunctionTransform(Transform): def as_sql(self, qn, connection): lhs, params = qn.compile(self.lhs) return "%s(%s)" % (self.function, lhs), params class DataContains(PostgresSimpleLookup): lookup_name = 'contains' operator = '@>' class ContainedBy(PostgresSimpleLookup): lookup_name = 'contained_by' operator = '<@' class Overlap(PostgresSimpleLookup): lookup_name = 'overlap' operator = '&&' class HasKey(PostgresSimpleLookup): lookup_name = 'has_key' operator = '?' class HasKeys(PostgresSimpleLookup): lookup_name = 'has_keys' operator = '?&' class HasAnyKeys(PostgresSimpleLookup): lookup_name = 'has_any_keys' operator = '?|' class Unaccent(FunctionTransform): bilateral = True lookup_name = 'unaccent' function = 'UNACCENT'
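A sketch of how these lookup classes are consumed. django.contrib.postgres registers them on its field classes when the app loads; the registration is repeated here only to show the mechanism, and the Post model is an illustrative assumption:

from django.contrib.postgres.fields import ArrayField

ArrayField.register_lookup(DataContains)

# With a model such as Post(tags=ArrayField(...)),
#
#     Post.objects.filter(tags__contains=['django'])
#
# is compiled by PostgresSimpleLookup.as_sql() into roughly
#
#     SELECT ... FROM "post" WHERE "post"."tags" @> ['django']
#
# i.e. the lookup joins the compiled left-hand side, its operator
# ('@>' here) and the right-hand side parameters.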
simonsdave/tor-async-couchdb
refs/heads/master
tor_async_couchdb/async_model_actions.py
1
"""This module contains a collection of classes that implement Tornado async actions against CouchDB. """ import httplib import json import logging import re import urllib import tornado.httputil import tornado.httpclient import tornado.ioloop import tamper _logger = logging.getLogger("async_actions.%s" % __name__) """```database``` points to a CouchDB database. By default it points to a local CouchDB database. It is expected that the service's mainline will update this configuration. """ database = "http://127.0.0.1:5984/database" """If not None, ```tampering_signer``` is the keyczar signer used to enforce tampering proofing of the CouchDB database. """ tampering_signer = None """If CouchDB requires basic authentication in order to access it then set ```username``` and ```password``` to appropriate non-None values. """ username = None password = None """Certificate validation will be performned if ```validate_cert``` is ```True``` and we're interacting with CouchDB over TLS/SSL (ie ```database``` points to a URL starting with https). This config option is very useful when CouchDB self-signed certs. """ validate_cert = True def _fragmentation(data_size, disk_size): """Think of the fragmentation metric is that it's a measure of the % of the database or view that's used to store old documents and their associated metadata. See https://wiki.apache.org/couchdb/Compaction and http://docs.couchdb.org/en/latest/config/compaction.html#compaction-daemon-rules for details on fragmentation calculation. """ if data_size is None or disk_size is None: return None fragmentation = ((disk_size - float(data_size)) / disk_size) * 100.0 return int(round(fragmentation, 0)) class CouchDBAsyncHTTPRequest(tornado.httpclient.HTTPRequest): """```CouchDBAsyncHTTPRequest``` extends ```tornado.httpclient.HTTPRequest``` adding ... """ def __init__(self, path, method, body_as_dict): assert not path.startswith('/') url = "%s/%s" % (database, path) headers = { "Accept": "application/json", "Accept-Encoding": "charset=utf8", } if body_as_dict is not None: if tampering_signer: tamper.sign(tampering_signer, body_as_dict) body = json.dumps(body_as_dict) headers["Content-Type"] = "application/json; charset=utf8" else: body = None auth_mode = "basic" if username or password else None tornado.httpclient.HTTPRequest.__init__( self, url, method=method, body=body, headers=tornado.httputil.HTTPHeaders(headers), validate_cert=validate_cert, auth_mode=auth_mode, auth_username=username, auth_password=password) class CouchDBAsyncHTTPClient(object): """```CouchDBAsyncHTTPClient``` wraps ```tornado.httpclient.AsyncHTTPClient``` by adding standardized logging of error messages and calculating LCP response times for subsequent use in performance analysis and health monitoring. """ def __init__(self, expected_response_code, create_model_from_doc, expect_one_document=False): object.__init__(self) self.expected_response_code = expected_response_code self.create_model_from_doc = create_model_from_doc self.expect_one_document = expect_one_document self._callback = None def fetch(self, request, callback): """fetch() is perhaps not the best name but it matches the key method in the async HTTP client classs:-). 
""" assert self._callback is None self._callback = callback http_client = tornado.httpclient.AsyncHTTPClient() http_client.fetch( request, callback=self._on_http_client_fetch_done) def _on_http_client_fetch_done(self, response): # # write a message to the log which can be easily parsed # by performance analysis tools and used to understand # performance bottlenecks. # # http://tornado.readthedocs.org/en/latest/httpclient.html#response-objects # explains that the time_info attribute of a tornado response # object contains timing details of the phases of a request which # is available when using the cURL http client. a description # of these timing details can be found at # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html#TIMES # fmt = ( "CouchDB took {request_time:.2f} ms to respond " "with {http_response_code:d} to '{http_method}' " "against >>>{url}<<< - timing detail: " "q={queue:.2f} ms n={namelookup:.2f} ms " "c={connect:.2f} ms p={pretransfer:.2f} ms " "s={starttransfer:.2f} ms t={total:.2f} ms r={redirect:.2f} ms" ) msg_format_args = { "request_time": response.request_time * 1000, "http_response_code": response.code, "http_method": response.request.method, "url": response.effective_url, } def add_time_info_to_msg_format_args(key): msg_format_args[key] = response.time_info.get(key, 0) * 1000 add_time_info_to_msg_format_args("queue") add_time_info_to_msg_format_args("namelookup") add_time_info_to_msg_format_args("connect") add_time_info_to_msg_format_args("pretransfer") add_time_info_to_msg_format_args("starttransfer") add_time_info_to_msg_format_args("total") add_time_info_to_msg_format_args("redirect") msg = fmt.format(**msg_format_args) _logger.info(msg) # # check for errors ... # if response.code != self.expected_response_code: if response.code == httplib.CONFLICT: self._call_callback(False, True) return fmt = ( "CouchDB responded to %s on %s " "with HTTP response %d but expected %d" ) _logger.error( fmt, response.request.method, response.effective_url, response.code, self.expected_response_code) self._call_callback(False, False) return if response.error: _logger.error( "CouchDB responded to %s on %s with error '%s'", response.request.method, response.effective_url, response.error) self._call_callback(False, False) return # # process response body ... 
# # # CouchDB always returns response.body (a string) - let's convert the # body to a dict so we can operate on it more effectively # response_body = json.loads(response.body) if response.body else {} # # the response body either contains a bunch of documents that # need to be converted to model objects or a single document # if not self.create_model_from_doc: self._call_callback( True, # is_ok False, # is_conflict response_body, response_body.get("id", None), response_body.get("rev", None)) return if self.expect_one_document: model = self._check_doc_for_tampering_and_if_ok_create_model(response_body) self._call_callback( model is not None, False, # is_conflict model) return models = [] for row in response_body.get("rows", []): doc = row.get("doc", {}) model = self._check_doc_for_tampering_and_if_ok_create_model(doc) if model is not None: models.append(model) self._call_callback( True, # is_ok False, # is_conflict models) def _check_doc_for_tampering_and_if_ok_create_model(self, doc): if tampering_signer: if not tamper.verify(tampering_signer, doc): _logger.error( "tampering detected in doc '%s'", doc["_id"]) return None return self.create_model_from_doc(doc) def _call_callback(self, is_ok, is_conflict, model_models_or_response_body=None, _id=None, _rev=None): assert self._callback is not None self._callback( is_ok, is_conflict, model_models_or_response_body, _id, _rev, self) self._callback = None class AsyncAction(object): """Abstract base class for all async actions.""" def __init__(self, async_state): object.__init__(self) self.async_state = async_state class AsyncModelRetrieverByDocumentID(AsyncAction): """Async'ly retrieve a model from the CouchDB database by document ID. """ def __init__(self, document_id, async_state): AsyncAction.__init__(self, async_state) self.document_id = document_id self._callback = None def fetch(self, callback): assert self._callback is None self._callback = callback request = CouchDBAsyncHTTPRequest(self.document_id, 'GET', None) cac = CouchDBAsyncHTTPClient( httplib.OK, # expected_response_code self.create_model_from_doc, True) # expect_one_document cac.fetch(request, self._on_cac_fetch_done) def _on_cac_fetch_done(self, is_ok, is_conflict, model, _id, _rev, cac): assert is_conflict is False self._call_callback(is_ok, model) def create_model_from_doc(self, doc): """Concrete classes derived from this class must implement this method which takes a dictionary (```doc```) and creates a model instance. 
""" raise NotImplementedError() def _call_callback(self, is_ok, model=None): assert self._callback self._callback(is_ok, model, self) self._callback = None class BaseAsyncModelRetriever(AsyncAction): def __init__(self, async_state): AsyncAction.__init__(self, async_state) self._callback = None def fetch(self, callback): assert self._callback is None self._callback = callback # # useful when trying to figure out URL encodings # # http://meyerweb.com/eric/tools/dencoder/ # # and when thinking about array keys # # http://stackoverflow.com/questions/9687297/couchdb-search-or-filtering-on-key-array # path_fmt = '_design/%s/_view/%s?%s' query_string_key_value_pairs = self.get_query_string_key_value_pairs() query_string = urllib.urlencode(query_string_key_value_pairs) # :ASSUMPTION: that design docs and views are called the same thing # ie one view per design doc path = path_fmt % (self.design_doc, self.design_doc, query_string) request = CouchDBAsyncHTTPRequest(path, "GET", None) cac = CouchDBAsyncHTTPClient(httplib.OK, self.create_model_from_doc) cac.fetch(request, self.on_cac_fetch_done) def get_query_string_key_value_pairs(self): """This method is only called by ```fetch()``` to get the key value pairs that will be used to construct the query string in the request to CouchDB. The presence of this method was first introduced when support for "most recent document" type queries was required. Below is an example implementation for a rather complex scaneario where the keys are composite and we have a "most recent" style query type. def get_query_string_key_value_pairs(self): endkey = json.dumps([self._user.network_id, self._user.user_id]) startkey = json.dumps([self._user.network_id, self._user.user_id, {}]) return { "include_docs": "true", "limit": 1, "descending": "true", "endkey": endkey, "startkey": startkey, } Below is the JavaScript design document that would support the above implementation. { "language": "javascript", "views": { "member_details_by_network_id_user_id_and_created_on": { "map": "function(doc) { if (doc.type.match(/^member_details_v\\d+.\\d+/i)) { emit([doc.network_id, doc.user_id, doc.created_on], null) } }" } } } Note - all timestamps are expected to be represented as strings with the format YYYY-MM-DDTHH:MM:SS.MMMMMM+00:00 which is important because with this format sorting strings that are actually dates will work as you expect """ raise NotImplementedError() def on_cac_fetch_done(self, is_ok, is_conflict, models, _id, _rev, cac): raise NotImplementedError() def create_model_from_doc(self, doc): """Concrete classes derived from this class must implement this method which takes a dictionary (```doc```) and creates a model instance. 
""" raise NotImplementedError() class AsyncModelRetriever(BaseAsyncModelRetriever): """Async'ly retrieve a model from the CouchDB database.""" def __init__(self, design_doc, key, async_state): BaseAsyncModelRetriever.__init__(self, async_state) self.design_doc = design_doc self.key = key self._callback = None def get_query_string_key_value_pairs(self): return { "include_docs": "true", "key": json.dumps(self.key), } def on_cac_fetch_done(self, is_ok, is_conflict, models, _id, _rev, cac): assert is_conflict is False model = models[0] if models else None self._call_callback(is_ok, model) def _call_callback(self, is_ok, model=None): assert self._callback self._callback(is_ok, model, self) self._callback = None class AsyncModelsRetriever(BaseAsyncModelRetriever): """Async'ly retrieve a collection of models from CouchDB.""" def __init__(self, design_doc, start_key=None, end_key=None, async_state=None): BaseAsyncModelRetriever.__init__(self, async_state) self.design_doc = design_doc self.start_key = start_key self.end_key = end_key self._callback = None def get_query_string_key_value_pairs(self): query_params = { "include_docs": "true", } if self.start_key: query_params['startkey'] = json.dumps(self.start_key) if self.end_key: query_params['endkey'] = json.dumps(self.end_key) return query_params def transform_models(self, models): """By default the ```callback``` in ```fetch``` will recieve a list of models. Sometimes is useful to do a transformation on the list and have ```callback``` recieve the transformation rather than the list. For example, let's say you had a collection of models with the property x and you wanted to return a dictionary keyed by x. In this scenario you'd augment the derived class of ```AsyncModelsRetriever``` with something like: def transform_models(self, models): return {model.x: model for model in models} Don't expect this to be used too much but very useful when it is used:-) """ return models def on_cac_fetch_done(self, is_ok, is_conflict, models, _id, _rev, cac): assert is_conflict is False self._call_callback(is_ok, models) def _call_callback(self, is_ok, models=None): assert self._callback is not None self._callback(is_ok, self.transform_models(models), self) self._callback = None class InvalidTypeInDocForStoreException(Exception): """This exception is raised by ```AsyncPersister``` when a call to a model's as_doc_for_store() generates a doc with an invalid type property. """ def __init__(self, model): msg_fmt = ( "invalid 'type' in doc for store - " "see '%s.as_doc_for_store()" ) msg = msg_fmt % type(model) Exception.__init__(self, msg) class AsyncPersister(AsyncAction): """Async'ly persist a model object.""" """```_doc_type_reg_ex``` is used to verify the format of the type property for each document before the document is written to the store. 
""" _doc_type_reg_ex = re.compile( r"^[^\s]+_v\d+\.\d+$", re.IGNORECASE) def __init__(self, model, model_as_doc_for_store_args, async_state): AsyncAction.__init__(self, async_state) self.model = model self.model_as_doc_for_store_args = model_as_doc_for_store_args self._callback = None def persist(self, callback): assert not self._callback self._callback = callback model_as_doc_for_store = self.model.as_doc_for_store(*self.model_as_doc_for_store_args) # # this check is important because the conflict resolution # logic relies on being able to extract the type name from # a document read from the store # if not type(self)._doc_type_reg_ex.match(model_as_doc_for_store['type']): raise InvalidTypeInDocForStoreException(self.model) if '_id' in model_as_doc_for_store: path = model_as_doc_for_store['_id'] method = 'PUT' else: path = '' method = 'POST' request = CouchDBAsyncHTTPRequest(path, method, model_as_doc_for_store) cac = CouchDBAsyncHTTPClient(httplib.CREATED, None) cac.fetch(request, self._on_cac_fetch_done) def _on_cac_fetch_done(self, is_ok, is_conflict, models, _id, _rev, cac): """```self.model``` has just been written to a CouchDB database which means ```self.model```'s _id and _rev properties might be out of sync with the _id and _rev properties in CouchDB since CouchDB generates these values. The "if _id" and "if _rev" sections of code below unite the in-memory view and the CouchDB view of this object/document. """ if _id is not None: self.model._id = _id if _rev is not None: self.model._rev = _rev self._call_callback(is_ok, is_conflict) def _call_callback(self, is_ok, is_conflict): assert self._callback is not None assert (is_ok and not is_conflict) or (not is_ok) self._callback(is_ok, is_conflict, self) self._callback = None class AsyncDeleter(AsyncAction): """Async'ly delete a model object.""" def __init__(self, model, async_state=None): AsyncAction.__init__(self, async_state) self.model = model self._callback = None def delete(self, callback): assert not self._callback self._callback = callback if not self.model._id or not self.model._rev: self._call_callback(False, False) return path = "%s?rev=%s" % (self.model._id, self.model._rev) request = CouchDBAsyncHTTPRequest(path, "DELETE", None) cac = CouchDBAsyncHTTPClient(httplib.OK, None) cac.fetch(request, self._on_cac_fetch_done) def _on_cac_fetch_done(self, is_ok, is_conflict, models, _id, _rev, cac): self._call_callback(is_ok, is_conflict) def _call_callback(self, is_ok, is_conflict): assert self._callback is not None assert (is_ok and not is_conflict) or (not is_ok) self._callback(is_ok, is_conflict, self) self._callback = None class AsyncCouchDBHealthCheck(AsyncAction): """Async'ly confirm CouchDB can be reached.""" def __init__(self, async_state=None): AsyncAction.__init__(self, async_state) self._callback = None def check(self, callback): assert not self._callback self._callback = callback request = CouchDBAsyncHTTPRequest("", "GET", None) cac = CouchDBAsyncHTTPClient(httplib.OK, None) cac.fetch(request, self._on_cac_db_fetch_done) def _on_cac_db_fetch_done(self, is_ok, is_conflict, response_body, _id, _rev, cac): self._call_callback(is_ok) def _call_callback(self, is_ok): assert self._callback is not None self._callback(is_ok, self) self._callback = None class ViewMetrics(object): """An instance of this class contains metrics which describe both the shape and health of a view in a CouchDB databse. Instances of this class are created by ```AsyncViewMetricsRetriever```. 
""" def __init__(self, design_doc, data_size, disk_size): object.__init__(self) self.design_doc = design_doc self.data_size = data_size self.disk_size = disk_size @property def fragmentation(self): """The view's fragmentation as an integer percentage.""" return _fragmentation(self.data_size, self.disk_size) class DatabaseMetrics(object): """An instance of this class contains metrics which describe both the shape and health of a CouchDB databse. Instances of this class are created by ```AsyncDatabaseMetricsRetriever```. """ def __init__(self, database, doc_count, data_size, disk_size, view_metrics): object.__init__(self) self.database = database self.doc_count = doc_count self.data_size = data_size self.disk_size = disk_size self.view_metrics = view_metrics @property def fragmentation(self): """The database's fragmentation as an integer percentage.""" return _fragmentation(self.data_size, self.disk_size) class AsyncDatabaseMetricsRetriever(AsyncAction): """Async'ly retrieve metrics for the CouchDB database.""" # FDD = Fetch Failure Details FFD_OK = 0x0000 FFD_ERROR = 0x0080 FFD_ERROR_TALKING_TO_COUCHDB = FFD_ERROR | 0x0001 FFD_ERROR_GETTING_VIEW_METRICS = FFD_ERROR | 0x0002 def __init__(self, async_state=None): AsyncAction.__init__(self, async_state) self.fetch_failure_detail = None self._callback = None def fetch(self, callback): assert not self._callback self._callback = callback request = CouchDBAsyncHTTPRequest("", "GET", None) cac = CouchDBAsyncHTTPClient(httplib.OK, None) cac.fetch(request, self._on_cac_db_fetch_done) def _on_cac_db_fetch_done(self, is_ok, is_conflict, response_body, _id, _rev, acdba): assert is_conflict is False if not is_ok: self._call_callback(type(self).FFD_ERROR_TALKING_TO_COUCHDB) return async_state = ( response_body.get("doc_count"), response_body.get("data_size"), response_body.get("disk_size"), ) aaddmr = AsyncAllViewMetricsRetriever(async_state) aaddmr.fetch(self._on_aaddmr_fetch_done) def _on_aaddmr_fetch_done(self, is_ok, view_metrics, aaddmr): if not is_ok: self._call_callback(type(self).FFD_ERROR_GETTING_VIEW_METRICS) return (doc_count, data_size, disk_size) = aaddmr.async_state database_metrics = DatabaseMetrics( database, doc_count, data_size, disk_size, view_metrics) self._call_callback(type(self).FFD_OK, database_metrics) def _call_callback(self, fetch_failure_detail, database_metrics=None): assert self._callback is not None assert self.fetch_failure_detail is None self.fetch_failure_detail = fetch_failure_detail is_ok = not bool(self.fetch_failure_detail & type(self).FFD_ERROR) self._callback( is_ok, database_metrics if is_ok else None, self) self._callback = None class AsyncAllViewMetricsRetriever(AsyncAction): """Async'ly retrieve metrics for all views in a database.""" # FDD = Fetch Failure Details FFD_OK = 0x0000 FFD_ERROR = 0x0080 FFD_ERROR_TALKING_TO_COUCHDB = FFD_ERROR | 0x0001 FFD_ERROR_FETCHING_VIEW_METRICS = FFD_ERROR | 0x0002 FFD_NO_DESIGN_DOCS_IN_DATABASE = 0x0003 def __init__(self, async_state=None): AsyncAction.__init__(self, async_state) self.fetch_failure_detail = None self._todo = [] self._done = [] self._callback = None def fetch(self, callback): assert not self._callback self._callback = callback # # try something like this to get a sense of the response # # curl 'http://127.0.0.1:5984/database/_all_docs?startkey="_design"&endkey="_design0"' | python -m json.tool # # and just remember to replace "database" in the above request. 
the # output will look something like: # # { # "offset": 2, # "rows": [ # { # "id": "_design/fruit_by_fruit_id", # "key": "_design/fruit_by_fruit_id", # "value": { # "rev": "1-446934fdc8cd18bee9db4b9df095074a" # } # } # ], # "total_rows": 3 # } # path = '_all_docs?startkey="_design"&endkey="_design0"' request = CouchDBAsyncHTTPRequest(path, "GET", None) cac = CouchDBAsyncHTTPClient(httplib.OK, None) cac.fetch(request, self._on_cac_fetch_done) def _on_cac_fetch_done(self, is_ok, is_conflict, response_body, _id, _rev, acdba): assert is_conflict is False if not is_ok: self._call_callback(type(self).FFD_ERROR_TALKING_TO_COUCHDB) return rows = response_body.get("rows", []) if not rows: self._call_callback(type(self).FFD_NO_DESIGN_DOCS_IN_DATABASE) return for row in rows: design_doc = row["key"].split("/")[1] self._todo.append(design_doc) avmr = AsyncViewMetricsRetriever(design_doc) avmr.fetch(self._on_avmr_fetch_done) def _on_avmr_fetch_done(self, is_ok, view_metrics, avmr): if not is_ok: self._call_callback(type(self).FFD_ERROR_FETCHING_VIEW_METRICS) return self._todo.remove(view_metrics.design_doc) self._done.append(view_metrics) self._call_callback(type(self).FFD_OK) def _call_callback(self, fetch_failure_detail): if not self._callback: # results have already been sent to caller even though # we're still getting responses back from CouchDB return is_ok = not bool(fetch_failure_detail & type(self).FFD_ERROR) if not is_ok: assert self.fetch_failure_detail is None self.fetch_failure_detail = fetch_failure_detail self._callback(False, None, self) self._callback = None return if self._todo: return assert self.fetch_failure_detail is None self.fetch_failure_detail = fetch_failure_detail self._callback(True, self._done, self) self._callback = None class AsyncViewMetricsRetriever(AsyncAction): """Async'ly retrieve metrics for a single view.""" # FDD = Fetch Failure Details FFD_OK = 0x0000 FFD_ERROR = 0x0080 FFD_ERROR_TALKING_TO_COUCHDB = FFD_ERROR | 0x0001 FFD_INVALID_RESPONSE_BODY = 0x0002 def __init__(self, design_doc, async_state=None): AsyncAction.__init__(self, async_state) self.design_doc = design_doc self.fetch_failure_detail = None self._callback = None def fetch(self, callback): assert not self._callback self._callback = callback path = '_design/%s/_info' % self.design_doc request = CouchDBAsyncHTTPRequest(path, "GET", None) cac = CouchDBAsyncHTTPClient(httplib.OK, None) cac.fetch(request, self._on_cac_fetch_done) def _on_cac_fetch_done(self, is_ok, is_conflict, response_body, _id, _rev, cac): assert is_conflict is False if not is_ok: self._call_callback(type(self).FFD_ERROR_TALKING_TO_COUCHDB) return view_index = response_body.get('view_index', {}) data_size = view_index.get('data_size') disk_size = view_index.get('disk_size') cls = type(self) self._call_callback( cls.FFD_OK if data_size is not None and disk_size is not None else cls.FFD_INVALID_RESPONSE_BODY, data_size, disk_size) def _call_callback(self, fetch_failure_detail, data_size=None, disk_size=None): assert self._callback is not None assert self.fetch_failure_detail is None self.fetch_failure_detail = fetch_failure_detail is_ok = not bool(self.fetch_failure_detail & type(self).FFD_ERROR) self._callback( is_ok, ViewMetrics(self.design_doc, data_size, disk_size) if is_ok else None, self) self._callback = None
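A minimal sketch of driving the async actions above from a Tornado IOLoop. The retriever below reuses the fruit_by_fruit_id design document mentioned in the comments above; the document key and the behaviour of create_model_from_doc() are illustrative assumptions:

class AsyncFruitRetriever(AsyncModelRetriever):

    def __init__(self, fruit_id, async_state=None):
        AsyncModelRetriever.__init__(
            self,
            'fruit_by_fruit_id',    # one view per design doc (see fetch())
            fruit_id,               # the view key
            async_state)

    def create_model_from_doc(self, doc):
        # a real implementation would build a domain model object here
        return doc

def on_fetch_done(is_ok, fruit, fruit_retriever):
    print is_ok, fruit
    tornado.ioloop.IOLoop.instance().stop()

AsyncFruitRetriever('fruit-123').fetch(on_fetch_done)
tornado.ioloop.IOLoop.instance().start()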
c0defreak/python-for-android
refs/heads/master
python-build/python-libs/gdata/build/lib/gdata/calendar/__init__.py
135
#!/usr/bin/python # # Copyright (C) 2006 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains extensions to ElementWrapper objects used with Google Calendar.""" __author__ = 'api.vli (Vivian Li), api.rboyd (Ryan Boyd)' try: from xml.etree import cElementTree as ElementTree except ImportError: try: import cElementTree as ElementTree except ImportError: try: from xml.etree import ElementTree except ImportError: from elementtree import ElementTree import atom import gdata # XML namespaces which are often used in Google Calendar entities. GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005' GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005}%s' WEB_CONTENT_LINK_REL = '%s/%s' % (GCAL_NAMESPACE, 'webContent') GACL_NAMESPACE = gdata.GACL_NAMESPACE GACL_TEMPLATE = gdata.GACL_TEMPLATE class ValueAttributeContainer(atom.AtomBase): """A parent class for all Calendar classes which have a value attribute. Children include Color, AccessLevel, Hidden """ _children = atom.AtomBase._children.copy() _attributes = atom.AtomBase._attributes.copy() _attributes['value'] = 'value' def __init__(self, value=None, extension_elements=None, extension_attributes=None, text=None): self.value = value self.text = text self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} class Color(ValueAttributeContainer): """The Google Calendar color element""" _tag = 'color' _namespace = GCAL_NAMESPACE _children = ValueAttributeContainer._children.copy() _attributes = ValueAttributeContainer._attributes.copy() class AccessLevel(ValueAttributeContainer): """The Google Calendar accesslevel element""" _tag = 'accesslevel' _namespace = GCAL_NAMESPACE _children = ValueAttributeContainer._children.copy() _attributes = ValueAttributeContainer._attributes.copy() class Hidden(ValueAttributeContainer): """The Google Calendar hidden element""" _tag = 'hidden' _namespace = GCAL_NAMESPACE _children = ValueAttributeContainer._children.copy() _attributes = ValueAttributeContainer._attributes.copy() class Selected(ValueAttributeContainer): """The Google Calendar selected element""" _tag = 'selected' _namespace = GCAL_NAMESPACE _children = ValueAttributeContainer._children.copy() _attributes = ValueAttributeContainer._attributes.copy() class Timezone(ValueAttributeContainer): """The Google Calendar timezone element""" _tag = 'timezone' _namespace = GCAL_NAMESPACE _children = ValueAttributeContainer._children.copy() _attributes = ValueAttributeContainer._attributes.copy() class Where(atom.AtomBase): """The Google Calendar Where element""" _tag = 'where' _namespace = gdata.GDATA_NAMESPACE _children = atom.AtomBase._children.copy() _attributes = atom.AtomBase._attributes.copy() _attributes['valueString'] = 'value_string' def __init__(self, value_string=None, extension_elements=None, extension_attributes=None, text=None): self.value_string = value_string self.text = text self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} class 
CalendarListEntry(gdata.GDataEntry, gdata.LinkFinder):
  """A Google Calendar meta Entry flavor of an Atom Entry"""

  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}color' % GCAL_NAMESPACE] = ('color', Color)
  _children['{%s}accesslevel' % GCAL_NAMESPACE] = ('access_level', AccessLevel)
  _children['{%s}hidden' % GCAL_NAMESPACE] = ('hidden', Hidden)
  _children['{%s}selected' % GCAL_NAMESPACE] = ('selected', Selected)
  _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone)
  _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', Where)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None, title=None,
               updated=None, color=None, access_level=None, hidden=None,
               timezone=None, selected=None, where=None,
               extension_elements=None, extension_attributes=None, text=None):
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated, text=None)
    self.color = color
    self.access_level = access_level
    self.hidden = hidden
    self.selected = selected
    self.timezone = timezone
    self.where = where


class CalendarListFeed(gdata.GDataFeed, gdata.LinkFinder):
  """A Google Calendar meta feed flavor of an Atom Feed"""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarListEntry])


class Scope(atom.AtomBase):
  """The Google ACL scope element"""

  _tag = 'scope'
  _namespace = GACL_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'
  _attributes['type'] = 'type'

  def __init__(self, extension_elements=None, value=None, scope_type=None,
               extension_attributes=None, text=None):
    self.value = value
    self.type = scope_type
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


class Role(ValueAttributeContainer):
  """The Google ACL role element"""

  _tag = 'role'
  _namespace = GACL_NAMESPACE
  _children = ValueAttributeContainer._children.copy()
  _attributes = ValueAttributeContainer._attributes.copy()


class CalendarAclEntry(gdata.GDataEntry, gdata.LinkFinder):
  """A Google Calendar ACL Entry flavor of an Atom Entry"""

  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()
  _children['{%s}scope' % GACL_NAMESPACE] = ('scope', Scope)
  _children['{%s}role' % GACL_NAMESPACE] = ('role', Role)

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None, title=None,
               updated=None, scope=None, role=None,
               extension_elements=None, extension_attributes=None, text=None):
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated, text=None)
    self.scope = scope
    self.role = role


class CalendarAclFeed(gdata.GDataFeed, gdata.LinkFinder):
  """A Google Calendar ACL feed flavor of an Atom Feed"""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarAclEntry])


class CalendarEventCommentEntry(gdata.GDataEntry, gdata.LinkFinder):
  """A Google Calendar event comments entry flavor of an Atom Entry"""

  _tag = gdata.GDataEntry._tag
  _namespace = gdata.GDataEntry._namespace
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()


class CalendarEventCommentFeed(gdata.GDataFeed, gdata.LinkFinder):
  """A Google Calendar event comments feed flavor of an Atom Feed"""

  _tag = gdata.GDataFeed._tag
  _namespace = gdata.GDataFeed._namespace
  _children = gdata.GDataFeed._children.copy()
  _attributes = gdata.GDataFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [CalendarEventCommentEntry])


class ExtendedProperty(gdata.ExtendedProperty):
  """A transparent subclass of gdata.ExtendedProperty added to this module
  for backwards compatibility."""


class Reminder(atom.AtomBase):
  """The Google Calendar reminder element"""

  _tag = 'reminder'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['absoluteTime'] = 'absolute_time'
  _attributes['days'] = 'days'
  _attributes['hours'] = 'hours'
  _attributes['minutes'] = 'minutes'
  _attributes['method'] = 'method'

  def __init__(self, absolute_time=None, days=None, hours=None, minutes=None,
               method=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.absolute_time = absolute_time
    if days is not None:
      self.days = str(days)
    else:
      self.days = None
    if hours is not None:
      self.hours = str(hours)
    else:
      self.hours = None
    if minutes is not None:
      self.minutes = str(minutes)
    else:
      self.minutes = None
    self.method = method
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


class When(atom.AtomBase):
  """The Google Calendar When element"""

  _tag = 'when'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder])
  _attributes['startTime'] = 'start_time'
  _attributes['endTime'] = 'end_time'

  def __init__(self, start_time=None, end_time=None, reminder=None,
               extension_elements=None, extension_attributes=None, text=None):
    self.start_time = start_time
    self.end_time = end_time
    self.reminder = reminder or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


class Recurrence(atom.AtomBase):
  """The Google Calendar Recurrence element"""

  _tag = 'recurrence'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()


class UriEnumElement(atom.AtomBase):

  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, tag, enum_map, attrib_name='value',
               extension_elements=None, extension_attributes=None, text=None):
    self.tag = tag
    self.enum_map = enum_map
    self.attrib_name = attrib_name
    self.value = None
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}

  def findKey(self, value):
    res = [item[0] for item in self.enum_map.items() if item[1] == value]
    if res is None or len(res) == 0:
      return None
    return res[0]

  def _ConvertElementAttributeToMember(self, attribute, value):
    # Special logic to use the enum_map to set the value of the object's
    # value member.
    if attribute == self.attrib_name and value != '':
      self.value = self.enum_map[value]
      return
    # Find the attribute in this class's list of attributes.
    if self.__class__._attributes.has_key(attribute):
      # Find the member of this class which corresponds to the XML attribute
      # (lookup in current_class._attributes) and set this member to the
      # desired value (using self.__dict__).
      setattr(self, self.__class__._attributes[attribute], value)
    else:
      # The current class doesn't map this attribute, so try the parent class.
      atom.ExtensionContainer._ConvertElementAttributeToMember(self,
                                                               attribute,
                                                               value)

  def _AddMembersToElementTree(self, tree):
    # Convert the members of this class which are XML child nodes.
    # This uses the class's _children dictionary to find the members which
    # should become XML child nodes.
    member_node_names = [values[0] for tag, values in
                         self.__class__._children.iteritems()]
    for member_name in member_node_names:
      member = getattr(self, member_name)
      if member is None:
        pass
      elif isinstance(member, list):
        for instance in member:
          instance._BecomeChildElement(tree)
      else:
        member._BecomeChildElement(tree)
    # Special logic to set the desired XML attribute.
    key = self.findKey(self.value)
    if key is not None:
      tree.attrib[self.attrib_name] = key
    # Convert the members of this class which are XML attributes.
    for xml_attribute, member_name in self.__class__._attributes.iteritems():
      member = getattr(self, member_name)
      if member is not None:
        tree.attrib[xml_attribute] = member
    # Lastly, call the parent's _AddMembersToElementTree to get any
    # extension elements.
    atom.ExtensionContainer._AddMembersToElementTree(self, tree)


class AttendeeStatus(UriEnumElement):
  """The Google Calendar attendeeStatus element"""

  _tag = 'attendeeStatus'
  _namespace = gdata.GDATA_NAMESPACE
  _children = UriEnumElement._children.copy()
  _attributes = UriEnumElement._attributes.copy()

  attendee_enum = {
      'http://schemas.google.com/g/2005#event.accepted': 'ACCEPTED',
      'http://schemas.google.com/g/2005#event.declined': 'DECLINED',
      'http://schemas.google.com/g/2005#event.invited': 'INVITED',
      'http://schemas.google.com/g/2005#event.tentative': 'TENTATIVE'}

  def __init__(self, extension_elements=None,
               extension_attributes=None, text=None):
    UriEnumElement.__init__(self, 'attendeeStatus',
                            AttendeeStatus.attendee_enum,
                            extension_elements=extension_elements,
                            extension_attributes=extension_attributes,
                            text=text)


class AttendeeType(UriEnumElement):
  """The Google Calendar attendeeType element"""

  _tag = 'attendeeType'
  _namespace = gdata.GDATA_NAMESPACE
  _children = UriEnumElement._children.copy()
  _attributes = UriEnumElement._attributes.copy()

  attendee_type_enum = {
      'http://schemas.google.com/g/2005#event.optional': 'OPTIONAL',
      'http://schemas.google.com/g/2005#event.required': 'REQUIRED'}

  def __init__(self, extension_elements=None,
               extension_attributes=None, text=None):
    UriEnumElement.__init__(self, 'attendeeType',
                            AttendeeType.attendee_type_enum,
                            extension_elements=extension_elements,
                            extension_attributes=extension_attributes,
                            text=text)


class Visibility(UriEnumElement):
  """The Google Calendar Visibility element"""

  _tag = 'visibility'
  _namespace = gdata.GDATA_NAMESPACE
  _children = UriEnumElement._children.copy()
  _attributes = UriEnumElement._attributes.copy()

  visibility_enum = {
      'http://schemas.google.com/g/2005#event.confidential': 'CONFIDENTIAL',
      'http://schemas.google.com/g/2005#event.default': 'DEFAULT',
      'http://schemas.google.com/g/2005#event.private': 'PRIVATE',
      'http://schemas.google.com/g/2005#event.public': 'PUBLIC'}

  def __init__(self, extension_elements=None,
               extension_attributes=None, text=None):
    UriEnumElement.__init__(self, 'visibility', Visibility.visibility_enum,
                            extension_elements=extension_elements,
                            extension_attributes=extension_attributes,
                            text=text)


class Transparency(UriEnumElement):
  """The Google Calendar Transparency element"""

  _tag = 'transparency'
  _namespace = gdata.GDATA_NAMESPACE
  _children = UriEnumElement._children.copy()
  _attributes = UriEnumElement._attributes.copy()

  transparency_enum = {
      'http://schemas.google.com/g/2005#event.opaque': 'OPAQUE',
      'http://schemas.google.com/g/2005#event.transparent': 'TRANSPARENT'}

  def __init__(self, extension_elements=None,
               extension_attributes=None, text=None):
    UriEnumElement.__init__(self, tag='transparency',
                            enum_map=Transparency.transparency_enum,
                            extension_elements=extension_elements,
                            extension_attributes=extension_attributes,
                            text=text)


class Comments(atom.AtomBase):
  """The Google Calendar comments element"""

  _tag = 'comments'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                       gdata.FeedLink)
  _attributes['rel'] = 'rel'

  def __init__(self, rel=None, feed_link=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.rel = rel
    self.feed_link = feed_link
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


class EventStatus(UriEnumElement):
  """The Google Calendar eventStatus element"""

  _tag = 'eventStatus'
  _namespace = gdata.GDATA_NAMESPACE
  _children = UriEnumElement._children.copy()
  _attributes = UriEnumElement._attributes.copy()

  status_enum = {
      'http://schemas.google.com/g/2005#event.canceled': 'CANCELED',
      'http://schemas.google.com/g/2005#event.confirmed': 'CONFIRMED',
      'http://schemas.google.com/g/2005#event.tentative': 'TENTATIVE'}

  def __init__(self, extension_elements=None,
               extension_attributes=None, text=None):
    UriEnumElement.__init__(self, tag='eventStatus',
                            enum_map=EventStatus.status_enum,
                            extension_elements=extension_elements,
                            extension_attributes=extension_attributes,
                            text=text)


class Who(UriEnumElement):
  """The Google Calendar Who element"""

  _tag = 'who'
  _namespace = gdata.GDATA_NAMESPACE
  _children = UriEnumElement._children.copy()
  _attributes = UriEnumElement._attributes.copy()
  _children['{%s}attendeeStatus' % gdata.GDATA_NAMESPACE] = (
      'attendee_status', AttendeeStatus)
  _children['{%s}attendeeType' % gdata.GDATA_NAMESPACE] = ('attendee_type',
                                                           AttendeeType)
  _attributes['valueString'] = 'name'
  _attributes['email'] = 'email'

  relEnum = {
      'http://schemas.google.com/g/2005#event.attendee': 'ATTENDEE',
      'http://schemas.google.com/g/2005#event.organizer': 'ORGANIZER',
      'http://schemas.google.com/g/2005#event.performer': 'PERFORMER',
      'http://schemas.google.com/g/2005#event.speaker': 'SPEAKER',
      'http://schemas.google.com/g/2005#message.bcc': 'BCC',
      'http://schemas.google.com/g/2005#message.cc': 'CC',
      'http://schemas.google.com/g/2005#message.from': 'FROM',
      'http://schemas.google.com/g/2005#message.reply-to': 'REPLY_TO',
      'http://schemas.google.com/g/2005#message.to': 'TO'}

  def __init__(self, name=None, email=None, attendee_status=None,
               attendee_type=None, rel=None, extension_elements=None,
               extension_attributes=None, text=None):
    UriEnumElement.__init__(self, 'who', Who.relEnum, attrib_name='rel',
                            extension_elements=extension_elements,
                            extension_attributes=extension_attributes,
                            text=text)
    self.name = name
    self.email = email
    self.attendee_status = attendee_status
    self.attendee_type = attendee_type
    self.rel = rel


class OriginalEvent(atom.AtomBase):
  """The Google Calendar OriginalEvent element"""

  _tag = 'originalEvent'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  # TODO: The when tag used to map to an EntryLink, make sure it should
  # really be a When.
  _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', When)
  _attributes['id'] = 'id'
  _attributes['href'] = 'href'

  def __init__(self, id=None, href=None, when=None,
               extension_elements=None, extension_attributes=None, text=None):
    self.id = id
    self.href = href
    self.when = when
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


def GetCalendarEventEntryClass():
  return CalendarEventEntry


# This class is not completely defined here, because of a circular reference
# in which CalendarEventEntryLink and CalendarEventEntry refer to one another.
class CalendarEventEntryLink(gdata.EntryLink):
  """An entryLink which contains a calendar event entry

  Within an event's recurrenceExceptions, an entry link
  points to a calendar event entry. This class exists
  to capture the calendar specific extensions in the entry.
  """

  _tag = 'entryLink'
  _namespace = gdata.GDATA_NAMESPACE
  _children = gdata.EntryLink._children.copy()
  _attributes = gdata.EntryLink._attributes.copy()
  # The CalendarEventEntryLink should list CalendarEventEntry as a child but
  # that class hasn't been defined yet, so we will wait until after defining
  # CalendarEventEntry to list it in _children.


class RecurrenceException(atom.AtomBase):
  """The Google Calendar RecurrenceException element"""

  _tag = 'recurrenceException'
  _namespace = gdata.GDATA_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _children['{%s}entryLink' % gdata.GDATA_NAMESPACE] = (
      'entry_link', CalendarEventEntryLink)
  _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = (
      'original_event', OriginalEvent)
  _attributes['specialized'] = 'specialized'

  def __init__(self, specialized=None, entry_link=None, original_event=None,
               extension_elements=None, extension_attributes=None, text=None):
    self.specialized = specialized
    self.entry_link = entry_link
    self.original_event = original_event
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


class SendEventNotifications(atom.AtomBase):
  """The Google Calendar sendEventNotifications element"""

  _tag = 'sendEventNotifications'
  _namespace = GCAL_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'

  def __init__(self, extension_elements=None, value=None,
               extension_attributes=None, text=None):
    self.value = value
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


class QuickAdd(atom.AtomBase):
  """The Google Calendar quickadd element"""

  _tag = 'quickadd'
  _namespace = GCAL_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'

  def __init__(self, extension_elements=None, value=None,
               extension_attributes=None, text=None):
    self.value = value
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}

  def _TransferToElementTree(self, element_tree):
    if self.value:
      element_tree.attrib['value'] = self.value
    element_tree.tag = GCAL_TEMPLATE % 'quickadd'
    atom.AtomBase._TransferToElementTree(self, element_tree)
    return element_tree

  def _TakeAttributeFromElementTree(self, attribute, element_tree):
    if attribute == 'value':
      self.value = element_tree.attrib[attribute]
      del element_tree.attrib[attribute]
    else:
      atom.AtomBase._TakeAttributeFromElementTree(self, attribute,
                                                  element_tree)


class Sequence(atom.AtomBase):
  _tag = 'sequence'
  _namespace = GCAL_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['value'] = 'value'

  def __init__(self, value=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.value = value
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


class WebContentGadgetPref(atom.AtomBase):

  _tag = 'webContentGadgetPref'
  _namespace = GCAL_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['name'] = 'name'
  _attributes['value'] = 'value'

  """The Google Calendar Web Content Gadget Preferences element"""

  def __init__(self, name=None, value=None, extension_elements=None,
               extension_attributes=None, text=None):
    self.name = name
    self.value = value
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


class WebContent(atom.AtomBase):

  _tag = 'webContent'
  _namespace = GCAL_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()
  _children['{%s}webContentGadgetPref' % GCAL_NAMESPACE] = (
      'gadget_pref', [WebContentGadgetPref])
  _attributes['url'] = 'url'
  _attributes['width'] = 'width'
  _attributes['height'] = 'height'

  def __init__(self, url=None, width=None, height=None, text=None,
               gadget_pref=None, extension_elements=None,
               extension_attributes=None):
    self.url = url
    self.width = width
    self.height = height
    self.text = text
    self.gadget_pref = gadget_pref or []
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}


class WebContentLink(atom.Link):

  _tag = 'link'
  _namespace = atom.ATOM_NAMESPACE
  _children = atom.Link._children.copy()
  _attributes = atom.Link._attributes.copy()
  _children['{%s}webContent' % GCAL_NAMESPACE] = ('web_content', WebContent)

  def __init__(self, title=None, href=None, link_type=None,
               web_content=None):
    atom.Link.__init__(self, rel=WEB_CONTENT_LINK_REL, title=title,
                       href=href, link_type=link_type)
    self.web_content = web_content


class CalendarEventEntry(gdata.BatchEntry):
  """A Google Calendar flavor of an Atom Entry"""

  _tag = gdata.BatchEntry._tag
  _namespace = gdata.BatchEntry._namespace
  _children = gdata.BatchEntry._children.copy()
  _attributes = gdata.BatchEntry._attributes.copy()
  # This class also contains WebContentLinks but converting those members
  # is handled in a special version of _ConvertElementTreeToMember.
  _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', [Where])
  _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', [When])
  _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', [Who])
  _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
      'extended_property', [ExtendedProperty])
  _children['{%s}visibility' % gdata.GDATA_NAMESPACE] = ('visibility',
                                                         Visibility)
  _children['{%s}transparency' % gdata.GDATA_NAMESPACE] = ('transparency',
                                                           Transparency)
  _children['{%s}eventStatus' % gdata.GDATA_NAMESPACE] = ('event_status',
                                                          EventStatus)
  _children['{%s}recurrence' % gdata.GDATA_NAMESPACE] = ('recurrence',
                                                         Recurrence)
  _children['{%s}recurrenceException' % gdata.GDATA_NAMESPACE] = (
      'recurrence_exception', [RecurrenceException])
  _children['{%s}sendEventNotifications' % GCAL_NAMESPACE] = (
      'send_event_notifications', SendEventNotifications)
  _children['{%s}quickadd' % GCAL_NAMESPACE] = ('quick_add', QuickAdd)
  _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments)
  _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = (
      'original_event', OriginalEvent)
  _children['{%s}sequence' % GCAL_NAMESPACE] = ('sequence', Sequence)
  _children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder])

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None, title=None,
               updated=None, transparency=None, comments=None,
               event_status=None, send_event_notifications=None,
               visibility=None, recurrence=None, recurrence_exception=None,
               where=None, when=None, who=None, quick_add=None,
               extended_property=None, original_event=None,
               batch_operation=None, batch_id=None, batch_status=None,
               sequence=None, reminder=None,
               extension_elements=None, extension_attributes=None, text=None):
    gdata.BatchEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published,
                              batch_operation=batch_operation,
                              batch_id=batch_id, batch_status=batch_status,
                              title=title, updated=updated)
    self.transparency = transparency
    self.comments = comments
    self.event_status = event_status
    self.send_event_notifications = send_event_notifications
    self.visibility = visibility
    self.recurrence = recurrence
    self.recurrence_exception = recurrence_exception or []
    self.where = where or []
    self.when = when or []
    self.who = who or []
    self.quick_add = quick_add
    self.extended_property = extended_property or []
    self.original_event = original_event
    self.sequence = sequence
    self.reminder = reminder or []
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}

  # We needed to add special logic to _ConvertElementTreeToMember because we
  # want to make links with a rel of WEB_CONTENT_LINK_REL into a
  # WebContentLink
  def _ConvertElementTreeToMember(self, child_tree):
    # Special logic to handle Web Content links
    if (child_tree.tag == '{%s}link' % atom.ATOM_NAMESPACE and
        child_tree.attrib['rel'] == WEB_CONTENT_LINK_REL):
      if self.link is None:
        self.link = []
      self.link.append(atom._CreateClassFromElementTree(WebContentLink,
                                                        child_tree))
      return
    # Find the element's tag in this class's list of child members
    if self.__class__._children.has_key(child_tree.tag):
      member_name = self.__class__._children[child_tree.tag][0]
      member_class = self.__class__._children[child_tree.tag][1]
      # If the class member is supposed to contain a list, make sure the
      # matching member is set to a list, then append the new member
      # instance to the list.
      if isinstance(member_class, list):
        if getattr(self, member_name) is None:
          setattr(self, member_name, [])
        getattr(self, member_name).append(atom._CreateClassFromElementTree(
            member_class[0], child_tree))
      else:
        setattr(self, member_name,
                atom._CreateClassFromElementTree(member_class, child_tree))
    else:
      atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)

  def GetWebContentLink(self):
    """Finds the first link with rel set to WEB_CONTENT_LINK_REL

    Returns:
      A gdata.calendar.WebContentLink or None if none of the links had rel
      equal to WEB_CONTENT_LINK_REL
    """
    for a_link in self.link:
      if a_link.rel == WEB_CONTENT_LINK_REL:
        return a_link
    return None


def CalendarEventEntryFromString(xml_string):
  return atom.CreateClassFromXMLString(CalendarEventEntry, xml_string)


def CalendarEventCommentEntryFromString(xml_string):
  return atom.CreateClassFromXMLString(CalendarEventCommentEntry, xml_string)


CalendarEventEntryLink._children = {'{%s}entry' % atom.ATOM_NAMESPACE:
                                    ('entry', CalendarEventEntry)}


def CalendarEventEntryLinkFromString(xml_string):
  return atom.CreateClassFromXMLString(CalendarEventEntryLink, xml_string)


class CalendarEventFeed(gdata.BatchFeed, gdata.LinkFinder):
  """A Google Calendar event feed flavor of an Atom Feed"""

  _tag = gdata.BatchFeed._tag
  _namespace = gdata.BatchFeed._namespace
  _children = gdata.BatchFeed._children.copy()
  _attributes = gdata.BatchFeed._attributes.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                  [CalendarEventEntry])
  _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone)

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, interrupted=None, timezone=None,
               extension_elements=None, extension_attributes=None, text=None):
    gdata.BatchFeed.__init__(self, author=author, category=category,
                             contributor=contributor, generator=generator,
                             icon=icon, atom_id=atom_id, link=link,
                             logo=logo, rights=rights, subtitle=subtitle,
                             title=title, updated=updated, entry=entry,
                             total_results=total_results,
                             start_index=start_index,
                             items_per_page=items_per_page,
                             interrupted=interrupted,
                             extension_elements=extension_elements,
                             extension_attributes=extension_attributes,
                             text=text)
    self.timezone = timezone


def CalendarListEntryFromString(xml_string):
  return atom.CreateClassFromXMLString(CalendarListEntry, xml_string)


def CalendarAclEntryFromString(xml_string):
  return atom.CreateClassFromXMLString(CalendarAclEntry, xml_string)


def CalendarListFeedFromString(xml_string):
  return atom.CreateClassFromXMLString(CalendarListFeed, xml_string)


def CalendarAclFeedFromString(xml_string):
  return atom.CreateClassFromXMLString(CalendarAclFeed, xml_string)


def CalendarEventFeedFromString(xml_string):
  return atom.CreateClassFromXMLString(CalendarEventFeed, xml_string)


def CalendarEventCommentFeedFromString(xml_string):
  return atom.CreateClassFromXMLString(CalendarEventCommentFeed, xml_string)
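For orientation, a minimal usage sketch of the classes above (assuming the Python 2 era atom and gdata.calendar packages are importable; the event data is made up):

import atom
import gdata.calendar

# Build an event with a When (carrying a Reminder) and an attendee,
# then round-trip it through its XML form.
event = gdata.calendar.CalendarEventEntry()
event.title = atom.Title(text='Tennis with Beth')
event.when.append(gdata.calendar.When(
    start_time='2008-07-01T10:00:00.000-07:00',
    end_time='2008-07-01T11:00:00.000-07:00',
    reminder=[gdata.calendar.Reminder(minutes=30)]))
event.who.append(gdata.calendar.Who(name='Beth', email='beth@example.com'))

xml = str(event)
parsed = gdata.calendar.CalendarEventEntryFromString(xml)
print parsed.when[0].start_time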
jaesivsm/the_conf
refs/heads/master
the_conf/__init__.py
1
from .the_conf import TheConf

__all__ = ['TheConf']
Aasmi/scikit-learn
refs/heads/master
sklearn/preprocessing/__init__.py
14
""" The :mod:`sklearn.preprocessing` module includes scaling, centering, normalization, binarization and imputation methods. """ from .data import Binarizer from .data import KernelCenterer from .data import MinMaxScaler from .data import MaxAbsScaler from .data import Normalizer from .data import RobustScaler from .data import StandardScaler from .data import add_dummy_feature from .data import binarize from .data import normalize from .data import scale from .data import robust_scale from .data import maxabs_scale from .data import OneHotEncoder from .data import PolynomialFeatures from .label import label_binarize from .label import LabelBinarizer from .label import LabelEncoder from .label import MultiLabelBinarizer from .imputation import Imputer __all__ = [ 'Binarizer', 'Imputer', 'KernelCenterer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'PolynomialFeatures', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'label_binarize', ]
o0neup/ibis
refs/heads/master
docs/source/conf.py
8
# -*- coding: utf-8 -*-
#
# Ibis documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 10 11:06:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import glob
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.autosummary',
    'numpydoc'
]

autosummary_generate = glob.glob("*.rst")
# autosummary_generate = True

import numpydoc
numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Ibis'
copyright = u'2015, Cloudera, Inc.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.2'
from ibis import __version__ as version
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Ibisdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Ibis.tex', u'Ibis Documentation',
     u'Cloudera, Inc.', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ibis', u'Ibis Documentation',
     [u'Cloudera, Inc.'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Ibis', u'Ibis Documentation',
     u'Cloudera, Inc.', 'Ibis', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
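A conf.py like the one above can also be exercised programmatically rather than via sphinx-build; a minimal sketch for the Sphinx 1.x era (the paths are hypothetical):

# build_docs.py -- minimal programmatic Sphinx build (paths are hypothetical)
from sphinx.application import Sphinx

app = Sphinx(srcdir='docs/source', confdir='docs/source',
             outdir='docs/build/html', doctreedir='docs/build/doctrees',
             buildername='html')
app.build()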
shadyueh/pyranking
refs/heads/master
env/lib/python2.7/site-packages/django/contrib/gis/db/models/sql/aggregates.py
298
import warnings

from django.db.models.sql import aggregates
from django.db.models.sql.aggregates import *  # NOQA
from django.utils.deprecation import RemovedInDjango110Warning

__all__ = ['Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union'] + aggregates.__all__

warnings.warn(
    "django.contrib.gis.db.models.sql.aggregates is deprecated. Use "
    "django.contrib.gis.db.models.aggregates instead.",
    RemovedInDjango110Warning, stacklevel=2)
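This record is a deprecation shim: importing the module re-exports the names from their new location and immediately emits a warning. A standalone sketch of the same module-level pattern (the module and warning names here are illustrative, not Django's):

# legacy_aggregates.py -- illustrative module-level deprecation shim
import warnings


class RemovedInV2Warning(DeprecationWarning):
    """Hypothetical stand-in for a framework's pending-removal warning."""


# Runs once per process, at first import, because modules are cached.
# Whether the message is shown depends on the active warning filters.
warnings.warn(
    "legacy_aggregates is deprecated. Use new_aggregates instead.",
    RemovedInV2Warning, stacklevel=2)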
rackerlabs/deuce-client
refs/heads/master
deuceclient/tests/test_api_v1.py
1
""" Tests - Deuce Client - API - V1 URLs """ import hashlib import json import os import random from unittest import TestCase import uuid import httpretty import mock import deuceclient.api as api import deuceclient.tests as baseline class DeuceClientV1ApiSupportTests(TestCase): def setUp(self): super(DeuceClientV1ApiSupportTests, self).setUp() self.vault_name = baseline.create_vault_name() self.file_name = baseline.create_file() self.block = baseline.create_block() self.block_id = self.block[0] self.storage_block_id = baseline.create_storage_block() def test_v1_base_url(self): self.assertEqual(baseline.get_base_path(), api.v1.get_base_path()) def test_v1_vault_path(self): path = baseline.get_vault_path(self.vault_name) self.assertEqual(path, api.v1.get_vault_path(self.vault_name)) def test_v1_files_path(self): path = baseline.get_files_path(self.vault_name) self.assertEqual(path, api.v1.get_files_path(self.vault_name)) def test_v1_file_path(self): path = baseline.get_file_path(self.vault_name, self.file_name) self.assertEqual(path, api.v1.get_file_path(self.vault_name, self.file_name)) def test_v1_fileblocks_path(self): path = baseline.get_fileblocks_path(self.vault_name, self.file_name) self.assertEqual(path, api.v1.get_fileblocks_path(self.vault_name, self.file_name)) def test_v1_blocks_path(self): path = baseline.get_blocks_path(self.vault_name) self.assertEqual(path, api.v1.get_blocks_path(self.vault_name)) def test_v1_block_path(self): path = baseline.get_block_path(self.vault_name, self.block_id) self.assertEqual(path, api.v1.get_block_path(self.vault_name, self.block_id)) def test_v1_storage_path(self): path = baseline.get_storage_path(self.vault_name) self.assertEqual(path, api.v1.get_storage_path(self.vault_name)) def test_v1_storage_blocks_path(self): path = baseline.get_storage_blocks_path(self.vault_name) self.assertEqual(path, api.v1.get_storage_blocks_path(self.vault_name)) def test_v1_storage_block_path(self): path = baseline.get_storage_block_path(self.vault_name, self.storage_block_id) self.assertEqual(path, api.v1.get_storage_block_path(self.vault_name, self.storage_block_id))
markfinger/python-react
refs/heads/master
react/apps.py
2
from django.conf import settings
from django.apps import AppConfig

import react.conf


class ReactConfig(AppConfig):
    name = 'react'

    def ready(self):
        react.conf.settings._PROXY_DJANGO_SETTINGS = True
        react.conf.settings.configure(
            **getattr(settings, 'REACT', {})
        )
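ReactConfig.ready() simply forwards a REACT dict from Django settings into the library's own settings proxy at startup. A sketch of the corresponding project settings (the option names inside REACT are placeholders; the real keys are whatever react.conf.settings.configure accepts):

# settings.py (hypothetical project)
INSTALLED_APPS = [
    # ...
    'react',  # registers ReactConfig, whose ready() runs at startup
]

REACT = {
    # Placeholder option names; consult react.conf for the actual keys.
    'RENDER': True,
    'RENDER_URL': 'http://127.0.0.1:9009/render',
}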
kgururaj/rocks-centos7
refs/heads/master
centos7_ks/scripts/disk.py
1
import sys
import os

sys.path = ['/export/rocks/install/rocks-dist/x86_64/build/include/installclass'] + sys.path

import re
import subprocess
import shlex
#import argparse

membership = '&membership;'
nodename = '&hostname;'


def find_disk_size(disk_size_str):
    match = re.match(r"([0-9\.]+)([a-z]+)", disk_size_str, re.I)
    if match:
        items = match.groups()
        if len(items) == 2:
            size_units = {
                'KB': 1024,
                'MB': 1024 * 1024,
                'GB': 1024 * 1024 * 1024,
                'TB': 1024 * 1024 * 1024 * 1024,
                'PB': 1024 * 1024 * 1024 * 1024 * 1024,
            }
            if items[1] in size_units:
                disk_size = float(items[0]) * size_units[items[1]]
                return disk_size
    return None


def find_disk_id(line):
    tokens = line.split()
    if len(tokens) >= 3 and tokens[0] == 'Disk':
        disk = tokens[1]
        disk = disk.replace(':', '')
        return disk
    return None


def find_os_disk(disks):
    disk_list = []
    for disk in disks:
        disk_id = disk[0]
        disk_size = find_disk_size(disk[1])
        if disk_size and disk_id:
            disk_list.append((disk_id, disk_size))
        else:
            # could not determine size for one of the disks - play safe
            # and return None
            return None
    if len(disk_list) > 0:
        disk_list.sort(key=lambda x: x[1])
        fptr = open('/tmp/disk_list.csv', 'wb')
        first_disk = True
        for disk_id, disk_size in disk_list:
            fptr.write(disk_id + ',%.2f' % (disk_size))
            if first_disk:
                fptr.write(',OS_disk')
            fptr.write('\n')
            first_disk = False
        fptr.close()
        return (disk_list[0][0], disk_list[0][1])  # smallest disk
    return None


def doDisk(file, disk, disk_size, force_format=False, root_partition_size=None):
    if force_format:
        parted_cmd = '/sbin/parted -s ' + disk + ' mklabel msdos'
        print(parted_cmd)
        process = subprocess.Popen(shlex.split(parted_cmd),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        stdout_str = process.communicate()[0]
        print(stdout_str)
    disk = os.path.basename(disk)
    file.write('zerombr\n')
    file.write('clearpart --drives=%s\n' % disk)
    disk_size = int(float(disk_size) / (1024 * 1024 * 1024))  # GB
    max_swap = 64  # 64 GB
    recommended_root_size = 150  # 150 GB
    if root_partition_size:
        root_partition_size = min(disk_size, root_partition_size)
        swap_size = min(max_swap, disk_size - root_partition_size)
    else:
        if disk_size <= recommended_root_size:
            # <= recommended, no swap
            root_partition_size = disk_size
            swap_size = 0
        else:
            swap_size = min(max_swap, disk_size - recommended_root_size)
            root_partition_size = disk_size - swap_size
    file.write('part biosboot --fstype=biosboot --size=1 --ondisk=%s\n' % (disk))
    if swap_size > 0:
        # MB - as per kickstart
        file.write('part swap --fstype="swap" --size=%d --ondisk=%s\n'
                   % (swap_size * 1024, disk))
    if root_partition_size < (disk_size - swap_size):
        file.write('part / --fstype="ext4" --size=%d --ondisk=%s\n'
                   % (root_partition_size * 1024, disk))
    else:
        file.write('part / --fstype="ext4" --size=1 --grow --ondisk=%s\n' % disk)
    file.write('bootloader --location=mbr --boot-drive=%s\n' % disk)


#
# main
#
#p = rocks_partition.RocksPartition();
#disks = p.getDisks();


def get_parted_disk_list():
    process = subprocess.Popen(shlex.split('/sbin/parted -l -m'),
                               stdout=subprocess.PIPE)
    stdout_str = process.communicate()[0]
    list = stdout_str.split('\n')
    # First initialize uninitialized disks - create msdos label if needed
    for line in list:
        if line.find('Error') != -1 and line.find('unrecognised disk label') != -1:
            new_line = line.strip()
            tokens = new_line.split()
            if len(tokens) > 2:
                disk = tokens[1].replace(':', '')
                subprocess.call(shlex.split('/sbin/parted -s ' + disk +
                                            ' mktable msdos'))
    # Now create the list of disks
    process = subprocess.Popen(shlex.split('/sbin/parted -l -m'),
                               stdout=subprocess.PIPE)
    stdout_str = process.communicate()[0]
    list = stdout_str.split('\n')
    disk_list = []
    next_is_disk = False
    for line in list:
        if next_is_disk:
            new_line = line.strip()
            if new_line.find('loop') == -1 and new_line.find('zram') == -1:
                # not a loop-back device and not a ramdisk device
                new_line = new_line.replace(';', '')
                disk_list.append(new_line.split(':'))
            next_is_disk = False
        if line.find('BYT') != -1:
            next_is_disk = True
    return disk_list


def main(argv):
    arg_needs_value = {'--root_partition_size': True,
                       '--force_format': False,
                       '--manual': False}
    param_values = {'manual': False,
                    'force_format': False,
                    'root_partition_size': None}  # GB
    node_type = None
    i = 0
    # A while loop is used here (the original for-range loop never actually
    # skipped the value token of --root_partition_size, so the value would
    # have been mis-parsed as the positional node_type argument).
    while i < len(argv):
        arg = argv[i]
        if arg in arg_needs_value:
            cut_arg = arg.replace('--', '')
            if arg_needs_value[arg]:
                param_values[cut_arg] = argv[i + 1]
                i += 1
            else:
                param_values[cut_arg] = True
        else:
            if node_type:
                sys.stderr.write('Node type already assigned to ' + node_type +
                                 ', only 1 positional argument expected\n')
                sys.exit(-1)
            node_type = arg
        i += 1
    if param_values['root_partition_size']:
        param_values['root_partition_size'] = int(param_values['root_partition_size'])
    if not node_type:
        param_values['manual'] = True
    #parser.add_argument("node_type", help='Type of node - Lustre server (OSS/MDS/MGS), compute etc');
    #parser.add_argument("--root_partition_size", help="Root partition size in GB(integer)", default=100, type=int); #GB
    #group = parser.add_mutually_exclusive_group()
    #group.add_argument("--force_format", help='Force format of whole OS disk', action="store_true");
    #group.add_argument("--manual", help='Manual format', action="store_true");
    #args = parser.parse_args();
    file = open('/tmp/user_partition_info', 'wb')
    # Manual format if manual_format or (Lustre servers and not force_format)
    if param_values['manual'] or (node_type == 'lustre_server' and
                                  not param_values['force_format']):
        file.write('rocks manual\n')
        file.close()
        sys.exit(0)
    disks = get_parted_disk_list()
    os_disk_tuple = find_os_disk(disks)
    if os_disk_tuple:
        doDisk(file, os_disk_tuple[0], os_disk_tuple[1],
               force_format=param_values['force_format'],
               root_partition_size=param_values['root_partition_size'])
    else:
        # could not find a suitable disk, use manual
        file.write("rocks manual\n")
    file.close()


if __name__ == "__main__":
    main(sys.argv[1:] if len(sys.argv) > 1 else [])
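The sizing policy in doDisk (swap capped at 64 GB, root topped up from a 150 GB recommendation) can be checked in isolation; a minimal restatement with a worked example:

def plan_partitions(disk_gb, root_gb=None, max_swap=64, recommended_root=150):
    """Standalone restatement of the sizing rules in doDisk (sizes in GB)."""
    if root_gb is not None:
        root_gb = min(disk_gb, root_gb)
        swap_gb = min(max_swap, disk_gb - root_gb)
    elif disk_gb <= recommended_root:
        root_gb, swap_gb = disk_gb, 0        # small disk: everything to /
    else:
        swap_gb = min(max_swap, disk_gb - recommended_root)
        root_gb = disk_gb - swap_gb
    return root_gb, swap_gb

# e.g. a 500 GB disk: swap = min(64, 500 - 150) = 64, root = 500 - 64 = 436
assert plan_partitions(500) == (436, 64)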
kartikdhar/djangotest
refs/heads/master
virt1/lib/python2.7/site-packages/django/contrib/gis/sitemaps/kml.py
398
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.sitemaps import Sitemap
from django.core import urlresolvers
from django.db import models


class KMLSitemap(Sitemap):
    """
    A minimal hook to produce KML sitemaps.
    """
    geo_format = 'kml'

    def __init__(self, locations=None):
        # If no locations specified, then we try to build for
        # every model in installed applications.
        self.locations = self._build_kml_sources(locations)

    def _build_kml_sources(self, sources):
        """
        Goes through the given sources and returns a 3-tuple of
        the application label, module name, and field name of every
        GeometryField encountered in the sources.

        If no sources are provided, then all models are used.
        """
        kml_sources = []
        if sources is None:
            sources = apps.get_models()
        for source in sources:
            if isinstance(source, models.base.ModelBase):
                for field in source._meta.fields:
                    if isinstance(field, GeometryField):
                        kml_sources.append((source._meta.app_label,
                                            source._meta.model_name,
                                            field.name))
            elif isinstance(source, (list, tuple)):
                if len(source) != 3:
                    raise ValueError('Must specify a 3-tuple of (app_label, '
                                     'module_name, field_name).')
                kml_sources.append(source)
            else:
                raise TypeError('KML Sources must be a model or a 3-tuple.')
        return kml_sources

    def get_urls(self, page=1, site=None, protocol=None):
        """
        This method is overridden so the appropriate `geo_format` attribute
        is placed on each URL element.
        """
        urls = Sitemap.get_urls(self, page=page, site=site, protocol=protocol)
        for url in urls:
            url['geo_format'] = self.geo_format
        return urls

    def items(self):
        return self.locations

    def location(self, obj):
        return urlresolvers.reverse(
            'django.contrib.gis.sitemaps.views.%s' % self.geo_format,
            kwargs={'label': obj[0],
                    'model': obj[1],
                    'field_name': obj[2]})


class KMZSitemap(KMLSitemap):
    geo_format = 'kmz'
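A sketch of wiring KMLSitemap into a project's URLconf for this Django release (the URL pattern and sitemap key are arbitrary choices):

# urls.py (hypothetical project)
from django.conf.urls import url
from django.contrib.gis.sitemaps import KMLSitemap
from django.contrib.sitemaps.views import sitemap

urlpatterns = [
    # KMLSitemap() with no arguments scans every installed model for
    # GeometryFields, per _build_kml_sources above.
    url(r'^sitemap\.xml$', sitemap,
        {'sitemaps': {'kml': KMLSitemap()}},
        name='sitemap'),
]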
ivanhorvath/openshift-tools
refs/heads/prod
openshift/installer/vendored/openshift-ansible-3.7.52-1/roles/lib_openshift/library/oc_version.py
6
#!/usr/bin/env python # pylint: disable=missing-docstring # flake8: noqa: T001 # ___ ___ _ _ ___ ___ _ _____ ___ ___ # / __| __| \| | __| _ \ /_\_ _| __| \ # | (_ | _|| .` | _|| / / _ \| | | _|| |) | # \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ # | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _| # | |) | (_) | | .` | (_) || | | _|| |) | | | | # |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_| # # Copyright 2016 Red Hat, Inc. and/or its affiliates # and other contributors as indicated by the @author tags. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*- ''' OpenShiftCLI class that wraps the oc commands in a subprocess ''' # pylint: disable=too-many-lines from __future__ import print_function import atexit import copy import json import os import re import shutil import subprocess import tempfile # pylint: disable=import-error try: import ruamel.yaml as yaml except ImportError: import yaml from ansible.module_utils.basic import AnsibleModule # -*- -*- -*- End included fragment: lib/import.py -*- -*- -*- # -*- -*- -*- Begin included fragment: doc/version -*- -*- -*- DOCUMENTATION = ''' --- module: oc_version short_description: Return the current openshift version description: - Return the openshift installed version. `oc version` options: state: description: - Currently list is only supported state. required: true default: list choices: ["list"] aliases: [] kubeconfig: description: - The path for the kubeconfig file to use for authentication required: false default: /etc/origin/master/admin.kubeconfig aliases: [] debug: description: - Turn on debug output. 
required: false default: False aliases: [] author: - "Kenny Woodson <[email protected]>" extends_documentation_fragment: [] ''' EXAMPLES = ''' oc_version: - name: get oc version oc_version: register: oc_version ''' # -*- -*- -*- End included fragment: doc/version -*- -*- -*- # -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- class YeditException(Exception): # pragma: no cover ''' Exception class for Yedit ''' pass # pylint: disable=too-many-public-methods class Yedit(object): # pragma: no cover ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False): self.content = content self._separator = separator self.filename = filename self.__yaml_dict = content self.content_type = content_type self.backup = backup self.load(content_type=self.content_type) if self.__yaml_dict is None: self.__yaml_dict = {} @property def separator(self): ''' getter method for separator ''' return self._separator @separator.setter def separator(self, inc_sep): ''' setter method for separator ''' self._separator = inc_sep @property def yaml_dict(self): ''' getter method for yaml_dict ''' return self.__yaml_dict @yaml_dict.setter def yaml_dict(self, value): ''' setter method for yaml_dict ''' self.__yaml_dict = value @staticmethod def parse_key(key, sep='.'): '''parse the key allowing the appropriate separator''' common_separators = list(Yedit.com_sep - set([sep])) return re.findall(Yedit.re_key.format(''.join(common_separators)), key) @staticmethod def valid_key(key, sep='.'): '''validate the incoming key''' common_separators = list(Yedit.com_sep - set([sep])) if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key): return False return True @staticmethod def remove_entry(data, key, sep='.'): ''' remove data at location key ''' if key == '' and isinstance(data, dict): data.clear() return True elif key == '' and isinstance(data, list): del data[:] return True if not (key and Yedit.valid_key(key, sep)) and \ isinstance(data, (list, dict)): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes[:-1]: if dict_key and isinstance(data, dict): data = data.get(dict_key) elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: return None # process last index for remove # expected list entry if key_indexes[-1][0]: if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501 del data[int(key_indexes[-1][0])] return True # expected dict entry elif key_indexes[-1][1]: if isinstance(data, dict): del data[key_indexes[-1][1]] return True @staticmethod def add_entry(data, key, item=None, sep='.'): ''' Get an item from a dictionary with key notation a.b.c d = {'a': {'b': 'c'}}} key = a#b return c ''' if key == '': pass elif (not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict))): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes[:-1]: if dict_key: if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501 data = data[dict_key] continue elif data and not isinstance(data, dict): raise YeditException("Unexpected item type found while going through key " + "path: {} (at key: {})".format(key, dict_key)) data[dict_key] = {} data = data[dict_key] elif 
(arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: raise YeditException("Unexpected item type found while going through key path: {}".format(key)) if key == '': data = item # process last index for add # expected list entry elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501 data[int(key_indexes[-1][0])] = item # expected dict entry elif key_indexes[-1][1] and isinstance(data, dict): data[key_indexes[-1][1]] = item # didn't add/update to an existing list, nor add/update key to a dict # so we must have been provided some syntax like a.b.c[<int>] = "data" for a # non-existent array else: raise YeditException("Error adding to object at path: {}".format(key)) return data @staticmethod def get_entry(data, key, sep='.'): ''' Get an item from a dictionary with key notation a.b.c d = {'a': {'b': 'c'}}} key = a.b return c ''' if key == '': pass elif (not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict))): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes: if dict_key and isinstance(data, dict): data = data.get(dict_key) elif (arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1): data = data[int(arr_ind)] else: return None return data @staticmethod def _write(filename, contents): ''' Actually write the file contents to disk. This helps with mocking. ''' tmp_filename = filename + '.yedit' with open(tmp_filename, 'w') as yfd: yfd.write(contents) os.rename(tmp_filename, filename) def write(self): ''' write to file ''' if not self.filename: raise YeditException('Please specify a filename.') if self.backup and self.file_exists(): shutil.copy(self.filename, self.filename + '.orig') # Try to set format attributes if supported try: self.yaml_dict.fa.set_block_style() except AttributeError: pass # Try to use RoundTripDumper if supported. try: Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) except AttributeError: Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) return (True, self.yaml_dict) def read(self): ''' read from file ''' # check if it exists if self.filename is None or not self.file_exists(): return None contents = None with open(self.filename) as yfd: contents = yfd.read() return contents def file_exists(self): ''' return whether file exists ''' if os.path.exists(self.filename): return True return False def load(self, content_type='yaml'): ''' return yaml file ''' contents = self.read() if not contents and not self.content: return None if self.content: if isinstance(self.content, dict): self.yaml_dict = self.content return self.yaml_dict elif isinstance(self.content, str): contents = self.content # check if it is yaml try: if content_type == 'yaml' and contents: # Try to set format attributes if supported try: self.yaml_dict.fa.set_block_style() except AttributeError: pass # Try to use RoundTripLoader if supported. try: self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader) except AttributeError: self.yaml_dict = yaml.safe_load(contents) # Try to set format attributes if supported try: self.yaml_dict.fa.set_block_style() except AttributeError: pass elif content_type == 'json' and contents: self.yaml_dict = json.loads(contents) except yaml.YAMLError as err: # Error loading yaml or json raise YeditException('Problem with loading yaml file. 
{}'.format(err)) return self.yaml_dict def get(self, key): ''' get a specified key''' try: entry = Yedit.get_entry(self.yaml_dict, key, self.separator) except KeyError: entry = None return entry def pop(self, path, key_or_item): ''' remove a key, value pair from a dict or an item for a list''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry is None: return (False, self.yaml_dict) if isinstance(entry, dict): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member if key_or_item in entry: entry.pop(key_or_item) return (True, self.yaml_dict) return (False, self.yaml_dict) elif isinstance(entry, list): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member ind = None try: ind = entry.index(key_or_item) except ValueError: return (False, self.yaml_dict) entry.pop(ind) return (True, self.yaml_dict) return (False, self.yaml_dict) def delete(self, path): ''' remove path from a dict''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry is None: return (False, self.yaml_dict) result = Yedit.remove_entry(self.yaml_dict, path, self.separator) if not result: return (False, self.yaml_dict) return (True, self.yaml_dict) def exists(self, path, value): ''' check if value exists at path''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if isinstance(entry, list): if value in entry: return True return False elif isinstance(entry, dict): if isinstance(value, dict): rval = False for key, val in value.items(): if entry[key] != val: rval = False break else: rval = True return rval return value in entry return entry == value def append(self, path, value): '''append value to a list''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry is None: self.put(path, []) entry = Yedit.get_entry(self.yaml_dict, path, self.separator) if not isinstance(entry, list): return (False, self.yaml_dict) # AUDIT:maybe-no-member makes sense due to loading data from # a serialized format. # pylint: disable=maybe-no-member entry.append(value) return (True, self.yaml_dict) # pylint: disable=too-many-arguments def update(self, path, value, index=None, curr_value=None): ''' put path, value into a dict ''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if isinstance(entry, dict): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member if not isinstance(value, dict): raise YeditException('Cannot replace key, value entry in dict with non-dict type. 
' + 'value=[{}] type=[{}]'.format(value, type(value))) entry.update(value) return (True, self.yaml_dict) elif isinstance(entry, list): # AUDIT:maybe-no-member makes sense due to fuzzy types # pylint: disable=maybe-no-member ind = None if curr_value: try: ind = entry.index(curr_value) except ValueError: return (False, self.yaml_dict) elif index is not None: ind = index if ind is not None and entry[ind] != value: entry[ind] = value return (True, self.yaml_dict) # see if it exists in the list try: ind = entry.index(value) except ValueError: # doesn't exist, append it entry.append(value) return (True, self.yaml_dict) # already exists, return if ind is not None: return (False, self.yaml_dict) return (False, self.yaml_dict) def put(self, path, value): ''' put path, value into a dict ''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError: entry = None if entry == value: return (False, self.yaml_dict) # deepcopy didn't work # Try to use ruamel.yaml and fallback to pyyaml try: tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader) except AttributeError: tmp_copy = copy.deepcopy(self.yaml_dict) # set the format attributes if available try: tmp_copy.fa.set_block_style() except AttributeError: pass result = Yedit.add_entry(tmp_copy, path, value, self.separator) if result is None: return (False, self.yaml_dict) # When path equals "" it is a special case. # "" refers to the root of the document # Only update the root path (entire document) when its a list or dict if path == '': if isinstance(result, list) or isinstance(result, dict): self.yaml_dict = result return (True, self.yaml_dict) return (False, self.yaml_dict) self.yaml_dict = tmp_copy return (True, self.yaml_dict) def create(self, path, value): ''' create a yaml file ''' if not self.file_exists(): # deepcopy didn't work # Try to use ruamel.yaml and fallback to pyyaml try: tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader) except AttributeError: tmp_copy = copy.deepcopy(self.yaml_dict) # set the format attributes if available try: tmp_copy.fa.set_block_style() except AttributeError: pass result = Yedit.add_entry(tmp_copy, path, value, self.separator) if result is not None: self.yaml_dict = tmp_copy return (True, self.yaml_dict) return (False, self.yaml_dict) @staticmethod def get_curr_value(invalue, val_type): '''return the current value''' if invalue is None: return None curr_value = invalue if val_type == 'yaml': curr_value = yaml.load(invalue) elif val_type == 'json': curr_value = json.loads(invalue) return curr_value @staticmethod def parse_value(inc_value, vtype=''): '''determine value type passed''' true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', 'on', 'On', 'ON', ] false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', 'off', 'Off', 'OFF'] # It came in as a string but you didn't specify value_type as string # we will convert to bool if it matches any of the above cases if isinstance(inc_value, str) and 'bool' in vtype: if inc_value not in true_bools and inc_value not in false_bools: raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype)) elif isinstance(inc_value, bool) and 'str' in vtype: inc_value = str(inc_value) # There is a special case where '' will turn into None after yaml loading it so skip if isinstance(inc_value, str) and inc_value == '': pass # If vtype is not str then go ahead and attempt to yaml load it. 
elif isinstance(inc_value, str) and 'str' not in vtype: try: inc_value = yaml.safe_load(inc_value) except Exception: raise YeditException('Could not determine type of incoming value. ' + 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype)) return inc_value @staticmethod def process_edits(edits, yamlfile): '''run through a list of edits and process them one-by-one''' results = [] for edit in edits: value = Yedit.parse_value(edit['value'], edit.get('value_type', '')) if edit.get('action') == 'update': # pylint: disable=line-too-long curr_value = Yedit.get_curr_value( Yedit.parse_value(edit.get('curr_value')), edit.get('curr_value_format')) rval = yamlfile.update(edit['key'], value, edit.get('index'), curr_value) elif edit.get('action') == 'append': rval = yamlfile.append(edit['key'], value) else: rval = yamlfile.put(edit['key'], value) if rval[0]: results.append({'key': edit['key'], 'edit': rval[1]}) return {'changed': len(results) > 0, 'results': results} # pylint: disable=too-many-return-statements,too-many-branches @staticmethod def run_ansible(params): '''perform the idempotent crud operations''' yamlfile = Yedit(filename=params['src'], backup=params['backup'], separator=params['separator']) state = params['state'] if params['src']: rval = yamlfile.load() if yamlfile.yaml_dict is None and state != 'present': return {'failed': True, 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) + 'file exists, that it is has correct permissions, and is valid yaml.'} if state == 'list': if params['content']: content = Yedit.parse_value(params['content'], params['content_type']) yamlfile.yaml_dict = content if params['key']: rval = yamlfile.get(params['key']) return {'changed': False, 'result': rval, 'state': state} elif state == 'absent': if params['content']: content = Yedit.parse_value(params['content'], params['content_type']) yamlfile.yaml_dict = content if params['update']: rval = yamlfile.pop(params['key'], params['value']) else: rval = yamlfile.delete(params['key']) if rval[0] and params['src']: yamlfile.write() return {'changed': rval[0], 'result': rval[1], 'state': state} elif state == 'present': # check if content is different than what is in the file if params['content']: content = Yedit.parse_value(params['content'], params['content_type']) # We had no edits to make and the contents are the same if yamlfile.yaml_dict == content and \ params['value'] is None: return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state} yamlfile.yaml_dict = content # If we were passed a key, value then # we enapsulate it in a list and process it # Key, Value passed to the module : Converted to Edits list # edits = [] _edit = {} if params['value'] is not None: _edit['value'] = params['value'] _edit['value_type'] = params['value_type'] _edit['key'] = params['key'] if params['update']: _edit['action'] = 'update' _edit['curr_value'] = params['curr_value'] _edit['curr_value_format'] = params['curr_value_format'] _edit['index'] = params['index'] elif params['append']: _edit['action'] = 'append' edits.append(_edit) elif params['edits'] is not None: edits = params['edits'] if edits: results = Yedit.process_edits(edits, yamlfile) # if there were changes and a src provided to us we need to write if results['changed'] and params['src']: yamlfile.write() return {'changed': results['changed'], 'result': results['results'], 'state': state} # no edits to make if params['src']: # pylint: disable=redefined-variable-type rval = yamlfile.write() return {'changed': rval[0], 'result': rval[1], 
'state': state} # We were passed content but no src, key or value, or edits. Return contents in memory return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state} return {'failed': True, 'msg': 'Unknown state passed'} # -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- # -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- # pylint: disable=too-many-lines # noqa: E301,E302,E303,T001 class OpenShiftCLIError(Exception): '''Exception class for openshiftcli''' pass ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] def locate_oc_binary(): ''' Find and return oc binary file ''' # https://github.com/openshift/openshift-ansible/issues/3410 # oc can be in /usr/local/bin in some cases, but that may not # be in $PATH due to ansible/sudo paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS oc_binary = 'oc' # Use shutil.which if it is available, otherwise fallback to a naive path search try: which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) if which_result is not None: oc_binary = which_result except AttributeError: for path in paths: if os.path.exists(os.path.join(path, oc_binary)): oc_binary = os.path.join(path, oc_binary) break return oc_binary # pylint: disable=too-few-public-methods class OpenShiftCLI(object): ''' Class to wrap the command line tools ''' def __init__(self, namespace, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False, all_namespaces=False): ''' Constructor for OpenshiftCLI ''' self.namespace = namespace self.verbose = verbose self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) self.all_namespaces = all_namespaces self.oc_binary = locate_oc_binary() # Pylint allows only 5 arguments to be passed. # pylint: disable=too-many-arguments def _replace_content(self, resource, rname, content, force=False, sep='.'): ''' replace the current object with the content ''' res = self._get(resource, rname) if not res['results']: return res fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, res['results'][0], separator=sep) changes = [] for key, value in content.items(): changes.append(yed.put(key, value)) if any([change[0] for change in changes]): yed.write() atexit.register(Utils.cleanup, [fname]) return self._replace(fname, force) return {'returncode': 0, 'updated': False} def _replace(self, fname, force=False): '''replace the current object with oc replace''' # We are removing the 'resourceVersion' to handle # a race condition when modifying oc objects yed = Yedit(fname) results = yed.delete('metadata.resourceVersion') if results[0]: yed.write() cmd = ['replace', '-f', fname] if force: cmd.append('--force') return self.openshift_cmd(cmd) def _create_from_content(self, rname, content): '''create a temporary file and then call oc create on it''' fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, content=content) yed.write() atexit.register(Utils.cleanup, [fname]) return self._create(fname) def _create(self, fname): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' cmd = ['delete', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) else: raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template template_name: the name of the template to process create: whether to send to oc create after processing params: the parameters for the template template_data: the incoming template's data; instead of a file ''' cmd = ['process'] if template_data: cmd.extend(['-f', '-']) else: cmd.append(template_name) if params: param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) results = self.openshift_cmd(cmd, output=True, input_data=template_data) if results['returncode'] != 0 or not create: return results fname = Utils.create_tmpfile(template_name + '-') yed = Yedit(fname, results['results']) yed.write() atexit.register(Utils.cleanup, [fname]) return self.openshift_cmd(['create', '-f', fname]) def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) cmd.extend(['-o', 'json']) rval = self.openshift_cmd(cmd, output=True) # Ensure results are returned in an array if 'items' in rval: rval['results'] = rval['items'] elif not isinstance(rval['results'], list): rval['results'] = [rval['results']] return rval def _schedulable(self, node=None, selector=None, schedulable=True): ''' perform oadm manage-node schedulable ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 def _list_pods(self, node=None, selector=None, pod_selector=None): ''' perform oadm list pods node: the node in which to list pods selector: the label selector filter if provided pod_selector: the pod selector filter if provided ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # pylint: disable=too-many-arguments def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): ''' perform oadm manage-node evacuate ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') cmd.append('--evacuate') return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') def _version(self): ''' return the openshift version''' return self.openshift_cmd(['version'], output=True, output_type='raw') def _import_image(self, url=None, name=None, tag=None): ''' perform image import ''' cmd = ['import-image'] image = '{0}'.format(name) if tag: image += ':{0}'.format(tag) cmd.append(image) if url: cmd.append('--from={0}/{1}'.format(url, image)) cmd.append('-n{0}'.format(self.namespace)) cmd.append('--confirm') return self.openshift_cmd(cmd) def _run(self, cmds, input_data): ''' Actually executes the command. This makes mocking easier.
''' curr_env = os.environ.copy() curr_env.update({'KUBECONFIG': self.kubeconfig}) proc = subprocess.Popen(cmds, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=curr_env) stdout, stderr = proc.communicate(input_data) return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): '''Base command for oc ''' cmds = [self.oc_binary] if oadm: cmds.append('adm') cmds.extend(cmd) if self.all_namespaces: cmds.extend(['--all-namespaces']) elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) if self.verbose: print(' '.join(cmds)) try: returncode, stdout, stderr = self._run(cmds, input_data) except OSError as ex: returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, "cmd": ' '.join(cmds)} if output_type == 'json': rval['results'] = {} if output and stdout: try: rval['results'] = json.loads(stdout) except ValueError as verr: if "No JSON object could be decoded" in verr.args: rval['err'] = verr.args elif output_type == 'raw': rval['results'] = stdout if output else '' if self.verbose: print("STDOUT: {0}".format(stdout)) print("STDERR: {0}".format(stderr)) if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, "stdout": stdout}) return rval class Utils(object): # pragma: no cover ''' utilities for openshiftcli modules ''' @staticmethod def _write(filename, contents): ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): ''' create a file in tmp with name and contents''' tmp = Utils.create_tmpfile(prefix=rname) if ftype == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripDumper'): Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) else: Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) elif ftype == 'json': Utils._write(tmp, json.dumps(data)) else: Utils._write(tmp, data) # Register cleanup when module is done atexit.register(Utils.cleanup, [tmp]) return tmp @staticmethod def create_tmpfile_copy(inc_file): '''create a temporary copy of a file''' tmpfile = Utils.create_tmpfile('lib_openshift-') Utils._write(tmpfile, open(inc_file).read()) # Cleanup the tmpfile atexit.register(Utils.cleanup, [tmpfile]) return tmpfile @staticmethod def create_tmpfile(prefix='tmp'): ''' Generates and returns a temporary file name ''' with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: return tmp.name @staticmethod def create_tmp_files_from_contents(content, content_type=None): '''Turn an array of dict: filename, content into a files array''' if not isinstance(content, list): content = [content] files = [] for item in content: path = Utils.create_tmp_file_from_contents(item['path'] + '-', item['data'], ftype=content_type) files.append({'name': os.path.basename(item['path']), 'path': path}) return files @staticmethod def cleanup(files): '''Clean up on exit ''' for sfile in files: if os.path.exists(sfile): if os.path.isdir(sfile): shutil.rmtree(sfile) elif os.path.isfile(sfile): os.remove(sfile) @staticmethod def exists(results, _name): ''' Check to see if the results include the name ''' if not 
results: return False if Utils.find_result(results, _name): return True return False @staticmethod def find_result(results, _name): ''' Find the specified result by name''' rval = None for result in results: if 'metadata' in result and result['metadata']['name'] == _name: rval = result break return rval @staticmethod def get_resource_file(sfile, sfile_type='yaml'): ''' return the service file ''' contents = None with open(sfile) as sfd: contents = sfd.read() if sfile_type == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripLoader'): contents = yaml.load(contents, yaml.RoundTripLoader) else: contents = yaml.safe_load(contents) elif sfile_type == 'json': contents = json.loads(contents) return contents @staticmethod def filter_versions(stdout): ''' filter the oc version output ''' version_dict = {} version_search = ['oc', 'openshift', 'kubernetes'] for line in stdout.strip().split('\n'): for term in version_search: if not line: continue if line.startswith(term): version_dict[term] = line.split()[-1] # horrible hack to get openshift version in Openshift 3.2 # By default "oc version in 3.2 does not return an "openshift" version if "openshift" not in version_dict: version_dict["openshift"] = version_dict["oc"] return version_dict @staticmethod def add_custom_versions(versions): ''' create custom versions strings ''' versions_dict = {} for tech, version in versions.items(): # clean up "-" from version if "-" in version: version = version.split("-")[0] if version.startswith('v'): version = version[1:] # Remove the 'v' prefix versions_dict[tech + '_numeric'] = version.split('+')[0] # "3.3.0.33" is what we have, we want "3.3" versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.')) return versions_dict @staticmethod def openshift_installed(): ''' check if openshift is installed ''' import rpm transaction_set = rpm.TransactionSet() rpmquery = transaction_set.dbMatch("name", "atomic-openshift") return rpmquery.count() > 0 # Disabling too-many-branches. This is a yaml dictionary comparison function # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements @staticmethod def check_def_equal(user_def, result_def, skip_keys=None, debug=False): ''' Given a user defined definition, compare it with the results given back by our query. 
''' # Currently these values are autogenerated and we do not need to check them skip = ['metadata', 'status'] if skip_keys: skip.extend(skip_keys) for key, value in result_def.items(): if key in skip: continue # Both are lists if isinstance(value, list): if key not in user_def: if debug: print('User data does not have key [%s]' % key) print('User data: %s' % user_def) return False if not isinstance(user_def[key], list): if debug: print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) return False if len(user_def[key]) != len(value): if debug: print("List lengths are not equal.") print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) print("user_def: %s" % user_def[key]) print("value: %s" % value) return False for values in zip(user_def[key], value): if isinstance(values[0], dict) and isinstance(values[1], dict): if debug: print('sending list - list') print(type(values[0])) print(type(values[1])) result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) if not result: print('list compare returned false') return False elif value != user_def[key]: if debug: print('value should be identical') print(user_def[key]) print(value) return False # recurse on a dictionary elif isinstance(value, dict): if key not in user_def: if debug: print("user_def does not have key [%s]" % key) return False if not isinstance(user_def[key], dict): if debug: print("dict returned false: not instance of dict") return False # before passing ensure keys match api_values = set(value.keys()) - set(skip) user_values = set(user_def[key].keys()) - set(skip) if api_values != user_values: if debug: print("keys are not equal in dict") print(user_values) print(api_values) return False result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) if not result: if debug: print("dict returned false") print(result) return False # Verify each key, value pair is the same else: if key not in user_def or value != user_def[key]: if debug: print("value not equal; user_def does not have key") print(key) print(value) if key in user_def: print(user_def[key]) return False if debug: print('returning true') return True class OpenShiftCLIConfig(object): '''Generic Config''' def __init__(self, rname, namespace, kubeconfig, options): self.kubeconfig = kubeconfig self.name = rname self.namespace = namespace self._options = options @property def config_options(self): ''' return config options ''' return self._options def to_option_list(self, ascommalist=''): '''return all options as a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs''' return self.stringify(ascommalist) def stringify(self, ascommalist=''): ''' return the options hash as cli params in a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs ''' rval = [] for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: val = data['value'] rval.append('--{}={}'.format(key.replace('_', '-'), val)) return rval # -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- # -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*- # pylint: 
disable=too-many-instance-attributes class OCVersion(OpenShiftCLI): ''' Class to wrap the oc command line tools ''' # pylint allows 5 # pylint: disable=too-many-arguments def __init__(self, config, debug): ''' Constructor for OCVersion ''' super(OCVersion, self).__init__(None, config) self.debug = debug def get(self): '''get and return version information ''' results = {} version_results = self._version() if version_results['returncode'] == 0: filtered_vers = Utils.filter_versions(version_results['results']) custom_vers = Utils.add_custom_versions(filtered_vers) results['returncode'] = version_results['returncode'] results.update(filtered_vers) results.update(custom_vers) return results raise OpenShiftCLIError('Problem detecting openshift version.') @staticmethod def run_ansible(params): '''run the idempotent ansible code''' oc_version = OCVersion(params['kubeconfig'], params['debug']) if params['state'] == 'list': #pylint: disable=protected-access result = oc_version.get() return {'state': params['state'], 'results': result, 'changed': False} # -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*- # -*- -*- -*- Begin included fragment: ansible/oc_version.py -*- -*- -*- def main(): ''' ansible oc module for version ''' module = AnsibleModule( argument_spec=dict( kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), state=dict(default='list', type='str', choices=['list']), debug=dict(default=False, type='bool'), ), supports_check_mode=True, ) rval = OCVersion.run_ansible(module.params) if 'failed' in rval: module.fail_json(**rval) module.exit_json(**rval) if __name__ == '__main__': main() # -*- -*- -*- End included fragment: ansible/oc_version.py -*- -*- -*-
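A minimal sketch of driving the edit pipeline above outside of Ansible. The file path and keys are illustrative only, and it assumes the full Yedit class from this fragment (including its put and append methods) is in scope:

# Sketch only: '/tmp/example.yml' and the keys below are made-up examples.
yamlfile = Yedit(filename='/tmp/example.yml', separator='.')
yamlfile.load()
edits = [
    {'key': 'metadata.labels.env', 'value': 'dev'},           # no action given -> put
    {'key': 'spec.tags', 'value': 'v2', 'action': 'append'},  # append to a list value
]
result = Yedit.process_edits(edits, yamlfile)
if result['changed']:
    yamlfile.write()  # mirrors what run_ansible does when a src file was given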
UstadMobile/exelearning-ustadmobile-work
refs/heads/master
twisted/web/http_headers.py
44
# -*- test-case-name: twisted.web.test.test_http_headers # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ An API for storing HTTP header names and values. """ from __future__ import division, absolute_import from collections import MutableMapping from twisted.python.compat import comparable, cmp def _dashCapitalize(name): """ Return a byte string which is capitalized using '-' as a word separator. @param name: The name of the header to capitalize. @type name: C{bytes} @return: The given header capitalized using '-' as a word separator. @rtype: C{bytes} """ return b'-'.join([word.capitalize() for word in name.split(b'-')]) class _DictHeaders(MutableMapping): """ A C{dict}-like wrapper around L{Headers} to provide backwards compatibility for L{twisted.web.http.Request.received_headers} and L{twisted.web.http.Request.headers} which used to be plain C{dict} instances. @type _headers: L{Headers} @ivar _headers: The real header storage object. """ def __init__(self, headers): self._headers = headers def __getitem__(self, key): """ Return the last value for header of C{key}. """ if self._headers.hasHeader(key): return self._headers.getRawHeaders(key)[-1] raise KeyError(key) def __setitem__(self, key, value): """ Set the given header. """ self._headers.setRawHeaders(key, [value]) def __delitem__(self, key): """ Delete the given header. """ if self._headers.hasHeader(key): self._headers.removeHeader(key) else: raise KeyError(key) def __iter__(self): """ Return an iterator of the lowercase name of each header present. """ for k, v in self._headers.getAllRawHeaders(): yield k.lower() def __len__(self): """ Return the number of distinct headers present. """ # XXX Too many _ return len(self._headers._rawHeaders) # Extra methods that MutableMapping doesn't care about but that we do. def copy(self): """ Return a C{dict} mapping each header name to the last corresponding header value. """ return dict(self.items()) def has_key(self, key): """ Return C{True} if C{key} is a header in this collection, C{False} otherwise. """ return key in self @comparable class Headers(object): """ This class stores the HTTP headers as both a parsed representation and the raw string representation. It converts between the two on demand. @cvar _caseMappings: A C{dict} that maps lowercase header names to their canonicalized representation. @ivar _rawHeaders: A C{dict} mapping header names as C{bytes} to C{lists} of header values as C{bytes}. """ _caseMappings = { b'content-md5': b'Content-MD5', b'dnt': b'DNT', b'etag': b'ETag', b'p3p': b'P3P', b'te': b'TE', b'www-authenticate': b'WWW-Authenticate', b'x-xss-protection': b'X-XSS-Protection'} def __init__(self, rawHeaders=None): self._rawHeaders = {} if rawHeaders is not None: for name, values in rawHeaders.items(): self.setRawHeaders(name, values[:]) def __repr__(self): """ Return a string fully describing the headers set on this object. """ return '%s(%r)' % (self.__class__.__name__, self._rawHeaders,) def __cmp__(self, other): """ Define L{Headers} instances as being equal to each other if they have the same raw headers. """ if isinstance(other, Headers): return cmp( sorted(self._rawHeaders.items()), sorted(other._rawHeaders.items())) return NotImplemented def copy(self): """ Return a copy of itself with the same headers set. """ return self.__class__(self._rawHeaders) def hasHeader(self, name): """ Check for the existence of a given header. @type name: C{bytes} @param name: The name of the HTTP header to check for. 
@rtype: C{bool} @return: C{True} if the header exists, otherwise C{False}. """ return name.lower() in self._rawHeaders def removeHeader(self, name): """ Remove the named header from this header object. @type name: C{bytes} @param name: The name of the HTTP header to remove. @return: C{None} """ self._rawHeaders.pop(name.lower(), None) def setRawHeaders(self, name, values): """ Sets the raw representation of the given header. @type name: C{bytes} @param name: The name of the HTTP header to set the values for. @type values: C{list} @param values: A list of strings each one being a header value of the given name. @return: C{None} """ if not isinstance(values, list): raise TypeError("Header entry %r should be list but found " "instance of %r instead" % (name, type(values))) self._rawHeaders[name.lower()] = values def addRawHeader(self, name, value): """ Add a new raw value for the given header. @type name: C{bytes} @param name: The name of the header for which to set the value. @type value: C{bytes} @param value: The value to set for the named header. """ values = self.getRawHeaders(name) if values is None: self.setRawHeaders(name, [value]) else: values.append(value) def getRawHeaders(self, name, default=None): """ Returns a list of headers matching the given name as the raw string given. @type name: C{bytes} @param name: The name of the HTTP header to get the values of. @param default: The value to return if no header with the given C{name} exists. @rtype: C{list} @return: A C{list} of values for the given header. """ return self._rawHeaders.get(name.lower(), default) def getAllRawHeaders(self): """ Return an iterator of key, value pairs of all headers contained in this object, as strings. The keys are capitalized in canonical capitalization. """ for k, v in self._rawHeaders.items(): yield self._canonicalNameCaps(k), v def _canonicalNameCaps(self, name): """ Return the canonical name for the given header. @type name: C{bytes} @param name: The all-lowercase header name to capitalize in its canonical form. @rtype: C{bytes} @return: The canonical name of the header. """ return self._caseMappings.get(name, _dashCapitalize(name)) __all__ = ['Headers']
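A short usage sketch of the Headers API defined above; the header names and values are illustrative:

from twisted.web.http_headers import Headers

h = Headers({b'content-type': [b'text/html']})
h.addRawHeader(b'x-test', b'1')
h.hasHeader(b'Content-Type')   # True -- lookups are case-insensitive
h.getRawHeaders(b'x-test')     # [b'1']
list(h.getAllRawHeaders())     # names come back in canonical caps, e.g. b'Content-Type'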
xxsergzzxx/python-for-android
refs/heads/master
python-modules/twisted/twisted/conch/insults/colors.py
146
""" You don't really want to use this module. Try helper.py instead. """ CLEAR = 0 BOLD = 1 DIM = 2 ITALIC = 3 UNDERSCORE = 4 BLINK_SLOW = 5 BLINK_FAST = 6 REVERSE = 7 CONCEALED = 8 FG_BLACK = 30 FG_RED = 31 FG_GREEN = 32 FG_YELLOW = 33 FG_BLUE = 34 FG_MAGENTA = 35 FG_CYAN = 36 FG_WHITE = 37 BG_BLACK = 40 BG_RED = 41 BG_GREEN = 42 BG_YELLOW = 43 BG_BLUE = 44 BG_MAGENTA = 45 BG_CYAN = 46 BG_WHITE = 47
dudepare/django
refs/heads/master
tests/fixtures_model_package/tests.py
312
from __future__ import unicode_literals

import warnings

from django.core import management
from django.test import TestCase

from .models import Article


class SampleTestCase(TestCase):
    fixtures = ['fixture1.json', 'fixture2.json']

    def testClassFixtures(self):
        "Test cases can load fixture objects into models defined in packages"
        self.assertEqual(Article.objects.count(), 3)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline
        )


class FixtureTestCase(TestCase):

    def test_loaddata(self):
        "Fixtures can load data into models defined in packages"
        # Load fixture 1. Single JSON file, with two objects
        management.call_command("loaddata", "fixture1.json", verbosity=0)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Time to reform copyright",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline,
        )

        # Load fixture 2. JSON file imported by default. Overwrites some
        # existing objects
        management.call_command("loaddata", "fixture2.json", verbosity=0)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline,
        )

        # Load a fixture that doesn't exist
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            management.call_command("loaddata", "unknown.json", verbosity=0)
        self.assertEqual(len(w), 1)
        self.assertTrue(w[0].message, "No fixture named 'unknown' found.")

        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline,
        )
CoolProp/CoolProp-museum
refs/heads/master
wrappers/Python/CoolProp/Plots/Tests.py
2
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 18:39:22 2013

@author: logan
"""

from Plots import PropsPlot #TODO: Change to absolute import


def main():
    fluid_ref = 'n-Pentane'
    for plot_type in ['Ts']: #['pt', 'ph', 'ps', 'ts', 'pt', 'prho', 'trho']:
        plt = PropsPlot(fluid_ref, plot_type)
        plt.set_axis_limits([-0.5, 1.5, 300, 530])
        plt.draw_isolines('Q', [0.1, 0.9])
        plt.draw_isolines('P', [100, 2000])
        plt.draw_isolines('D', [2, 600])
        plt.show()

if __name__ == "__main__":
    main()
jroyal/drafthouse-api
refs/heads/master
drafthouse/cinemas.py
1
from feed import get_feed
from films import Film
import json


def get_cinemas(market_id):
    cinemas = []
    feed = get_feed(market_id)
    for cinema in feed["Market"]["Dates"][0]["Cinemas"]:
        cinemas.append({
            "cinema_name": cinema["CinemaName"],
            "cinema_id": cinema["CinemaId"]
        })
    return cinemas


def get_cinema(cinema_id):
    print "Cinema ID:", cinema_id
    market_id = "{}00".format(cinema_id[:2])
    print "Market ID:", market_id
    feed = get_feed(market_id)
    for cinema in feed["Market"]["Dates"][0]["Cinemas"]:
        if cinema["CinemaId"] == cinema_id:
            films = []
            for film in cinema["Films"]:
                films.append(Film(film).__dict__)
            print films
            cinema["Films"] = films
            return cinema
    return None
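A worked example of the market-id derivation in get_cinema above, using a made-up cinema id: the first two digits of a cinema id name its market, so the matching market feed id is those two digits followed by "00".

cinema_id = "0601"                        # hypothetical id, not a real cinema
market_id = "{}00".format(cinema_id[:2])  # -> "0600"
assert market_id == "0600"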
adamchainz/ansible
refs/heads/devel
lib/ansible/modules/notification/hipchat.py
34
#!/usr/bin/python # -*- coding: utf-8 -*- # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: hipchat version_added: "1.2" short_description: Send a message to Hipchat. description: - Send a message to a Hipchat room, with options to control the formatting. options: token: description: - API token. required: true room: description: - ID or name of the room. required: true from: description: - Name the message will appear to be sent from. Max length is 15 characters - above this it will be truncated. required: false default: Ansible msg: description: - The message body. required: true default: null color: description: - Background color for the message. required: false default: yellow choices: [ "yellow", "red", "green", "purple", "gray", "random" ] msg_format: description: - Message format. required: false default: text choices: [ "text", "html" ] notify: description: - If true, a notification will be triggered for users in the room. required: false default: 'yes' choices: [ "yes", "no" ] validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 api: description: - API url if using a self-hosted hipchat server. For Hipchat API version 2 use the default URI with C(/v2) instead of C(/v1). required: false default: 'https://api.hipchat.com/v1' version_added: 1.6.0 requirements: [ ] author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)" ''' EXAMPLES = ''' - hipchat: room: notif msg: Ansible task finished # Use Hipchat API version 2 - hipchat: api: https://api.hipchat.com/v2/ token: OAUTH2_TOKEN room: notify msg: Ansible task finished ''' # =========================================== # HipChat module specific support methods. 
# try: import json except ImportError: import simplejson as json # import module snippets from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.six.moves.urllib.request import pathname2url from ansible.module_utils.urls import fetch_url DEFAULT_URI = "https://api.hipchat.com/v1" MSG_URI_V1 = "/rooms/message" NOTIFY_URI_V2 = "/room/{id_or_name}/notification" def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V1): '''sending message to hipchat v1 server''' params = {} params['room_id'] = room params['from'] = msg_from[:15] # max length is 15 params['message'] = msg params['message_format'] = msg_format params['color'] = color params['api'] = api params['notify'] = int(notify) url = api + MSG_URI_V1 + "?auth_token=%s" % (token) data = urlencode(params) if module.check_mode: # In check mode, exit before actually sending the message module.exit_json(changed=False) response, info = fetch_url(module, url, data=data) if info['status'] == 200: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=NOTIFY_URI_V2): '''sending message to hipchat v2 server''' headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} body = dict() body['message'] = msg body['color'] = color body['message_format'] = msg_format body['notify'] = notify POST_URL = api + NOTIFY_URI_V2 url = POST_URL.replace('{id_or_name}', pathname2url(room)) data = json.dumps(body) if module.check_mode: # In check mode, exit before actually sending the message module.exit_json(changed=False) response, info = fetch_url(module, url, data=data, headers=headers, method='POST') # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows # 204 to be the expected result code. if info['status'] in [200, 204]: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) # =========================================== # Module execution. # def main(): module = AnsibleModule( argument_spec=dict( token=dict(required=True, no_log=True), room=dict(required=True), msg=dict(required=True), msg_from=dict(default="Ansible", aliases=['from']), color=dict(default="yellow", choices=["yellow", "red", "green", "purple", "gray", "random"]), msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), validate_certs=dict(default='yes', type='bool'), api=dict(default=DEFAULT_URI), ), supports_check_mode=True ) token = module.params["token"] room = str(module.params["room"]) msg = module.params["msg"] msg_from = module.params["msg_from"] color = module.params["color"] msg_format = module.params["msg_format"] notify = module.params["notify"] api = module.params["api"] try: if api.find('/v2') != -1: send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) else: send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception: e = get_exception() module.fail_json(msg="unable to send msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) if __name__ == '__main__': main()
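For reference, a sketch of the URL that send_msg_v2 builds before POSTing; the room name is illustrative, and the escaping shown is simply pathname2url as used above:

from ansible.module_utils.six.moves.urllib.request import pathname2url

api = 'https://api.hipchat.com/v2'
room = 'dev ops'  # illustrative room name
url = (api + '/room/{id_or_name}/notification').replace('{id_or_name}', pathname2url(room))
# url == 'https://api.hipchat.com/v2/room/dev%20ops/notification'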
VladKha/CodeWars
refs/heads/master
8 kyu/Name Shuffler/solve.py
1
def name_shuffler(s):
    return ' '.join(s.split()[::-1])
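A worked example of the one-liner above: split on whitespace, reverse the word list, and re-join.

name_shuffler('john McClane')  # -> 'McClane john'
name_shuffler('a b c')         # -> 'c b a'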
SlimRemix/android_external_chromium_org
refs/heads/lp5.1
tools/perf/measurements/record_per_area_unittest.py
33
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from measurements import record_per_area
from telemetry.core import wpr_modes
from telemetry.unittest import options_for_unittests
from telemetry.unittest import page_test_test_case
from telemetry.unittest import test


class RecordPerAreaUnitTest(page_test_test_case.PageTestTestCase):
  """Smoke test for record_per_area measurement

     Runs record_per_area measurement on a simple page and verifies that all
     metrics were added to the results. The test is purely functional, i.e. it
     only checks if the metrics are present and non-zero.
  """

  def setUp(self):
    self._options = options_for_unittests.GetCopy()
    self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF

  @test.Disabled('android')
  def testRecordPerArea(self):
    ps = self.CreatePageSetFromFileInUnittestDataDir('scrollable_page.html')
    measurement = record_per_area.RecordPerArea()
    results = self.RunMeasurement(measurement, ps, options=self._options)
    self.assertEquals(0, len(results.failures))
zdary/intellij-community
refs/heads/master
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_0/_pkg0_0_0/_pkg0_0_0_1/_pkg0_0_0_1_0/_mod0_0_0_1_0_1.py
30
name0_0_0_1_0_1_0 = None
name0_0_0_1_0_1_1 = None
name0_0_0_1_0_1_2 = None
name0_0_0_1_0_1_3 = None
name0_0_0_1_0_1_4 = None
signed/intellij-community
refs/heads/master
python/testData/keywordCompletion/finallyInExcept.py
83
try:
    a = 1
except:
    a = 2
fina<caret>
Hellrungj/CSC-412-Networking
refs/heads/master
Protocol_Buffers/venv/lib/python2.7/site-packages/google/protobuf/descriptor_database.py
88
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # https://developers.google.com/protocol-buffers/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Provides a container for DescriptorProtos.""" __author__ = '[email protected] (Matt Toia)' class Error(Exception): pass class DescriptorDatabaseConflictingDefinitionError(Error): """Raised when a proto is added with the same name & different descriptor.""" class DescriptorDatabase(object): """A container accepting FileDescriptorProtos and maps DescriptorProtos.""" def __init__(self): self._file_desc_protos_by_file = {} self._file_desc_protos_by_symbol = {} def Add(self, file_desc_proto): """Adds the FileDescriptorProto and its types to this database. Args: file_desc_proto: The FileDescriptorProto to add. Raises: DescriptorDatabaseException: if an attempt is made to add a proto with the same name but different definition than an exisiting proto in the database. """ proto_name = file_desc_proto.name if proto_name not in self._file_desc_protos_by_file: self._file_desc_protos_by_file[proto_name] = file_desc_proto elif self._file_desc_protos_by_file[proto_name] != file_desc_proto: raise DescriptorDatabaseConflictingDefinitionError( '%s already added, but with different descriptor.' % proto_name) # Add the top-level Message, Enum and Extension descriptors to the index. package = file_desc_proto.package for message in file_desc_proto.message_type: self._file_desc_protos_by_symbol.update( (name, file_desc_proto) for name in _ExtractSymbols(message, package)) for enum in file_desc_proto.enum_type: self._file_desc_protos_by_symbol[ '.'.join((package, enum.name))] = file_desc_proto for extension in file_desc_proto.extension: self._file_desc_protos_by_symbol[ '.'.join((package, extension.name))] = file_desc_proto def FindFileByName(self, name): """Finds the file descriptor proto by file name. Typically the file name is a relative path ending to a .proto file. The proto with the given name will have to have been added to this database using the Add method or else an error will be raised. 
Args: name: The file name to find. Returns: The file descriptor proto matching the name. Raises: KeyError if no file by the given name was added. """ return self._file_desc_protos_by_file[name] def FindFileContainingSymbol(self, symbol): """Finds the file descriptor proto containing the specified symbol. The symbol should be a fully qualified name including the file descriptor's package and any containing messages. Some examples: 'some.package.name.Message' 'some.package.name.Message.NestedEnum' The file descriptor proto containing the specified symbol must be added to this database using the Add method or else an error will be raised. Args: symbol: The fully qualified symbol name. Returns: The file descriptor proto containing the symbol. Raises: KeyError if no file contains the specified symbol. """ return self._file_desc_protos_by_symbol[symbol] def _ExtractSymbols(desc_proto, package): """Pulls out all the symbols from a descriptor proto. Args: desc_proto: The proto to extract symbols from. package: The package containing the descriptor type. Yields: The fully qualified name found in the descriptor. """ message_name = '.'.join((package, desc_proto.name)) yield message_name for nested_type in desc_proto.nested_type: for symbol in _ExtractSymbols(nested_type, message_name): yield symbol for enum_type in desc_proto.enum_type: yield '.'.join((message_name, enum_type.name))
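A minimal usage sketch of DescriptorDatabase; it assumes descriptor_pb2 from the same package, and the file, package, and message names below are made up:

from google.protobuf import descriptor_pb2

db = DescriptorDatabase()
fdp = descriptor_pb2.FileDescriptorProto()
fdp.name = 'some/file.proto'
fdp.package = 'some.package.name'
fdp.message_type.add().name = 'Message'
db.Add(fdp)

db.FindFileByName('some/file.proto')                      # -> fdp
db.FindFileContainingSymbol('some.package.name.Message')  # -> fdp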
maheshnakarmi/django-zoook-master
refs/heads/master
django_zoook/catalog/templatetags/product_products.py
2
# -*- coding: utf-8 -*-
############################################################################################
#
#    Zoook. OpenERP e-sale, e-commerce Open Source Management Solution
#    Copyright (C) 2011 Zikzakmedia S.L. (<http://www.zikzakmedia.com>). All Rights Reserved
#    $Id$
#
#    Module Created: 2012-07-04
#    Author: Mariano Ruiz <[email protected]>,
#            Enterprise Objects Consulting (<http://www.eoconsulting.com.ar>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
############################################################################################

from django import template

register = template.Library()


@register.inclusion_tag('catalog/tags/product_products.html', takes_context = True)
def render_product_products(context):
    request = context['request']

    values = []
    for product in context['product_products']:
        url = product.product_tmpl.get_absolute_url()
        name = product.product_tmpl.name
        if product.product_tmpl.product_product_set.count() > 1:
            url += '?code=' + product.code.lower()
            name += ' - ' + product.variants
        base_image = product.get_base_image()
        values.append({
            'id': product.id,
            'product': product.product_tmpl,
            'name': name,
            'url': url,
            'price': product.get_price(),
            'price_normal': product.price,
            'price_special': product.price_special,
            'position': product.product_tmpl.position,
            'base_image': base_image
        })

    # == order by position, name or price ==
    try:
        values.sort(key=lambda x: x[request.session['order']], reverse = request.session['order_by'] == 'desc')
    except:
        pass

    context['values'] = values
    return context
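A worked example of the session-driven sort at the end of render_product_products, with illustrative data; when the session keys are missing the bare except leaves the feed order untouched:

values = [{'name': 'B', 'position': 2}, {'name': 'A', 'position': 1}]
order, order_by = 'position', 'desc'  # hypothetical session contents
values.sort(key=lambda x: x[order], reverse=(order_by == 'desc'))
# -> the item with position 2 now sorts first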
neutrak/py3_markov
refs/heads/master
confuseus.py
1
#!/usr/bin/env python3 from py3net import * import socket import config import http_cat import markov import random import rpn import diff_tool import sys import time import ssl import json import errno import select import os import bcrypt #for the database backend which significantly reduces RAM use use_pg=False db_login=False try: import postgresql except ImportError: use_pg=False db_login=None SOURCE_CODE_URL='https://github.com/neutrak/py3_markov' MAX_IRC_LINE_LEN=(512) #debug state; can be always or never (might get expanded later) dbg_state='always' #debug history, how many previously generated debug messages are available dbg_hist=[] #a max, after which to rotate debug history dbg_hist_max=3 #NOTE: bot_nick, autojoin_channels, dbg_channels, host, port, ssl, authed_users, and ignored_users #are specified by the json config file; these are just defaults if values are not configured there #BEGIN JSON-configurable globals ======================================================== cmd_esc='!' bot_nick='confuseus' autojoin_channels=[] dbg_channels=[] host='ssl.irc.atw-inter.net' port=6697 use_ssl=True gen_cmd=True answer_questions=False qa_sets=[] #users allowed to !shup the bot #(aka clear outgoing queue) #TODO: replace authed_users with database-stored oplist user accounts for shup and debug command, etc. authed_users=[] #users to ignore (bots) #this is a blacklist, like /ignore in many clients ignored_users=[] #END JSON-configurable globals ========================================================== #NOTE: if use_pg is true this in-memory tell queue isn't used #and instead messages are stored in the tell_msg table in the postgres database #and they persist on restart #the tell queue is a list of messages that are meant to be sent to a particular user in a particular channel #along with meta information like who sent it and when tell_queue=[] #this is a single message that one user !tells another user #and is meant to be a single entry in the global tell_queue list class tell_msg: def __init__(self,time_sent,sender,nick,channel,content): self.time_sent=time_sent self.sender=sender self.nick=nick self.channel=channel self.content=content #a list of channels this bot is currently in, which includes user information #each channel entry is expected to be structured as follows (note that channel names and nicks are globally unique): #channel: { # names:{ # nick_a: { # mode:'o', # }, # nick_b: { # mode:'', #this means the user has no operator status # } # #NOTE: the bot itself is in this user list as well # } # last_op_rqst:<timestamp>, #the last time the bot asked for ops in this channel # } joined_channels={} seconds_bw_op_nag=14400 #4 hours #seconds_bw_op_nag=1800 #30 minutes #seconds_bw_op_nag=60 #debug; 1 minute #seconds_bw_op_nag=300 #debug; 5 minutes cmd_helptext={ 'wut':'generate text based on markov chains', 'example <command>':'display an example of a command and its output', 'dbg <always|never|#>':'enable/disable/show debug info about markov text generation (authorized users can enable or disable, any users can get history)', 'help [command]':'if used in PM with no arguments shows a command list; if a command is given help text will be displayed only for that command', 'shup [min nice lvl]':'clears low-priority messages from sending queue (authorized users can clear higher priority messages)', 'part':'parts current channel (you can invite me to get back)', 'wiki <topic>':'grabs topic summary from wikipedia', 'define <word>':'checks definition of word in gcide dictionary',
'source':'links the github url for this bot\'s source code', 'omdb <movie name>':'grabs movie information from the open movie database', 'splchk <word> [edit dist]':'checks given word against a dictionary and suggests fixes', 'dieroll [sides]':'generates random number in range [1,sides]', 'time [utc offset tz]':'tells current UTC time, or if a timezone is given, current time in that timezone', 'timecalc <%R> <tz1> <tz2>':'tells what the given time (%R == hours:minutes on a 24-hour clock) at the first utc-offset timezone will be at the second utc-offset timezone', 'seen-quit <nick>':'checks log files for last time when given nick was seen quitting (does NOT check if they\'re currently here)', 'oplist <add|rm|check> <user> [hostmask]':'allows channel operators to authorize/register other channel operators in a way that will persist between reconnections', #!login and !setpass documentation should include password requirement information 'login <pass> [channel]':'[PM ONLY] if you are an authorized channel operator, logs you in to that channel; passphrases must be at least 10 characters and contain no spaces; there are no other requirements', 'setpass <pass> [oldpass]':'[PM ONLY] sets a password for your channel operator account, if you have been invited to become a channel operator; if you have already set a password oldpass is required for authorization', 'tell <nick> <message>':'leaves a message for a user the next time they join this channel (not stored on disk; if the bot disconnects your message is lost)', } #a list of all unit conversions we currently support #this will be populated as the conversion functions get defined unit_conv_list=[] #a class to handle unit conversions in a generic way #having a separate case for each was leading to a lot of unnecessary duplication class unit_conv: def __init__(self,dimension,from_abbr,from_disp,to_abbr,to_disp,conv_func): self.dimension=dimension self.from_abbr=from_abbr self.from_disp=from_disp self.to_abbr=to_abbr self.to_disp=to_disp self.conv_func=conv_func def chk_cmd(self,cmd_esc,cmd): #note this is case-insensitive; #HOPEFULLY this isn't a problem...
if((cmd.lower())==(cmd_esc+self.from_abbr+'->'+self.to_abbr)): return True return False def output_conv(self,sock,channel,line_post_cmd): try: from_val=float(line_post_cmd) to_val=self.conv_func(from_val) py3queueln(sock,'PRIVMSG '+channel+' :'+round_nstr(from_val)+' '+self.from_disp+' is '+round_nstr(to_val)+' '+self.to_disp,1) except ValueError: py3queueln(sock,'PRIVMSG '+channel+' :Err: '+self.from_abbr+'->'+self.to_abbr+' requires a number, but I couldn\'t find one in your argument',1) #get a token from the given text, where token ends on the first instance of the substring delimiter def get_token(text,delimiter): success=False token='' delimiter_idx=text.find(delimiter) if(delimiter_idx>=0): token=text[0:delimiter_idx] text=text[delimiter_idx+len(delimiter):] success=True else: token=text text='' if(len(token)>0): success=True return (success,token,text) #unit conversion deg F to deg C def f_to_c(f): return (5.0/9.0)*(f-32) unit_conv_list.append(unit_conv('temperature','f','degrees F','c','degrees C',f_to_c)) #unit conversion deg C to deg F def c_to_f(c): return ((9.0/5.0)*c)+32 unit_conv_list.append(unit_conv('temperature','c','degrees C','f','degrees F',c_to_f)) #unit conversion feet to meters def ft_to_m(ft): return ft*0.3048 unit_conv_list.append(unit_conv('length','ft','feet','m','meters',ft_to_m)) #unit conversion meters to feet def m_to_ft(m): return m*3.281 unit_conv_list.append(unit_conv('length','m','meters','ft','feet',m_to_ft)) #unit conversion feet to centimeters def ft_to_cm(ft): return ft*30.48 unit_conv_list.append(unit_conv('length','ft','feet','cm','centimeters',ft_to_cm)) #unit conversion centimeters to feet def cm_to_ft(cm): return cm*0.03281 unit_conv_list.append(unit_conv('length','cm','centimeters','ft','feet',cm_to_ft)) #unit conversion kilograms to pounds (on earth) def kg_to_lb(kg): return kg*2.205 unit_conv_list.append(unit_conv('mass->force','kg','kilograms','lb','pounds under earth-surface gravity',kg_to_lb)) #unit conversion pounds (on earth) to kilograms def lb_to_kg(lb): return lb*0.4536 unit_conv_list.append(unit_conv('force->mass','lb','pounds under earth-surface gravity','kg','kilograms',lb_to_kg)) #unit conversion miles to kilometers def mi_to_km(mi): return mi*1.609344 unit_conv_list.append(unit_conv('length','mi','miles','km','kilometers',mi_to_km)) #unit conversion kilometers to miles def km_to_mi(km): return km/mi_to_km(1) unit_conv_list.append(unit_conv('length','km','kilometers','mi','miles',km_to_mi)) #unit conversion inches to centimeters def in_to_cm(inches): return inches*2.54 unit_conv_list.append(unit_conv('length','in','inches','cm','centimeters',in_to_cm)) #unit conversion centimeters to inches def cm_to_in(cm): return cm/in_to_cm(1) unit_conv_list.append(unit_conv('length','cm','centimeters','in','inches',cm_to_in)) #unit conversion fluid ounces to liters def oz_to_li(oz): return oz*0.02957 unit_conv_list.append(unit_conv('volume','oz','fluid ounces','l','liters',oz_to_li)) #unit conversion liters to fluid ounces def li_to_oz(li): return li/oz_to_li(1) unit_conv_list.append(unit_conv('volume','l','liters','oz','fluid ounces',li_to_oz)) #add generated help text for unit-conv messages for conversion in unit_conv_list: conversion_cmd=conversion.from_abbr+'->'+conversion.to_abbr+' <value>' help_str='converts '+conversion.dimension+' from '+conversion.from_disp+' to '+conversion.to_disp cmd_helptext[conversion_cmd]=help_str #determine if the given text is an odd number of question marks def odd_quest(txt): for idx in range(0,len(txt)): 
if(txt[idx]!='?'): return False if((len(txt)%2)==1): return True return False def learn_from(line,state_change,state_file,lines_since_write,lines_since_sort_chk): #writing back to the state file and checking the sorting are expensive operations #as such, they're not done every line, but only every n lines, as specified here lines_since_write+=1 lines_since_sort_chk+=1 check_sorted=False if(lines_since_sort_chk>=20): check_sorted=True lines_since_sort_chk=0 if((line.find('http://')<0) and (line.find('https://')<0)): state_change=markov.chain_from(line+"\n",state_change,prefix=['',''],check_sorted=check_sorted,use_pg=use_pg,db_login=db_login) else: print('Warn: Ignoring line \"'+line+'\" because it contained an http link') #for postgre writes are done on every line if(use_pg): lines_since_write=0 elif(lines_since_write>=60): markov.save_state_change_to_file(state_change,state_file) lines_since_write=0 return (lines_since_write,lines_since_sort_chk) def dbg_output(sock,dbg_str): global dbg_channels global dbg_state global dbg_hist global dbg_hist_max if(dbg_str!=''): #handle debug string history so users can ask later if(len(dbg_hist)>=dbg_hist_max): #if we've hit max history, then shift and push out the oldest element #note this is sorted from oldest to newest dbg_hist=dbg_hist[1:] # del(dbg_hist[0]) #put the new element at the end dbg_hist.append(dbg_str) #if set to, then output to debug channels if(dbg_state=='always'): for chan in dbg_channels: for line in dbg_str.split("\n"): if(line!=''): py3queueln(sock,'PRIVMSG '+chan+' :'+line[0:MAX_IRC_LINE_LEN-80],4) if(len(line[MAX_IRC_LINE_LEN-80:])>0): py3queueln(sock,'PRIVMSG '+chan+' :'+line[MAX_IRC_LINE_LEN-80:],4) # time.sleep(random.uniform(0.1,1.5)) #this gets the definition of a word out of the given dictionary def def_word(word,dict_root=os.path.join(os.environ['HOME'],'documents','gcide-0.51')): print('Looking up definitions for \''+word+'\'...') first_char=word[0] if(not first_char.isalpha()): return (False,'Err: word must start with alphabetical character') #get the correct dictionary file and slurp it in sub_dict_path=os.path.join(dict_root,'CIDE.'+first_char.upper()) try: fp=open(sub_dict_path,'rb') fcontent=fp.read().decode('latin-1') fp.close() except IOError: return (False,'Err: could not read dictionary file; is gcide-0.51 installed?') except UnicodeDecodeError: return (False,'Err: UnicodeDecodeError; your guess is as good as mine, dude') #check each word in the dictionary file for words which start with this letter #as we find entry blocks for this word, add them to the list for further parsing definitions=[] found_word=False entry_blocks=[] for line in fcontent.split("\n"): #if we found the word then just continue to get the whole block if(found_word): if(line==''): #this supports multiple entry blocks for the same word #(a break would only support one block, and hence one definition) found_word=False entry_blocks[len(entry_blocks)-1]+="\n"+line #check each entry for the word we're trying to define elif(line.startswith('<p><ent>')): ent_word=line[len('<p><ent>'):line.find('</ent>')] #if we found a definition for this word, then store the block #note this is case-sensitive if(ent_word==word): found_word=True print('Dbg: found word '+ent_word) entry_blocks.append(line) print('') #for each entry block, strip out the definition and anything else we may want #and discard the rest for entry_block in entry_blocks: entry_block=entry_block.strip("\n") entry_block=entry_block.replace('<br/','') print(entry_block+"\n") try: 
def_entry=entry_block[entry_block.find('<def>'):entry_block.find('</def>')] except: continue def_entries=[http_cat.html_strip_tags(def_entry)] #TODO: support parts of speech, other information about this word definitions+=def_entries #if no definitions were found, try again with an upper-case first letter, #or, if the first letter was already upper-case, return error if(len(definitions)==0): if(first_char==first_char.lower()): return def_word(first_char.upper()+word[1:],dict_root) return (False,'Err: no definition found') #one or more definitions was found, return success and the definitions return (True,definitions) #round so numbers look nice on IRC def round_nstr(num): return ('%10.5f' % num).lstrip(' ') #do substitutions which people expect from IRC but are really client-side def irc_str_map(line_post_cmd): if(line_post_cmd.startswith('/me')): line_post_cmd='\x01ACTION'+line_post_cmd[len('/me'):] return line_post_cmd #parse user and other information from a line text that was received from the server def parse_line_info(line): #get some information (user, nick, host, etc.) success,info,line=get_token(line,' ') info=info.lstrip(':') success,nick,info=get_token(info,'!') success,realname,info=get_token(info,'@') success,hostmask,info=get_token(info,' ') success,command,line=get_token(line,' ') channel='' if(command.upper()=='PRIVMSG'): success,channel,line=get_token(line,' ') #clean up any leading or trailing characters line=(line[1:] if line.startswith(':') else line) if(command.upper()=='JOIN'): channel=line return { 'info':info, 'nick':nick, 'realname':realname, 'hostmask':hostmask, 'command':command, 'channel':channel, 'content':line, } #TODO: update ALL py3queueln('PRIVMSG '...) calls to use this pm function instead #send a PRIVMSG to the server def pm(sock,channel,msg,priority=1): py3queueln(s=sock,message='PRIVMSG '+channel+' :'+msg,priority=priority) def send_tell_queue_msgs(sock,channel,nick): if(use_pg): tell_queue_msgs=markov.pg_run_query(db_login,'SELECT * FROM tell_msg WHERE LOWER(channel)=$1 AND LOWER(nick)=$2',(channel.lower(),nick.lower())) if(len(tell_queue_msgs)>0): for tell_entry in tell_queue_msgs: pm(sock,channel,'['+str(tell_entry['time_sent'])+'] <'+tell_entry['sender']+'> '+tell_entry['nick']+': '+tell_entry['content'],1) markov.pg_run_query(db_login,'DELETE FROM tell_msg WHERE LOWER(channel)=$1 AND LOWER(nick)=$2',(channel.lower(),nick.lower())) else: global tell_queue new_tell_queue=[] for tell_entry in tell_queue: #NOTE: nicknames are considered case-insensitive for the purpose of !tell if(nick.lower()==tell_entry.nick.lower()): pm(sock,channel,'['+str(tell_entry.time_sent)+'] <'+tell_entry.sender+'> '+tell_entry.nick+': '+tell_entry.content,1) else: new_tell_queue.append(tell_entry) tell_queue=new_tell_queue #handle conversions (stored in a generic unit_conv list) def handle_conversion(sock,cmd_esc,cmd,line_post_cmd,channel): global unit_conv_list handled=False for conversion in unit_conv_list: if(conversion.chk_cmd(cmd_esc,cmd)): conversion.output_conv(sock,channel,line_post_cmd) handled=True return handled #handle an omdb command def handle_omdb(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm): if(line_post_cmd!=''): title_words=line_post_cmd.rstrip(' ').split(' ') for i in range(0,len(title_words)): if(title_words[i][0]==title_words[i][0].lower()): title_words[i]=title_words[i][0].upper()+title_words[i][1:] url='http://www.omdbapi.com/?t='+('+'.join(title_words))+'&y=&plot=short&r=json' try: response=http_cat.get_page(url) except: py3queueln(sock,'PRIVMSG 
'+channel+' :Err: Could not retrieve data (weird characters in title?)',1) return response_type=response[0].split("\n")[0].rstrip("\r") if(response_type.find('200 OK')<0): py3queueln(sock,'PRIVMSG '+channel+' :Err: \"'+response_type+'\"',1) else: try: json_tree=json.loads(response[1]) except ValueError: py3queueln(sock,'PRIVMSG '+channel+' :Err: Could not parse json response from omdb',1) return #movie information now that retrieval is done title=config.get_json_param(json_tree,'Title') title='' if title==None else title rating=config.get_json_param(json_tree,'imdbRating') rating='' if rating==None else rating year=config.get_json_param(json_tree,'Year') year='' if year==None else year #remove unicode to be IRC-friendly year=year.replace('–','-') genre=config.get_json_param(json_tree,'Genre') genre='' if genre==None else genre plot=config.get_json_param(json_tree,'Plot') plot='' if plot==None else plot if((title=='') and (rating=='') and (year=='') and (genre=='') and (plot=='')): py3queueln(sock,'PRIVMSG '+channel+' :Err: No information (movie might not be in omdb, or might not exist)',1) else: py3queueln(sock,'PRIVMSG '+channel+' :'+title+' / '+rating+' / '+year+' / '+genre+' / '+plot,1) else: py3queueln(sock,'PRIVMSG '+channel+' :Err: omdb requires a movie title as a parameter',1) #handle a spellcheck command def handle_spellcheck(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm): dictionary=diff_tool.get_dictionary(hard_fail=False) #by default a word is close if it is one or fewer edits away from the given word edit_distance=1 chk_words=line_post_cmd.split(' ') #if requested, use a user-given edit distance to allow for more word suggestions #custom edit distance is the /last/ space-delimited argument #(multiple words may be given before it) if(len(chk_words)>1 and chk_words[-1].isdigit()): edit_distance=int(chk_words[-1]) chk_words=chk_words[0:len(chk_words)-1] #limit edit distance to <=5 though, #so we don't time out or get words that don't make any sense edit_distance=min(edit_distance,5) #how many words we can be requested to spell in a single call #words after this limit will be ignored max_words_per_line=2 words_on_line=0 for chk_word in chk_words: #skip words after the max if(words_on_line>=max_words_per_line): break #check this word; spellcheck uses a edit distance based fuzzy match internally #note that transpositions are included as a special case within the spellcheck function spellcheck_output='' match,close_words=diff_tool.spellcheck(chk_word,dictionary,edit_distance) if(match): spellcheck_output+='CORRECT: \''+chk_word+'\' is in my dictionary' else: spellcheck_output+='INCORRECT: \''+chk_word+'\' is NOT in my dictionary' if(len(close_words)>0): spellcheck_output+='; you may mean: ' print('[dbg] for \''+chk_word+'\': close_words='+str(close_words)) max_fix_words=8 fix_word_cnt=0 for fix_word in close_words: if(fix_word_cnt>=max_fix_words): break if(fix_word_cnt!=0): spellcheck_output+=', ' spellcheck_output+=fix_word fix_word_cnt+=1 if(fix_word_cnt>=max_fix_words): spellcheck_output+=', ...' 
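			#(aside: a minimal illustration of the fuzzy matching assumed above, if
			# diff_tool.spellcheck is edit-distance based: 'misspeling' can suggest
			# 'misspelling' via a single insertion at edit_distance=1, while swaps
			# like 'teh'->'the' are the transposition special case mentioned earlier)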
py3queueln(sock,'PRIVMSG '+channel+' :'+spellcheck_output,1) words_on_line+=1 def handle_timecalc(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm): arguments=line_post_cmd.split(' ') if(len(arguments)<3): py3queueln(sock,'PRIVMSG '+channel+' :Err: Too few arguments given to '+cmd_esc+'timecalc command; Usage: '+cmd_esc+'timecalc <%R> <tz1> <tz2>',1) return #parse out %R #%R means <hours (0-23)>:<minutes (0-60)> #the time is valid until we're missing something we need or an exception is thrown valid_time=True time_str=arguments[0] time_list=time_str.split(':') #note that we use < instead of != because if seconds are given that's okay we just ignore them if(len(time_list)<2): valid_time=False hours=0 minutes=0 try: hours=int(time_list[0]) minutes=int(time_list[1]) except ValueError: valid_time=False #note that leap seconds can cause a valid 23:60 time, but we don't consider that if(hours<0 or hours>=24 or minutes<0 or minutes>=60): valid_time=False if(not valid_time): py3queueln(sock,'PRIVMSG '+channel+' :Err: Invalid time given; syntax is <hours>:<minutes> where 0<=hours<=23, 0<=minutes<=59',1) return #save off the given time so we can manipulate the hours and minutes to calculate for the second timezone #this is what the time is in the first timezone given_hours=hours given_minutes=minutes #now get the timezones from the remaining arguments #(which we know exist because we did a check earlier) tz_1_str=arguments[1] tz_2_str=arguments[2] #these are utc offsets tz_1=0 tz_2=0 try: tz_1=int(tz_1_str) tz_2=int(tz_2_str) except ValueError: #note we re-use the valid_time variable here #in order to save memory, and since if it was previously false we would have already returned valid_time=False if(not valid_time): py3queueln(sock,'PRIVMSG '+channel+' :Err: Invalid timezone(s) given; should be an integer value representing UTC offset',1) return #if we got here then we have a valid time, and 2 valid timezones #time to do the real calculation! 
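	#worked example (mirroring the !example usage '!timecalc 12:00 -6 +0'):
	#tz_diff = 0-(-6) = 6, so 12:00 becomes 18:00 with no day rollover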
	tz_diff=(tz_2-tz_1)
	hours+=tz_diff
	
	#calculate carry (for when someone is a day different due to clock rollover)
	day_diff=0
	if(hours>23):
		hours-=24
		day_diff=1
	elif(hours<0):
		hours+=24
		day_diff=-1
	
	#pretty formatting by prepending 0s when numbers are <10
	given_hours_str=str(given_hours)
	if(len(given_hours_str)<2):
		given_hours_str='0'+given_hours_str
	given_minutes_str=str(given_minutes)
	if(len(given_minutes_str)<2):
		given_minutes_str='0'+given_minutes_str
	hours_str=str(hours)
	if(len(hours_str)<2):
		hours_str='0'+hours_str
	minutes_str=str(minutes)
	if(len(minutes_str)<2):
		minutes_str='0'+minutes_str
	
	py3queueln(sock,'PRIVMSG '+channel+' :'+given_hours_str+':'+given_minutes_str+' at UTC '+tz_1_str+' is '+hours_str+':'+minutes_str+(' the next day' if day_diff>0 else (' the previous day' if day_diff<0 else ''))+' at UTC '+tz_2_str,1)

def handle_wiki(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm,hostmask):
	#TODO: handle more specific errors; this is super nasty but should keep the bot from crashing
	try:
		wiki_search=line_post_cmd.replace(' ','%20')
		wiki_url='https://en.wikipedia.org/w/api.php?action=opensearch&format=json&search='+wiki_search+'&limit=2&namespace=0'
#		response=http_cat.get_page(wiki_url)
		#HTTPS generally uses port 443, rather than port 80
		response=http_cat.get_page(wiki_url,443)
		response_type=response[0].split("\n")[0].rstrip("\r")
		#if we get a 301 moved and the page requested was lower case then
		#before giving up try it as upper-case
		#(retry through handle_wiki itself; the old retry called handle_bot_cmd with
		#nick and state_change, which are not in scope here and raised a NameError)
		if((response_type.find('301 Moved')>=0) and (line_post_cmd[0]==line_post_cmd[0].lower())):
			return handle_wiki(sock,cmd_esc,cmd,(line_post_cmd[0].upper())+(line_post_cmd[1:]),channel,is_pm,hostmask)
		if(response_type.find('200 OK')<0):
			py3queueln(sock,'PRIVMSG '+channel+' :Err: \"'+response_type+'\"',1)
		else:
			wiki_text=response[1]
			if(wiki_text==''):
				py3queueln(sock,'PRIVMSG '+channel+' :Err: wiki got null page text',1)
			else:
				print(wiki_text) #debug
				#parse JSON and output the juicy bits
				wiki_json=json.loads(wiki_text)
				#disambiguate?
				valid_output=True
				if(len(wiki_json[1])>1):
					#for-else: the disambiguation prompt only triggers when no title matched exactly
					for n in range(0,len(wiki_json[1])):
						if(wiki_json[1][n].lower()==line_post_cmd.lower()):
							break
					else:
						py3queueln(sock,'PRIVMSG '+channel+' :Please disambiguate; you may want one of the following: '+', '.join(wiki_json[1]))
						valid_output=False
				if(len(wiki_json[3])==0):
					py3queueln(sock,'PRIVMSG '+channel+' :Err: No wikipedia pages found for \"'+line_post_cmd+'\"')
					valid_output=False
				if(valid_output):
					output_text=' '.join(wiki_json[2])
					reserved_len=len('PRIVMSG '+channel+' :...'+"\r\n")
					if(len(output_text)>=(MAX_IRC_LINE_LEN-reserved_len)):
						#truncate to leave room for the PRIVMSG prefix and the trailing '...'
						output_text=output_text[0:(MAX_IRC_LINE_LEN-reserved_len)]+'...'
					py3queueln(sock,'PRIVMSG '+channel+' :'+output_text,1)
					#link the wiki page itself?
					py3queueln(sock,'PRIVMSG '+channel+' :'+' '.join(wiki_json[3]),1)
	except:
		py3queueln(sock,'PRIVMSG '+channel+' :Err: wiki failed to get page text',1)

def handle_define(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm):
	#what's the word, dawg?
word=line_post_cmd #get all the definitions of the word from the local dictionary success,definitions=def_word(word) #if definitions were found, then output those if(success): def_line=word+': ' for i in range(0,len(definitions)): if(i!=0): def_line+=' | ' def_line+='('+str(i)+') '+definitions[i] py3queueln(sock,'PRIVMSG '+channel+' :'+def_line[0:MAX_IRC_LINE_LEN]) #no definitions found; output the error message else: err_msg=definitions py3queueln(sock,'PRIVMSG '+channel+' :'+err_msg) #display an example of the given command def handle_example(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,hostmask,state_change,use_pg,db_login): if((len(line_post_cmd)>0) and (not line_post_cmd.startswith(cmd_esc))): line_post_cmd=cmd_esc+line_post_cmd if(line_post_cmd==''): py3queueln(sock,'PRIVMSG '+channel+' :Err: Missing argument (the command); see '+cmd_esc+'help for a command list',1) elif(line_post_cmd==(cmd_esc+'wut')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'wut',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'wut','',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'example')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'example '+cmd_esc+'wut',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'example',cmd_esc+'wut',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'dbg')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'dbg',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'dbg','',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'shup')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'shup 4',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'shup','4',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'calc')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'calc 10*9^-3',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'calc','10*9^-3',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'wiki')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'wiki wikipedia',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'wiki','wikipedia',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'define')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'define dictionary',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'define','dictionary',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'omdb')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'omdb Airplane!',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'omdb','Airplane!',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif((line_post_cmd==(cmd_esc+'splchk')) or (line_post_cmd==(cmd_esc+'sp')) or (line_post_cmd==(cmd_esc+'spellcheck'))): #intentional misspelling to demonstrate spellcheck ability py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'splchk misspeling',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'splchk','misspeling',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'dieroll')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'dieroll 6',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'dieroll','6',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'time')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'time -6',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'time','-6',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'timecalc')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'timecalc 12:00 -6 +0',1) 
handle_bot_cmd(sock,cmd_esc,cmd_esc+'timecalc','12:00 -6 +0',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'seen-quit')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'seen-quit neutrak',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'seen-quit','neutrak',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif(line_post_cmd==(cmd_esc+'oplist')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'oplist add neutrak',1) handle_bot_cmd(sock,cmd_esc,cmd_esc+'oplist','add neutrak',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif((line_post_cmd==(cmd_esc+'login')) or (line_post_cmd==(cmd_esc+'setpass'))): py3queueln(sock,'PRIVMSG '+channel+' :Warn: command '+line_post_cmd+' is only valid in PM and contains sensitive information, so it does not have an example listed here',1) elif(line_post_cmd==(cmd_esc+'tell')): py3queueln(sock,'PRIVMSG '+channel+' :'+cmd_esc+'tell '+nick+' Hello') handle_bot_cmd(sock,cmd_esc,cmd_esc+'tell',nick+' Hello',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) elif((line_post_cmd==(cmd_esc+'help')) or (line_post_cmd==(cmd_esc+'part')) or (line_post_cmd==(cmd_esc+'source'))): py3queueln(sock,'PRIVMSG '+channel+' :Warn: '+line_post_cmd+' takes no arguments and so has no examples; see '+cmd_esc+'help for information about it',1) else: for conversion in unit_conv_list: conv_cmd=(cmd_esc+conversion.from_abbr+'->'+conversion.to_abbr) if(line_post_cmd==conv_cmd): py3queueln(sock,'PRIVMSG '+channel+' :'+conv_cmd+' 1',1) handle_bot_cmd(sock,cmd_esc,conv_cmd,'1',channel,nick,is_pm,hostmask,state_change,use_pg,db_login) break else: py3queueln(sock,'PRIVMSG '+channel+' :Err: Unrecognized argument ('+line_post_cmd+'); see '+cmd_esc+'help for a command list',1) def handle_help(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm): global unit_conv_list global cmd_helptext if((len(line_post_cmd)>0) and (not line_post_cmd.startswith(cmd_esc))): line_post_cmd=cmd_esc+line_post_cmd help_cmd=line_post_cmd.split(' ')[0] help_cmd_exists=len(help_cmd)>0 found_help_cmd=False if((is_pm) and (not help_cmd_exists)): pm(sock,channel,'This is a simple markov chain bot',3) #display all help text for every command in a big long list #if an argument is given, only help for that text will be shown for cmd_w_args in cmd_helptext: cmd=cmd_w_args.split(' ')[0] #accept /(cmd_esc)?(command)/ as an argument #and if given only output the help text for that single command #instead of everything in a big long list #if no argument was given if((not help_cmd_exists) or (help_cmd==cmd_esc+cmd)): if(is_pm or help_cmd_exists): help_str=cmd_esc+cmd_w_args while(len(help_str)<len(cmd_esc+'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')): help_str+=' ' help_str+='-> '+cmd_helptext[cmd_w_args] pm(sock,channel,help_str,3) #if a command parameter was given #only output the help text for that single command #instead of everything in a big long list if(help_cmd_exists and (help_cmd==cmd_esc+cmd)): found_help_cmd=True break if(help_cmd_exists and (not found_help_cmd)): pm(sock,channel,'Err: Unrecognzied command '+help_cmd+'; you can send !help in PM to get a full command list') if((not is_pm) and (not help_cmd_exists)): pm(sock,channel,'This is a simple markov chain bot; use '+cmd_esc+'wut or address me by name to generate text; PM !help for more detailed help; !help !command for detailed information about a particular command',3) #check when a user was last seen def handle_seen(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm,log_file='log.txt'): import datetime #read the 
last 5000 lines (or more if they're short)
	backlog_chars=(512*5000)
	#or the whole file, if it's smaller than that
	file_size=os.path.getsize(log_file)
	fp=open(log_file,'r')
	fp.seek(0,os.SEEK_END)
	fp.seek(file_size-min(file_size,backlog_chars))
	fcontent=fp.read()
	fp.close()
	
	#start at the first complete line
	#no partial line parsing
	nl_idx=fcontent.find("\n")
	if(nl_idx>=0):
		fcontent=fcontent[nl_idx+1:]
	
	#time the user was last seen, as a *nix timestamp string
	last_seen_time='0'
	
	#look for QUIT lines with the following format
	#1467596281 :BasmatiRice!uid32945@hostmask QUIT :"Connection closed for inactivity"
	for line in fcontent.split("\n"):
		sp_idx=line.find(' ')
		if(sp_idx<0):
			continue
		
		#store timestamp so we can say when the user quit
		timestamp=line[0:sp_idx]
		line=line[sp_idx+1:]
		
		#skip PRIVMSG, PING, etc.
		if(not line.startswith(':')):
			continue
		
		#get the nick that quit
		line=line[1:]
		bang_idx=line.find('!')
		if(bang_idx<0 or bang_idx>30):
			continue
		nick=line[0:bang_idx]
		line=line[bang_idx+1:]
		
		#if this isn't who we were looking for then skip it
		if(nick.lower()!=line_post_cmd.lower()):
			continue
		
		sp_idx=line.find(' ')
		if(sp_idx>=0 and line[sp_idx+1:].startswith('QUIT')):
			print('[dbg] '+timestamp+' :'+line)
			last_seen_time=timestamp
		#if this wasn't a quit ignore it
	
	if(last_seen_time=='0'):
		py3queueln(sock,'PRIVMSG '+channel+' :Warn: I don\'t have any recent QUITs from nick '+line_post_cmd+' in my logs; I might not have been there; they might not have existed; no idea, man',3)
	else:
		pretty_time=datetime.datetime.utcfromtimestamp(int(last_seen_time)).strftime('%Y-%m-%d %H:%M:%S UTC')
		py3queueln(sock,'PRIVMSG '+channel+' :Nick '+line_post_cmd+' was last seen quitting a channel I was in at '+pretty_time+' ('+last_seen_time+'); check if they\'re here now; I don\'t do that',3)

def user_mode_letter(user_mode_symbol):
	#channel ops
	if(user_mode_symbol=='@'):
		return 'o'
	#half channel ops
	elif(user_mode_symbol=='%'):
		return 'h'
	#voice
	elif(user_mode_symbol=='+'):
		return 'v'
	return ''

#returns the user mode letters (o,h,v) rather than symbols (@,%,+)
#also returns the value of the nick without these symbols included
def user_mode_symbols_to_letters(nick_with_mode_symbols):
	user_mode_symbols=nick_with_mode_symbols
	user_mode_letters=''
	idx=0
	while((idx<len(user_mode_symbols)) and (user_mode_letter(user_mode_symbols[idx])!='')):
		#accumulate every leading mode symbol (e.g. '@+nick' yields 'ov'),
		#rather than overwriting with only the last symbol seen
		user_mode_letters+=user_mode_letter(user_mode_symbols[idx])
		idx+=1
	#2nd return value is the user nick, when this is used on a nick string
	nick_sans_mode=(user_mode_symbols[idx:] if (idx<len(user_mode_symbols)) else '')
	return user_mode_letters,nick_sans_mode

#returns True if this user is a channel operator
#otherwise returns False
def is_channel_operator(channel,nick):
	if(channel in joined_channels):
		if(nick in joined_channels[channel]['names']):
			nick_mode=joined_channels[channel]['names'][nick]['mode']
			if(nick_mode.find('o')>=0):
				return True
	return False

def require_pg(sock,cmd_esc,cmd,channel):
	if(not use_pg):
		py3queueln(sock,'PRIVMSG '+channel+' :Err: '+cmd_esc+cmd+' is only valid if a postgres database is in use; ask the bot operator to fix the configuration to allow this command to be used',1)
		return False
	return True

def handle_oplist_add(sock,cmd_esc,cmd,args,channel,nick,is_pm,new_op_nick,db_handle,user_results,channel_results):
	if(len(args)<3):
		py3queueln(sock,'PRIVMSG '+channel+' :Err: you must provide the hostmask argument when adding a channel operator; it should be the hostmask that user is currently connected from',1)
		return
	
	#NOTE: the hostmask of the command is the channel operator 
that is adding the user #so we can't just use the hostmask that was part of this command #but rather we need to take this hostmask as an argument hostmask=args[2] if((len(user_results)>0) and (user_results[0]['pass_hash'] is None)): pg_query='UPDATE user_accounts SET hostmasks=$1 WHERE nick=$2' postgre_ret=db_handle.prepare(pg_query) update_result=postgre_ret([hostmask],new_op_nick) #if this user is already authorized for this channel, just say so and return if((len(channel_results)>0) and (len(user_results)>0)): if(user_results[0]['pass_hash'] is None): py3queueln(sock,'PRIVMSG '+channel+' :User '+new_op_nick+' has already been invited using hostmask '+str(list(user_results[0]['hostmasks']))+' but has not set a password with '+cmd_esc+'setpass. Hostmask has been updated to '+str([hostmask])+' but a password still needs to be set. ',1) else: py3queueln(sock,'PRIVMSG '+channel+' :User '+new_op_nick+' already has an account with mode +'+channel_results[0]['mode_str']+' and cannot be added again',1) #if this user is not yet authorized for this channel but exists in the oplist_users table, #then add an associated entry for user_channel_modes elif(len(user_results)>0): pg_query='INSERT INTO user_channel_modes (nick,channel,mode_str) VALUES ($1,$2,$3)' postgre_ret=db_handle.prepare(pg_query) insert_result=postgre_ret(new_op_nick,channel,'o') py3queueln(sock,'PRIVMSG '+channel+' :User '+new_op_nick+' was successfully granted channel ops on '+channel,1) #and grant them ops now py3queueln(sock,'MODE '+channel+' +o '+new_op_nick,1) #if this user is not authorized and has never been authorized before, #create a new database entry with their current hostmask used to idenitfy them and null password #they will be identified by hostmask until they set a password else: pg_query='INSERT INTO user_accounts (nick,pass_hash,hostmasks) VALUES ($1,$2,$3)' postgre_ret=db_handle.prepare(pg_query) insert_result=postgre_ret(new_op_nick,None,[hostmask]) pg_query='INSERT INTO user_channel_modes (nick,channel,mode_str) VALUES ($1,$2,$3)' postgre_ret=db_handle.prepare(pg_query) insert_result=postgre_ret(new_op_nick,channel,'o') py3queueln(sock,'PRIVMSG '+channel+' :User '+new_op_nick+' was added to the channel op list for '+channel+' and will now need to set their password with !setpass in PM before disconnecting in order to complete account setup',1) def handle_oplist_rm(sock,cmd_esc,cmd,args,channel,nick,is_pm,new_op_nick,db_handle,user_results,channel_results): #remove the specified users from the list of channel operators for this channel pg_query='DELETE FROM user_channel_modes WHERE nick=$1 AND channel=$2' postgre_ret=db_handle.prepare(pg_query) delete_result=postgre_ret(new_op_nick,channel) #if the user currently has channel ops, de-op them if(new_op_nick in joined_channels[channel]['names']): py3queueln(sock,'MODE '+channel+' -o '+new_op_nick,1) py3queueln(sock,'PRIVMSG '+channel+' :User '+new_op_nick+' has been removed from the channel op list for '+channel+'; their mode authorizations on other channels remain unchanged',1) def handle_oplist(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,use_pg,db_login): if(not require_pg(sock,cmd_esc,cmd,channel)): return if(not (is_channel_operator(channel,nick))): py3queueln(sock,'PRIVMSG '+channel+' :Err: '+cmd_esc+'oplist can only be used by channel operators; come back when you have ops',1) return if(not (is_channel_operator(channel,bot_nick))): py3queueln(sock,'PRIVMSG '+channel+' :Err: '+cmd_esc+'oplist can only be used if this bot has channel operator permission; 
grant me ops first',1) return args=line_post_cmd.split(' ') if(len(args)<2): py3queueln(sock,'PRIVMSG '+channel+' :Usage: '+cmd_esc+'oplist <add|rm|check> <user> [hostmask]',1) return new_op_nick=args[1] db_handle=postgresql.open('pq://'+pg_user+':'+pg_passwd+'@localhost/'+pg_dbname) pg_query='SELECT * FROM user_accounts WHERE nick=$1' postgre_ret=db_handle.prepare(pg_query) user_results=postgre_ret(new_op_nick) pg_query='SELECT * FROM user_channel_modes WHERE channel=$1 AND nick=$2' postgre_ret=db_handle.prepare(pg_query) channel_results=postgre_ret(channel,new_op_nick) if(args[0]=='add'): handle_oplist_add(sock,cmd_esc,cmd,args,channel,nick,is_pm,new_op_nick,db_handle,user_results,channel_results) db_handle.close() return elif(args[0]=='rm'): handle_oplist_rm(sock,cmd_esc,cmd,args,channel,nick,is_pm,new_op_nick,db_handle,user_results,channel_results) db_handle.close() return elif((args[0]=='check') or (args[0]=='status')): #if this user is already authorized for this channel, just say so and return if((len(channel_results)>0) and (len(user_results)>0)): if(user_results[0]['pass_hash'] is None): py3queueln(sock,'PRIVMSG '+channel+' :User '+new_op_nick+' has already been invited as an operator of '+channel+' using hostmask '+str(user_results[0]['hostmasks'])+' but has not set a password with '+cmd_esc+'setpass. ',1) else: py3queueln(sock,'PRIVMSG '+channel+' :User '+new_op_nick+' already has an account with mode +'+channel_results[0]['mode_str']+' on this channel',1) elif(len(user_results)>0): py3queueln(sock,'PRIVMSG '+channel+' :User '+new_op_nick+' has an account registered with this bot but does not have channel ops on channel '+channel,1) else: py3queueln(sock,'PRIVMSG '+channel+' :User '+new_op_nick+' has no account registered with this bot',1) db_handle.close() return else: py3queueln(sock,'PRIVMSG '+channel+' :Err: Unrecognized subcommand '+args[0],1) db_handle.close() def handle_login(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,use_pg,db_login): if(not require_pg(sock,cmd_esc,cmd,channel)): return if(not is_pm): py3queueln(sock,'PRIVMSG '+channel+' :Err: '+cmd_esc+'login is only valid in PM; you should change your password IMMEDIATELY with '+cmd_esc+'setpass',1) return args=line_post_cmd.split(' ') if(len(args)<1): py3queueln(sock,'PRIVMSG '+channel+' :Usage: '+cmd_esc+'login <pass> [channel]',1) return db_handle=postgresql.open('pq://'+pg_user+':'+pg_passwd+'@localhost/'+pg_dbname) pg_query='SELECT * FROM user_accounts WHERE nick=$1' postgre_ret=db_handle.prepare(pg_query) user_results=postgre_ret(nick) pg_query='SELECT * FROM user_channel_modes WHERE nick=$1' postgre_ret=db_handle.prepare(pg_query) channel_results=postgre_ret(nick) if(len(user_results)<1): py3queueln(sock,'PRIVMSG '+channel+' :Err: You cannot log in because you do not have an account. Ask a channel operator to add you using '+cmd_esc+'oplist first, and make sure they specify your hostmask correctly',1) db_handle.close() return #if the user hasn't prevoiusly set a password if(user_results[0]['pass_hash'] is None): py3queueln(sock,'PRIVMSG '+channel+' :Err: You cannot log in because you have not set a password. 
Set one with '+cmd_esc+'setpass first.',1) db_handle.close() return pw_hash=user_results[0]['pass_hash'].encode('utf-8') if(bcrypt.hashpw(args[0].encode('utf-8'),pw_hash)!=pw_hash): py3queueln(sock,'PRIVMSG '+channel+' :Err: The provided password value is incorrect; try again',1) db_handle.close() return db_handle.close() for channel_dict in channel_results: #if a channel argument was given then only apply mode for that channel if(len(args)>=2): if(channel_dict['channel']!=args[1]): continue #if no channel as provided then grant modes in ALL channels this user is authorized for for mode_chr in channel_dict['mode_str']: py3queueln(sock,'MODE '+channel_dict['channel']+' +'+mode_chr+' '+nick,1) py3queueln(sock,'PRIVMSG '+channel+' :You are now logged in'+(' to channel '+args[1] if len(args)>=2 else ''),1) def handle_setpass(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,hostmask,use_pg,db_login): if(not require_pg(sock,cmd_esc,cmd,channel)): return if(not is_pm): py3queueln(sock,'PRIVMSG '+channel+' :Err: '+cmd_esc+'setpass is only valid in PM (and use a different password from the one you just posted in the channel...)',1) return args=line_post_cmd.split(' ') if(len(args)<1): py3queueln(sock,'PRIVMSG '+channel+' :Usage: '+cmd_esc+'setpass <pass> [oldpass]',1) return db_handle=postgresql.open('pq://'+pg_user+':'+pg_passwd+'@localhost/'+pg_dbname) pg_query='SELECT * FROM user_accounts WHERE nick=$1' postgre_ret=db_handle.prepare(pg_query) user_results=postgre_ret(nick) if(len(user_results)<1): py3queueln(sock,'PRIVMSG '+channel+' :Err: You cannot set a password because you do not have an account. Ask a channel operator to add you using '+cmd_esc+'oplist first, and make sure they specify your hostmask correctly',1) db_handle.close() return #if the user hasn't prevoiusly set a password if(user_results[0]['pass_hash'] is None): #then first make sure the hostmask matches what's on file #since we don't have a password #we're authenticating using a combination of username and hostmask until a password is set if(not (hostmask in (user_results[0]['hostmasks']))): py3queueln(sock,'PRIVMSG '+channel+' :Err: You cannot set a password because you do not have an account. Ask a channel operator to add you using '+cmd_esc+'oplist first',1) db_handle.close() return elif(len(args)<2): py3queueln(sock,'PRIVMSG '+channel+' :Err: You must provide your old password when setting a new one',1) db_handle.close() return else: pw_hash=user_results[0]['pass_hash'].encode('utf-8') if(bcrypt.hashpw(args[1].encode('utf-8'),pw_hash)!=pw_hash): py3queueln(sock,'PRIVMSG '+channel+' :Err: The provided oldpass value is incorrect; try again',1) db_handle.close() return if(len(args[0])<10): py3queueln(sock,'PRIVMSG '+channel+' :Err: Passwords must be at least 10 characters long',1) db_handle.close() return #if we got here and didn't return then the user's hostmask matches what we have #OR their oldpass login was correct #so set their password now salt=bcrypt.gensalt() pw_hash=bcrypt.hashpw(args[0].encode('utf-8'),salt) pg_query='UPDATE user_accounts SET pass_hash=$1 WHERE nick=$2' postgre_ret=db_handle.prepare(pg_query) postgre_ret(pw_hash.decode('utf-8'),nick) py3queueln(sock,'PRIVMSG '+channel+' :Passphrase set successfully! Remember your password because we cannot recover it (though we can reset it)',1) db_handle.close() def handle_tell(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,hostmask,use_pg,db_login): import datetime global tell_queue if(is_pm): py3queueln(sock,'PRIVMSG '+channel+' :Err: This is a PM. 
This only works in a channel; messages are sent the next time the user joins that channel. ') return False from_nick=nick success,to_nick,content=get_token(line_post_cmd,' ') if(not success): py3queueln(sock,'PRIVMSG '+channel+' :Err: Wrong argument structure; Usage: '+cmd_esc+'tell <nick> <message>') return False for ch_nick in joined_channels[channel]['names']: if(to_nick.lower()==ch_nick.lower()): py3queueln(sock,'PRIVMSG '+channel+' :Err: That user is already in this channel; they heard you. ') return False utc_now=datetime.datetime.utcnow() utc_now-=datetime.timedelta(microseconds=utc_now.microsecond) utc_now_str=utc_now.isoformat() if(use_pg): markov.pg_run_query(db_login,'INSERT INTO tell_msg (time_sent,sender,nick,channel,content) VALUES ($1,$2,$3,$4,$5)',(utc_now_str,from_nick,to_nick,channel,content)) else: tell_queue.append(tell_msg(utc_now_str,from_nick,to_nick,channel,content)) py3queueln(sock,'PRIVMSG '+channel+' :Message stored. I will tell them your message the next time they join '+channel) return True def handle_bot_cmd(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,hostmask,state_change,use_pg,db_login): global gen_cmd global unit_conv_list global dbg_channels global dbg_state global dbg_hist global dbg_hist_max global qa_sets handled=False dbg_str='' #check if this was a bot command if((cmd==(cmd_esc+'wut')) or (cmd==cmd_esc)): output='' if(line_post_cmd!=''): output,dbg_str=markov.gen_from_str(state_change,use_pg,db_login,irc_str_map(line_post_cmd),random.randint(0,1)+1,retries_left=3,qa_sets=qa_sets) if(output==''): output,dbg_str=markov.generate(state_change,use_pg=use_pg,db_login=db_login,back_gen=False) #properly close CTCP when it's generated if(output.startswith('\x01ACTION') and (not output.endswith('\x01'))): output+='\x01' #prevent generating commands directed towards other bots, #if configured to do that if(not gen_cmd): if(output.startswith('!')): output='\\'+output py3queueln(sock,'PRIVMSG '+channel+' :'+output,1) # dbg_str='[dbg] generated from line \"'+line_post_cmd+'\"'+"\n"+dbg_str dbg_str='[dbg] (\"'+line_post_cmd+'\") '+dbg_str handled=True elif(cmd==(cmd_esc+'example')): handle_example(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,hostmask,state_change,use_pg,db_login) handled=True elif(cmd==(cmd_esc+'dbg') or cmd==(cmd_esc+'debug')): #set debug channel ON if authorized if(line_post_cmd=='always'): if(nick in authed_users): dbg_state='always' py3queueln(sock,'PRIVMSG '+channel+' :Info: Now outputting debug messages in '+(','.join(dbg_channels))+' without being asked',1) else: py3queueln(sock,'PRIVMSG '+channel+' :Err: You are not authorized to change debug settings',1) #set debug channel OFF if authorized elif(line_post_cmd=='never'): if(nick in authed_users): dbg_state='never' py3queueln(sock,'PRIVMSG '+channel+' :Info: No longer outputting debug messages without being asked',1) else: py3queueln(sock,'PRIVMSG '+channel+' :Err: You are not authorized to change debug settings',1) #no argument or an index means display some debug info from the history elif(line_post_cmd.strip()=='' or line_post_cmd.isdigit()): #print the entire debug history to the console a line at a time for hist in dbg_hist: print(hist) #digits are reverse indices into the debug history hist_ofst=0 if(line_post_cmd.isdigit()): hist_ofst=int(line_post_cmd) #bounds checking for security and to prevent crashing if(hist_ofst<0 or hist_ofst>=len(dbg_hist)): hist_ofst=0 py3queueln(sock,'PRIVMSG '+channel+' :Warn: Invalid history offset; displaying last debug value',1) #if no 
argument is given then assume the user wanted the last debug message if(len(dbg_hist)>0): # py3queueln(sock,'PRIVMSG '+channel+' :'+dbg_hist[len(dbg_hist)-1-hist_ofst],2) line=dbg_hist[len(dbg_hist)-1-hist_ofst] py3queueln(sock,'PRIVMSG '+channel+' :'+line[0:MAX_IRC_LINE_LEN-80],2) if(len(line[MAX_IRC_LINE_LEN-80:])>0): py3queueln(sock,'PRIVMSG '+channel+' :'+line[MAX_IRC_LINE_LEN-80:],2) else: py3queueln(sock,'PRIVMSG '+channel+' :Err: No debug history exists',1) else: py3queueln(sock,'PRIVMSG '+channel+' :Err: Unrecognized argument given to dbg, \''+line_post_cmd+'\'',1) handled=True elif(cmd==(cmd_esc+'help')): handle_help(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm) handled=True #clear (low-priority) messages from the output queue elif((cmd==(cmd_esc+'shup')) or (cmd==(cmd_esc+'shoo'))): #the minimum nice value to clear messages from the output queue nice_lvl=4 try: nice_lvl=int(line_post_cmd.strip(' ')) except ValueError: nice_lvl=4 #authorized users can suppress high-priority output if(nick in authed_users): nice_lvl=max(nice_lvl,1) #unauthorized users can only suppress low-priority output else: nice_lvl=max(nice_lvl,4) py3clearq(nice_lvl) py3queueln(sock,'PRIVMSG '+channel+' :Info: outgoing message queue cleared of low-priority messages (nice_lvl='+str(nice_lvl)+')',1) handled=True elif(cmd==(cmd_esc+'part')): if(not is_pm): #only allow !part to be issued by channel operators, not normal users #since this bot will now handle oplist-related tasks as well if(is_channel_operator(channel,nick)): py3queueln(sock,'PART '+channel+' :Goodbye for now (you can invite me back any time)',1) else: py3queueln(sock,'PRIVMSG '+channel+' :Err: '+cmd_esc+'part can only be used by channel operators; come back when you have ops',1) else: py3queueln(sock,'PRIVMSG '+channel+' :part from where, asshole? 
this is a PM!',1) handled=True #conversions are their own function now elif(handle_conversion(sock,cmd_esc,cmd,line_post_cmd,channel)): handled=True elif(cmd==(cmd_esc+'calc')): try: err_msgs,result=rpn.rpn_eval(rpn.rpn_translate(line_post_cmd)) if(len(result)==1): py3queueln(sock,'PRIVMSG '+channel+' :'+str(result[0]),1) else: py3queueln(sock,'PRIVMSG '+channel+' :Warn: An error occurred during evaluation; simplified RPN expression is '+str(result),1) for err_idx in range(0,len(err_msgs)): py3queueln(sock,'PRIVMSG '+channel+' :Err #'+str(err_idx)+': '+str(err_msgs[err_idx]),3) except ValueError: py3queueln(sock,'PRIVMSG '+channel+' :Err: Could not parse expression (ValueError) (divide by zero?)',1) except IndexError: py3queueln(sock,'PRIVMSG '+channel+' :Err: Could not parse expression (IndexError) (mismatched parens?)',1) except: py3queueln(sock,'PRIVMSG '+channel+' :Err: Unhandled exception in rpn parsing; tell neutrak the command you used to get this and he\'ll look into it',1) handled=True elif(cmd==(cmd_esc+'wiki')): handle_wiki(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm,hostmask) handled=True #add wiktionary or some other dictionary with definitions if at all reasonable to do #(we're using gcide) elif(cmd==(cmd_esc+'define')): handle_define(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm) handled=True elif(cmd==(cmd_esc+'source')): py3queueln(sock,'PRIVMSG '+channel+' :bot source code: '+SOURCE_CODE_URL,1) handled=True elif((cmd==(cmd_esc+'omdb')) or (cmd==(cmd_esc+'imdb'))): handle_omdb(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm) handled=True elif((cmd==(cmd_esc+'splchk')) or (cmd==(cmd_esc+'spellcheck')) or (cmd==(cmd_esc+'sp')) or (cmd==(cmd_esc+'spell'))): handle_spellcheck(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm) handled=True elif(cmd==(cmd_esc+'dieroll')): sides=6 if(line_post_cmd!=''): try: sides=int(line_post_cmd) except ValueError: py3queueln(sock,'PRIVMSG '+channel+' :Warn: Invalid number of sides, assuming d-6',1) sides=6 if(sides<1): py3queueln(sock,'PRIVMSG '+channel+' :Warn: Number of sides less than 1, setting number of sides 1 (this will return 1)',1) sides=1 value=random.randint(1,sides) py3queueln(sock,'PRIVMSG '+channel+' :Rolled a '+str(value)+' with a d'+str(sides),1) handled=True elif(cmd==(cmd_esc+'time')): tz=0 if(line_post_cmd!=''): try: tz=float(line_post_cmd) except ValueError: py3queueln(sock,'PRIVMSG '+channel+' :Err: '+line_post_cmd+' is not a valid UTC-offset timezone; will give UTC time instead...',1) if(abs(tz)>24): py3queueln(sock,'PRIVMSG '+channel+' :Err: timezone offsets from utc cannot be outside the range [-24,24] because that makes no sense; giving UTC time...') tz=0 current_time=time.asctime(time.gmtime(time.time()+(tz*60*60))) py3queueln(sock,'PRIVMSG '+channel+' :Current time is '+current_time+' (UTC '+('+'+str(tz) if tz>=0 else str(tz))+')') handled=True elif(cmd==(cmd_esc+'timecalc')): handle_timecalc(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm) handled=True #TODO: add weather forecast via darksky or yahoo weather or http://weather.gc.ca/canada_e.html (for Canada) #TODO: add a proper !seen command that shows the last time a user was online (the last QUIT or latest log we have if they are not currently online) elif(cmd==(cmd_esc+'seen-quit')): handle_seen(sock,cmd_esc,cmd,line_post_cmd,channel,is_pm) handled=True elif(cmd==(cmd_esc+'oplist')): handle_oplist(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,use_pg,db_login) handled=True #login (op aliased) -> grant the user the appropriate mode on all channels they are authorized 
for, or a specific channel if a channel was specified elif((cmd==(cmd_esc+'login')) or (cmd==(cmd_esc+'op'))): handle_login(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,use_pg,db_login) handled=True #setpass -> register a user who has a nick and hostmask that was invited by someone using !oplist add elif(cmd==(cmd_esc+'setpass')): handle_setpass(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,hostmask,use_pg,db_login) handled=True #tell -> leave a message for a user the next time they re-join this channel #so that this can be a full replacement for all the commonly-used functionality that tard used to provide elif(cmd==(cmd_esc+'tell')): handle_tell(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,hostmask,use_pg,db_login) handled=True elif(cmd.startswith(cmd_esc)): try: #alternate conversion syntax #check if the "command" is a valid floating point number conv_arg=float(cmd[len(cmd_esc):]) #the line after the "command" is the command checked against the conversion list #some arguments here are a little weird because they're being transposed found_conversion=False for conversion in unit_conv_list: #we found the requested conversion, so do the thing and output the result #note that "X to Y" gets translated here as "X->Y" if(conversion.chk_cmd(cmd_esc,cmd_esc+line_post_cmd.replace(' to ','->'))): conversion.output_conv(sock,channel,conv_arg) found_conversion=True #this was a valid number, but something went wrong during conversion if(not found_conversion): py3queueln(sock,'PRIVMSG '+channel+' :Err: Conversion not found '+line_post_cmd,1) #in any case if we got a number don't handle this line any more handled=True #the "command" wasn't a valid floating point number, #so output an error for PM, or just do nothing in a channel except ValueError: if(is_pm): py3queueln(sock,'PRIVMSG '+channel+' :Warn: Invalid command: \"'+cmd+'\"; see '+cmd_esc+'help for help',1) #this prevents the bot from learning from unrecognized ! commands #(which are usually meant for another bot) # handled=True return (handled,dbg_str) def handle_privmsg(sock,line,state_change,state_file,lines_since_write,lines_since_sort_chk): global gen_cmd global qa_sets #get some information (user, nick, host, etc.) line_info=parse_line_info(line) info=line_info['info'] nick=line_info['nick'] realname=line_info['realname'] hostmask=line_info['hostmask'] command=line_info['command'] channel=line_info['channel'] line=line_info['content'] #debug log_line('['+channel+'] <'+nick+'> '+line) #ignore blacklisted users, #but throw some output on the console so we know that's happening if nick in ignored_users: print('Warn: ignored line from '+nick+' because their nick is blacklisted (ignored)') return (lines_since_write,lines_since_sort_chk) #strip trailing whitespace because users expect that to not matter line=line.rstrip(' ').rstrip("\t") #and now because whitespace is gone it's possible to have a blank line #so ignore blank lines if(line==''): return (lines_since_write,lines_since_sort_chk) #if they PM'd us, then PM 'em right back #that'll show 'em is_pm=False if(channel==bot_nick): is_pm=True channel=nick success,cmd,line_post_cmd=get_token(line,' ') dbg_str='' cmd_esc='!' 
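	#dispatch note: cmd is the first space-delimited token and line_post_cmd is the
	#remainder, so for '!splchk misspeling' cmd is '!splchk' and line_post_cmd is
	#'misspeling'; handle_bot_cmd below returns (handled,dbg_str), which lets any
	#line it does not recognize fall through to the markov learning path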
try: #if this was a command for the bot, handle it cmd_handled,cmd_dbg_str=handle_bot_cmd(sock,cmd_esc,cmd,line_post_cmd,channel,nick,is_pm,hostmask,state_change,use_pg,db_login) except Exception as e: py3queueln(sock,'PRIVMSG '+channel+' :Err: Unhandled exception '+(str(e).replace("\n",' '))+'; tell neutrak the command you used to get this and he\'ll look into it',1) return (lines_since_write,lines_since_sort_chk) if(cmd_handled): #then it's handled and we're done #debug if the command gave us a debug string dbg_str=cmd_dbg_str #support question/answer style markov chain-ing stuff elif(cmd.startswith(bot_nick)): output,dbg_str=markov.gen_from_str(state_change,use_pg,db_login,irc_str_map(line_post_cmd),random.randint(0,1)+1,retries_left=3,qa_sets=qa_sets) #if it didn't have that word as a starting state, #then just go random (fall back functionality) if(output==''): output,dbg_str=markov.generate(state_change,use_pg=use_pg,db_login=db_login,back_gen=False) #properly close CTCP when it's generated if(output.startswith('\x01ACTION') and (not output.endswith('\x01'))): output+='\x01' #prevent generating commands directed towards other bots, #if configured to do that if(not gen_cmd): if(output.startswith('!')): output='\\'+output # dbg_str='[dbg] generated from line \"'+line_post_cmd+'\"'+"\n"+dbg_str dbg_str='[dbg] (\"'+line_post_cmd+'\") '+dbg_str py3queueln(sock,'PRIVMSG '+channel+' :'+output,1) #because people often talk to the bot in complete phrases, #go ahead and include these lines in the learning set lines_since_write,lines_since_sort_chk=learn_from(line,state_change,state_file,lines_since_write,lines_since_sort_chk) dbg_output(sock,dbg_str) return (lines_since_write,lines_since_sort_chk) #if it wasn't a command, then add this to the markov chain state and update the file on disk else: #if this was a pm then let the user know how to get help if they want it if(is_pm): py3queueln(sock,'PRIVMSG '+channel+' :learning... (use '+cmd_esc+'help to get help, or '+cmd_esc+'wut to generate text)',3) lines_since_write,lines_since_sort_chk=learn_from(line,state_change,state_file,lines_since_write,lines_since_sort_chk) #at ente's request; allow users in "debug" channels to read the bot's mind #this may or may not output, depending on the dbg_state global, but it is always called #because it stores a history for later output dbg_output(sock,dbg_str) return (lines_since_write,lines_since_sort_chk) #handle a join command that was sent by the server def handle_server_join(sock,line): global bot_nick global joined_channels #get some information (user, nick, host, etc.) 
line_info=parse_line_info(line) info=line_info['info'] nick=line_info['nick'] realname=line_info['realname'] hostmask=line_info['hostmask'] command=line_info['command'] channel=line_info['content'] if(not (channel in joined_channels)): joined_channels[channel]={ 'names':{}, #give a 10 second delay on the first op request to give time for the NAMEs list (353) to come in 'last_op_rqst':(time.time()-max(seconds_bw_op_nag-10,0)) } #NOTE: the joining user might be ourselves, but that's fine #since we want to be in the user list as well joined_channels[channel]['names'][nick]={ 'mode':'' } #if there are any !tells queued up for this user and channel, #send them now send_tell_queue_msgs(sock=sock,channel=channel,nick=nick) def handle_server_353(sock,line): global joined_channels #confuseus @ #faid3.0 :confuseus mz Spock @neutrak success,my_name,line=get_token(line,' ') success,my_status,line=get_token(line,' ') success,channel,line=get_token(line,' ') line=line.lstrip(':') joined_channels[channel]['names'][bot_nick]['mode'],empty_str=user_mode_symbols_to_letters(my_status) names=line.split(' ') for name in names: mode_str,nick_sans_mode=user_mode_symbols_to_letters(name) #skip empty string; nicks can't be blank if(nick_sans_mode==''): continue if(not (nick_sans_mode in joined_channels[channel]['names'])): joined_channels[channel]['names'][nick_sans_mode]={} #NOTE: mode strings might update if another names list is requested later #so we override any existing mode information joined_channels[channel]['names'][nick_sans_mode]={ 'mode':mode_str } send_tell_queue_msgs(sock=sock,channel=channel,nick=nick_sans_mode) def handle_server_part(line): global joined_channels #:neu_tst!~neutrak@hostmask PART #bot-testing #:neu_tst!~neutrak@hostmask PART #bot-testing :Message #get some information (user, nick, host, etc.) line_info=parse_line_info(line) nick=line_info['nick'] channel=line_info['content'] if(line_info['content'].find(' ')>=0): success,channel,part_msg=get_token(line_info['content'],' ') part_msg=part_msg.lstrip(':') #if it was us leaving the channel #then remove the entire joined channels entry for this channel if(nick==bot_nick): joined_channels.pop(channel) #otherwise, just remove this user's information from the channel names list else: if(nick in joined_channels[channel]['names']): joined_channels[channel]['names'].pop(nick) def handle_server_quit(line): global joined_channels #NOTE: for quit we don't need to account for the case where it's us doing the quitting #because in that case we disconnect from the server #:neu_tst!~neutrak@hostmask QUIT :Quit: neu_tst #get some information (user, nick, host, etc.) 
line_info=parse_line_info(line) nick=line_info['nick'] for channel in joined_channels: if(nick in joined_channels[channel]['names']): joined_channels[channel]['names'].pop(nick) def handle_server_mode(sock,line): global joined_channels #:neutrak!~neutrak@hostmask MODE #bot-testing +o confuseus success,channel,line=get_token(line,' ') #ignore network-wide user settings for this user #we're only interested in channel mode changes for now if(channel==bot_nick): return #NOTE: mode changes can get really complicated with potential for +ooo type expressions #so in order to be as reliable (and lazy) as possible instead of parsing all of that #we just send a NAMES request after every mode change in a channel #to get an updated list of what the modes ended up at after all was said and done py3queueln(sock,'NAMES '+channel,1) def run_periodic_op_rqst(sock): global joined_channels db_handle=postgresql.open('pq://'+pg_user+':'+pg_passwd+'@localhost/'+pg_dbname) pg_query='SELECT * FROM user_channel_modes' postgre_ret=db_handle.prepare(pg_query) user_channel_modes=postgre_ret() db_handle.close() #add oplist-related handling here, specifically #for each channel this bot is in for channel in joined_channels: for user_channel_mode in user_channel_modes: # if there is at least one user authorized to have ops in this channel in the oplist_channels database table if(user_channel_mode['channel']==channel): # if this bot doesn't already have ops in that channel if(joined_channels[channel]['names'][bot_nick]['mode'].find('o')<0): seconds_since_last_op_nag=((time.time())-(joined_channels[channel]['last_op_rqst'])) # and it's been at least 30 minutes since this bot asked for OPs last if(seconds_since_last_op_nag>=seconds_bw_op_nag): # ping channel operators and ask them to grant OPs to the bot ch_op_nicks=[] for ch_user in joined_channels[channel]['names']: if(joined_channels[channel]['names'][ch_user]['mode'].find('o')>=0): ch_op_nicks.append(ch_user) if(len(ch_op_nicks)>0): py3queueln(sock,'PRIVMSG '+channel+' :'+(' '.join(ch_op_nicks))+' - give me mode +o please',0) joined_channels[channel]['last_op_rqst']=time.time() #once we know we control the ops list for at least one user in this channel #we can stop checking for what other users are authorized #since we know that we need channel ops regardless break def handle_server_line(sock,line,state_change,state_file,lines_since_write,lines_since_sort_chk): global bot_nick global joined_channels #ignore blank lines if(line==''): return (lines_since_write,lines_since_sort_chk) #PONG back when we get a PING; this is needed for keepalive functionality if(line.startswith('PING')): success,ping,msg=get_token(line,' :') if(success): py3queueln(sock,'PONG :'+msg,0) #request ops as needed to make !oplist function correctly run_periodic_op_rqst(sock) return (lines_since_write,lines_since_sort_chk) #error, so exit elif(line.startswith('ERROR')): exit(1) full_line=line success,server_name,line=get_token(line,' ') success,server_cmd,line=get_token(line,' ') #verbose debug if(server_cmd!='PRIVMSG'): log_line(server_name+' '+server_cmd+' '+line) #if this line isn't itself a mode change #(because if it is a mode change it might be the line that is currently giving us OPs) #NOTE: because after every mode change we re-request a names list, also ignore responses from that if(not (server_cmd in ['MODE','353','366'])): #request ops as needed to make !oplist function correctly run_periodic_op_rqst(sock) #hello message received, so auto-join if(server_cmd=='001'): #mark us as being a bot 
(since we are) #on networks that recognize that py3queueln(sock,'MODE '+bot_nick+' +B',1) for channel in autojoin_channels+dbg_channels: py3queueln(sock,'JOIN :'+channel,1) #on a server JOIN message, add the specified channel information to the joined_channels dict #create the channel structure if it doesn't already exist (in case we were doing the joining) #if someone other than us was doing the joining, add them to the names list for this channel elif(server_cmd=='JOIN'): handle_server_join(sock,full_line) #handle 353 names list, joins, and quits, to get a list of users for each channel we're in #which includes channel operator information #as channel operator information is necessary for oplist handling elif(server_cmd=='353'): handle_server_353(sock,line) #on PART and QUIT, remove the user from the appropriate channel information #since they are no longer present elif(server_cmd=='PART'): handle_server_part(full_line) elif(server_cmd=='QUIT'): handle_server_quit(full_line) #track mode changes both to ourselves and others in the joined_channels list elif(server_cmd=='MODE'): handle_server_mode(sock,line) #nick in use, so change nick elif(server_cmd=='433'): bot_nick+='_' py3queueln(sock,'NICK :'+bot_nick,1) #got a NICK change; update the bot_nick var if it's us #otherwise ignore #":[email protected] NICK :accirc_2" elif(server_cmd=='NICK'): name_mask=server_name.lstrip(':') bang_idx=name_mask.find('!') if(bang_idx>=0): old_nick=name_mask[0:bang_idx] new_nick=line.lstrip(':') if(old_nick==bot_nick): bot_nick=new_nick #got a PM, so reply elif(server_cmd=='PRIVMSG'): lines_since_write,lines_since_sort_chk=handle_privmsg(sock,full_line,state_change,state_file,lines_since_write,lines_since_sort_chk) #got an invite, so join elif(server_cmd=='INVITE'): succcesss,name,channel=get_token(line,' :') py3queueln(sock,'JOIN :'+channel,1) return (lines_since_write,lines_since_sort_chk) def main(state_file,use_ssl=True): global bot_nick state_change=None if(not use_pg): print('Reading in state file...') state_change=markov.read_state_change_from_file(state_file) #if given an argument, it's the name to use if(len(sys.argv)>1): bot_nick=sys.argv[1] print('Bot Nick is '+bot_nick) print('Creating '+('encrypted' if use_ssl else 'UNENCRYPTED')+' connection to '+host+' on port '+str(port)+'...') #tcp client socket sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM) try: sock.connect((host,port)) if(use_ssl): #use ssl #NOTE: this does NOT do cert checking and so could easily be mitm'd #but anything's better than nothing sock=ssl.wrap_socket(sock,do_handshake_on_connect=False) except: print('Err: Could not connect to '+host+' on port '+str(port)) return 1 #set the socket to be non-blocking #this will throw a socket.error when there is no data to read sock.setblocking(0) if(use_ssl): #we didn't actually do the handshake before we set non-blocking #so we need to do that now before we continue while True: try: sock.do_handshake() break except ssl.SSLWantReadError: select.select([sock], [], []) except ssl.SSLWantWriteError: select.select([], [sock], []) py3queueln(sock,'NICK :'+bot_nick) py3queueln(sock,'USER '+bot_nick+' 2 3 4') #initialize counters for events that only happen every n lines lines_since_write=100 lines_since_sort_chk=100 #carry from multi-line reads carry='' read_finished=True done=False while(not done): #if there is data from the user, then add that data to the outgoing queue #this allows the bot to act as a "puppet" or very primitive client stdin_data=select.select([sys.stdin],[],[],0.0)[0] 
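	#(the 0.0 timeout makes this select call a non-blocking poll of stdin, so the
	#main loop keeps servicing the socket even when nothing is typed)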
while(len(stdin_data)>0): user_data=stdin_data[0].readline() user_data=user_data.rstrip("\n").rstrip("\r") print('Debug: user_data='+str(user_data)) py3queueln(sock,user_data,1) stdin_data=stdin_data[1:] if(read_finished): #send a line from the outgoing queue #if the outgoing queue is empty this does nothing if(py3send_queue(sock)): #we want our queue priorities to actually matter #so after sending something, wait a second or 2 #so that our receiving buffer can actually be ready to read any additional data #before we send more time.sleep(1.0) read_finished=False data='' try: # print('Dbg: Waiting for data...') data=py3recv(sock,BUFFER_SIZE) except ssl.SSLWantReadError: #wait 0.05 seconds before trying to read (or write) again #don't want to hog the CPU time.sleep(0.05) read_finished=True continue except socket.error as e: err=e.args[0] if(err==errno.EAGAIN or err==errno.EWOULDBLOCK): #wait 0.05 seconds before trying to read (or write) again #don't want to hog the CPU time.sleep(0.05) read_finished=True else: #if we got a real error (not just out of data) then exit print('Err: Socket Error: '+str(e)) done=True continue #carry over from previous lines that weren't newline-terminated data=carry+data #and clear out the carry for next time carry='' line='' for i in range(0,len(data)): if(data[i]=="\r" or data[i]=="\n"): lines_since_write,lines_since_sort_chk=handle_server_line(sock,line,state_change,state_file,lines_since_write,lines_since_sort_chk) line='' else: line+=data[i] if(line!=''): carry=line print('Err: Connection Closed') #if we got here then we're totally finished #so close the socket sock.close() #runtime if(__name__=='__main__'): config_file=config.dflt_cfg if(len(sys.argv)>1): config_file=sys.argv[1] print('using JSON config file '+config_file) #read the configuration from the json configuration file json_cfg_tree=config.read_json_file(config_file) #set configuration from the config file #if configuration for anything is omitted a default value from the code is used #command escape json_cmd_esc=config.get_json_param(json_cfg_tree,'cmd_esc') if(json_cmd_esc!=None): cmd_esc=json_cmd_esc #nick json_bot_nick=config.get_json_param(json_cfg_tree,'bot_nick') if(json_bot_nick!=None): bot_nick=json_bot_nick #channels to join on startup json_autojoin_channels=config.get_json_param(json_cfg_tree,'autojoin_channels') if(json_autojoin_channels!=None): autojoin_channels=json_autojoin_channels #debug channels to join and spam json_dbg_channels=config.get_json_param(json_cfg_tree,'dbg_channels') if(json_dbg_channels!=None): dbg_channels=json_dbg_channels #server connection information (host, port, encryption) json_host=config.get_json_param(json_cfg_tree,'host') if(json_host!=None): host=json_host json_port=config.get_json_param(json_cfg_tree,'port') if(json_port!=None): port=json_port json_use_ssl=config.get_json_param(json_cfg_tree,'use_ssl') if(json_use_ssl!=None): use_ssl=json_use_ssl #anti-spam settings (prevent generating commands to other bots, etc.) json_gen_cmd=config.get_json_param(json_cfg_tree,'gen_cmd') if(json_gen_cmd!=None): gen_cmd=json_gen_cmd #specially-handled user lists json_authed_users=config.get_json_param(json_cfg_tree,'authed_users') if(json_authed_users!=None): authed_users=json_authed_users json_ignored_users=config.get_json_param(json_cfg_tree,'ignored_users') if(json_ignored_users!=None): ignored_users=json_ignored_users #IRC-related configuration done #get question-answer sets from the configuration file #this feature is thanks to Mark (hey look I did it!) 
#these will be used to generate better responses to pre-formatted discussion json_answer_questions=config.get_json_param(json_cfg_tree,'answer_questions') if(json_answer_questions!=None): answer_questions=json_answer_questions #we allow disabling this function without requiring deleting all entries with the answer_questions bool if(answer_questions): json_qa_sets=config.get_json_param(json_cfg_tree,'qa_sets') if(json_qa_sets!=None): qa_sets=json_qa_sets else: qa_sets=[] #get markov (database) configuration use_pg=config.get_json_param(json_cfg_tree,'use_pg') if(use_pg==None): use_pg=False if(use_pg): #this is for the optional postgres backend config_tree=config.read_json_file(config_file) pg_user=config.get_json_param(config_tree,'pg_user') pg_passwd=config.get_json_param(config_tree,'pg_passwd') pg_dbname=config.get_json_param(config_tree,'pg_dbname') if(pg_user==None or pg_passwd==None or pg_dbname==None): print('Err: Need username, password, and db settings to use postgresql backend') use_pg=False else: db_login=markov.db_info(pg_user,pg_passwd,pg_dbname) print('using postgres database '+db_login.db_name+' for input and output of state changes') #run the markov.sql file on this database to ensure all tables are set up correctly #just in case any don't already exist db_handle=markov.pg_connect(db_login) db_handle.execute(open('markov.sql','r').read()) db_handle.close() #update tell helptext since with postgres messages actually will persist on restart cmd_helptext['tell <nick> <message>']='leaves a message for a user the next time they join this channel' main(config.get_json_param(config.read_json_file(config_file),'state_file'),use_ssl=use_ssl)
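Note: the non-blocking TLS handshake loop in main() above is a reusable pattern on its own. A minimal sketch of it using only the standard library; the host and port are placeholders, certificate verification is disabled purely to mirror the bot's self-acknowledged MITM-prone behavior, and ssl.SSLContext is used because the module-level ssl.wrap_socket the bot calls was removed in Python 3.12.

import select
import socket
import ssl

# Placeholder endpoint; the bot reads host/port from its JSON config file.
HOST, PORT = "irc.example.net", 6697

sock = socket.create_connection((HOST, PORT))
ctx = ssl.create_default_context()
# Mirror the bot: no certificate checking (it warns this allows MITM).
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

tls = ctx.wrap_socket(sock, do_handshake_on_connect=False)
tls.setblocking(False)

# Retry the handshake until it completes, blocking in select() rather
# than spinning on the CPU.
while True:
    try:
        tls.do_handshake()
        break
    except ssl.SSLWantReadError:
        select.select([tls], [], [])
    except ssl.SSLWantWriteError:
        select.select([], [tls], [])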
clumsy/intellij-community
refs/heads/master
python/testData/refactoring/changeSignature/addDefaultParam.before.py
415
def bar(a, b):
    pass


bar(1, 2)
mellis13/moose
refs/heads/devel
framework/contrib/nsiqcppstyle/nsiqcppstyle_util.py
43
# Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above
#     copyright notice, this list of conditions and the following disclaimer
#     in the documentation and/or other materials provided with the
#     distribution.
#   * Neither the name of NHN Inc. nor the names of its
#     contributors may be used to endorse or promote products derived from
#     this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os
import sys


def WeAreFrozen():
    return hasattr(sys, "frozen")


def ModulePath():
    if WeAreFrozen():
        return os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding()))
    return os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))


def GetRuntimePath():
    "Return the path of this tool"
    if sys.platform == "win32":
        runtimePath = ModulePath()
    else:
        modename = globals()['__name__']
        module = sys.modules[modename]
        runtimePath = os.path.dirname(module.__file__)
    return runtimePath


def GetSystemKey():
    if sys.platform == "win32":
        return "window"
    else:
        return "linux"
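Note: WeAreFrozen()/ModulePath() above implement the usual frozen-executable check for py2exe-style bundles, written for Python 2 (hence unicode()). A rough Python 3 equivalent, as a sketch:

import os
import sys


def we_are_frozen():
    # py2exe/PyInstaller-style launchers set sys.frozen on bundled builds.
    return getattr(sys, "frozen", False)


def module_path():
    # In a frozen build the source tree is gone, so resolve relative to
    # the executable instead of this module's __file__.
    if we_are_frozen():
        return os.path.dirname(os.path.abspath(sys.executable))
    return os.path.dirname(os.path.abspath(__file__))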
junalmeida/Sick-Beard
refs/heads/master
sickbeard/providers/omgwtfnzbs.py
15
# Author: Jordon Smith <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import generic import sickbeard from sickbeard import tvcache from sickbeard import helpers from sickbeard import classes from sickbeard import logger from sickbeard.exceptions import ex, AuthException from sickbeard import show_name_helpers from datetime import datetime try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree try: import json except ImportError: from lib import simplejson as json class OmgwtfnzbsProvider(generic.NZBProvider): def __init__(self): generic.NZBProvider.__init__(self, "omgwtfnzbs") self.cache = OmgwtfnzbsCache(self) self.url = 'https://omgwtfnzbs.org/' self.supportsBacklog = True def isEnabled(self): return sickbeard.OMGWTFNZBS def _checkAuth(self): if not sickbeard.OMGWTFNZBS_USERNAME or not sickbeard.OMGWTFNZBS_APIKEY: raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.") return True def _checkAuthFromData(self, parsed_data, is_XML=True): if parsed_data is None: return self._checkAuth() if is_XML: # provider doesn't return xml on error return True else: parsedJSON = parsed_data if 'notice' in parsedJSON: description_text = parsedJSON.get('notice') if 'information is incorrect' in parsedJSON.get('notice'): logger.log(u"Incorrect authentication credentials for " + self.name + " : " + str(description_text), logger.DEBUG) raise AuthException("Your authentication credentials for " + self.name + " are incorrect, check your config.") elif 'please try again later' in parsedJSON.get('notice'): logger.log(self.name + u" down for maintenance, aborting", logger.DEBUG) return False elif '0 results matched your terms' in parsedJSON.get('notice'): return True else: logger.log(u"Unknown error given from " + self.name + " : " + str(description_text), logger.DEBUG) return False return True def _get_season_search_strings(self, show, season): return [x for x in show_name_helpers.makeSceneSeasonSearchString(show, season)] def _get_episode_search_strings(self, ep_obj): return [x for x in show_name_helpers.makeSceneSearchString(ep_obj)] def _get_title_and_url(self, item): return (item['release'].replace('_', '.'), item['getnzb']) def _doSearch(self, search, show=None, retention=0): self._checkAuth() params = {'user': sickbeard.OMGWTFNZBS_USERNAME, 'api': sickbeard.OMGWTFNZBS_APIKEY, 'eng': 1, 'nukes': 1, # show nuke info 'catid': '19,20', # SD,HD 'retention': sickbeard.USENET_RETENTION, 'search': search} if retention or not params['retention']: params['retention'] = retention search_url = 'https://api.omgwtfnzbs.org/json/?' 
+ urllib.urlencode(params) logger.log(u"Search url: " + search_url, logger.DEBUG) data = self.getURL(search_url) if not data: logger.log(u"No data returned from " + search_url, logger.ERROR) return [] parsedJSON = helpers.parse_json(data) if parsedJSON is None: logger.log(u"Error trying to load " + self.name + " JSON data", logger.ERROR) return [] if self._checkAuthFromData(parsedJSON, is_XML=False): results = [] for item in parsedJSON: if 'nuked' in item and item['nuked'].startswith('1'): # logger.log(u"Skipping nuked release: " + item['release'], logger.DEBUG) continue if 'release' in item and 'getnzb' in item: results.append(item) return results return [] def findPropers(self, search_date=None): search_terms = ['.PROPER.', '.REPACK.'] results = [] for term in search_terms: for item in self._doSearch(term, retention=4): if 'usenetage' in item: title, url = self._get_title_and_url(item) try: result_date = datetime.fromtimestamp(int(item['usenetage'])) except: result_date = None if result_date: results.append(classes.Proper(title, url, result_date)) return results class OmgwtfnzbsCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) self.minTime = 20 def _getRSSData(self): params = {'user': sickbeard.OMGWTFNZBS_USERNAME, 'api': sickbeard.OMGWTFNZBS_APIKEY, 'eng': 1, 'delay': 30, 'catid': '19,20'} # SD,HD rss_url = 'https://rss.omgwtfnzbs.org/rss-download.php?' + urllib.urlencode(params) logger.log(self.provider.name + u" cache update URL: " + rss_url, logger.DEBUG) data = self.provider.getURL(rss_url) if not data: logger.log(u"No data returned from " + rss_url, logger.ERROR) return None return data def _checkAuth(self, parsedXML): return self.provider._checkAuthFromData(parsedXML) provider = OmgwtfnzbsProvider()
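Note: the nuke filtering and title normalization in _doSearch()/_get_title_and_url() can be exercised without a network. A small self-contained sketch; the sample items and URLs are made up, while the field names ('nuked', 'release', 'getnzb') come from the provider code above.

def extract_results(parsed_json):
    # Keep only usable items: skip nuked releases, then normalize each
    # item to a (title, url) pair the way _get_title_and_url() does.
    results = []
    for item in parsed_json:
        if 'nuked' in item and item['nuked'].startswith('1'):
            continue
        if 'release' in item and 'getnzb' in item:
            results.append((item['release'].replace('_', '.'), item['getnzb']))
    return results


sample = [
    {'release': 'Some_Show_S01E01', 'getnzb': 'http://example.invalid/1', 'nuked': '0'},
    {'release': 'Nuked_Release', 'getnzb': 'http://example.invalid/2', 'nuked': '1'},
]
print(extract_results(sample))  # [('Some.Show.S01E01', 'http://example.invalid/1')]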
myusuf3/delorean
refs/heads/master
delorean/exceptions.py
4
class DeloreanError(Exception):
    """
    Base Delorean Exception class
    """

    def __init__(self, msg):
        self.msg = str(msg)
        Exception.__init__(self, msg)

    def __str__(self):
        return self.msg


class DeloreanInvalidTimezone(DeloreanError):
    """
    Exception that is raised when an invalid timezone is passed in.
    """
    pass


class DeloreanInvalidDatetime(DeloreanError):
    """
    Exception that is raised when an improper datetime object is passed in.
    """
    pass
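Note: because both subclasses inherit from DeloreanError, callers can catch the whole family via the base class. A usage sketch; lookup_timezone and its known list are hypothetical, only the exception classes come from the module above.

def lookup_timezone(name, known=("UTC", "US/Eastern")):
    # Hypothetical caller; raises the subclass on bad input.
    if name not in known:
        raise DeloreanInvalidTimezone("unknown timezone: %s" % name)
    return name


try:
    lookup_timezone("Mars/Olympus_Mons")
except DeloreanError as e:   # the base class catches both subclasses
    print(e)                 # unknown timezone: Mars/Olympus_Mons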
nicholaslemay/python_koans
refs/heads/master
python3/runner/runner_tests/test_helper.py
244
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import unittest

from runner import helper


class TestHelper(unittest.TestCase):

    def test_that_get_class_name_works_with_a_string_instance(self):
        self.assertEqual("str", helper.cls_name(str()))

    def test_that_get_class_name_works_with_a_4(self):
        self.assertEquals("int", helper.cls_name(4))

    def test_that_get_class_name_works_with_a_tuple(self):
        self.assertEquals("tuple", helper.cls_name((3, "pie", [])))
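Note: these tests pin down the contract of runner.helper.cls_name (return the class name of any value). A minimal implementation that would satisfy them, as a sketch; the real helper module may do more.

def cls_name(obj):
    # "str" for str(), "int" for 4, "tuple" for (3, "pie", []) ...
    return obj.__class__.__name__


assert cls_name(str()) == "str"
assert cls_name(4) == "int"
assert cls_name((3, "pie", [])) == "tuple"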
Intel-Corporation/tensorflow
refs/heads/master
tensorflow/python/training/sync_replicas_optimizer_test.py
19
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for sync_replicas_optimizer.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.framework.test_util import create_local_cluster from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adam from tensorflow.python.training import gradient_descent from tensorflow.python.training import training # Creates the workers and return their sessions, graphs, train_ops. def get_workers(num_workers, replicas_to_aggregate, workers): sessions = [] graphs = [] train_ops = [] for worker_id in range(num_workers): graph = ops.Graph() is_chief = (worker_id == 0) with graph.as_default(): with ops.device("/job:ps/task:0"): global_step = variables.VariableV1( 0, name="global_step", trainable=False) var_0 = variables.VariableV1(0.0, name="v0") with ops.device("/job:ps/task:1"): var_1 = variables.VariableV1(1.0, name="v1") var_sparse = variables.VariableV1([[3.0], [4.0]], name="v_sparse") with ops.device("/job:worker/task:" + str(worker_id)): grads_0 = constant_op.constant(0.1 + worker_id * 0.2) grads_1 = constant_op.constant(0.9 + worker_id * 0.2) # This is to test against sparse gradients. grads_sparse = ops.IndexedSlices( constant_op.constant( [0.1 + worker_id * 0.2], shape=[1, 1]), constant_op.constant([1]), constant_op.constant([2, 1])) sgd_opt = gradient_descent.GradientDescentOptimizer(2.0) sync_rep_opt = training.SyncReplicasOptimizer( sgd_opt, replicas_to_aggregate=replicas_to_aggregate, total_num_replicas=num_workers) train_op = [ sync_rep_opt.apply_gradients( zip([grads_0, grads_1, grads_sparse], [var_0, var_1, var_sparse]), global_step=global_step) ] sync_replicas_hook = sync_rep_opt.make_session_run_hook( is_chief, num_tokens=num_workers) # Creates MonitoredSession session = training.MonitoredTrainingSession( master=workers[worker_id].target, is_chief=is_chief, hooks=[sync_replicas_hook]) sessions.append(session) graphs.append(graph) train_ops.append(train_op) return sessions, graphs, train_ops class SyncReplicasOptimizerTest(test.TestCase): def _run(self, train_op, sess): sess.run(train_op) @test_util.run_v1_only("b/120545219") def test2Workers(self): num_workers = 2 replicas_to_aggregate = 2 num_ps = 2 workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps) # Creates and returns all the workers. sessions, graphs, train_ops = get_workers(num_workers, replicas_to_aggregate, workers) # Chief should have already initialized all the variables. 
var_0_g_0 = graphs[0].get_tensor_by_name("v0:0") var_1_g_0 = graphs[0].get_tensor_by_name("v1:0") local_step_0 = graphs[0].get_tensor_by_name("sync_rep_local_step:0") self.assertAllEqual(0.0, sessions[0].run(var_0_g_0)) self.assertAllEqual(1.0, sessions[0].run(var_1_g_0)) self.assertAllEqual(0, sessions[0].run(local_step_0)) # Will just use session 1 to verify all the variables later. var_0_g_1 = graphs[1].get_tensor_by_name("v0:0") var_1_g_1 = graphs[1].get_tensor_by_name("v1:0") var_sparse_g_1 = graphs[1].get_tensor_by_name("v_sparse:0") local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0") global_step = graphs[1].get_tensor_by_name("global_step:0") # The steps should also be initialized. self.assertAllEqual(0, sessions[1].run(global_step)) self.assertAllEqual(0, sessions[1].run(local_step_1)) self.assertAllClose([[3.0], [4.0]], sessions[1].run(var_sparse_g_1)) # We have initial tokens in the queue so we can call this one by one. After # the first step, this will no longer work as there will be no more extra # tokens in the queue. sessions[0].run(train_ops[0]) sessions[1].run(train_ops[1]) # The global step should have been updated and the variables should now have # the new values after the average of the gradients are applied. while sessions[1].run(global_step) != 1: time.sleep(0.01) self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1)) self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1)) self.assertAllClose([[3.0], [4.0 - (0.1 + 0.3) / 2 * 2.0]], sessions[1].run(var_sparse_g_1)) # The local step for both workers should still be 0 because the initial # tokens in the token queue are 0s. This means that the following # computation of the gradients will be wasted as local_step is smaller than # the current global step. However, this only happens once when the system # just starts and this is necessary to make the system robust for the case # when chief gets restarted by errors/preemption/... self.assertAllEqual(0, sessions[0].run(local_step_0)) self.assertAllEqual(0, sessions[1].run(local_step_1)) sessions[0].run(train_ops[0]) sessions[1].run(train_ops[1]) # Although the global step should still be 1 as explained above, the local # step should now be updated to 1. The variables are still the same. self.assertAllEqual(1, sessions[1].run(global_step)) self.assertAllEqual(1, sessions[0].run(local_step_0)) self.assertAllEqual(1, sessions[1].run(local_step_1)) self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1)) self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1)) # At this step, the token queue is empty. So the 2 workers need to work # together to proceed. threads = [] threads.append( self.checkedThread( target=self._run, args=(train_ops[0], sessions[0]))) threads.append( self.checkedThread( target=self._run, args=(train_ops[1], sessions[1]))) # The two workers starts to execute the train op. for thread in threads: thread.start() for thread in threads: thread.join() # The global step should now be 2 and the gradients should have been # applied twice. self.assertAllEqual(2, sessions[1].run(global_step)) self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1)) self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1)) # 3 workers and one of them is backup. 
@test_util.run_v1_only("b/120545219") def test3Workers1Backup(self): num_workers = 3 replicas_to_aggregate = 2 num_ps = 2 workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps) # Creates and returns all the workers. sessions, graphs, train_ops = get_workers(num_workers, replicas_to_aggregate, workers) # Chief should have already initialized all the variables. var_0_g_1 = graphs[1].get_tensor_by_name("v0:0") var_1_g_1 = graphs[1].get_tensor_by_name("v1:0") local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0") global_step = graphs[1].get_tensor_by_name("global_step:0") # The steps should also be initilized. self.assertAllEqual(0, sessions[1].run(global_step)) self.assertAllEqual(0, sessions[1].run(local_step_1)) # We have initial tokens in the queue so we can call this one by one. After # the token queue becomes empty, they should be called concurrently. # Here worker 0 and worker 2 finished first. sessions[0].run(train_ops[0]) sessions[2].run(train_ops[2]) # The global step should have been updated since we only need to collect 2 # gradients. The variables should now have the new values after the average # of the gradients from worker 0/2 are applied. while sessions[1].run(global_step) != 1: time.sleep(0.01) self.assertAllEqual(1, sessions[1].run(global_step)) self.assertAllClose(0 - (0.1 + 0.5) / 2 * 2.0, sessions[1].run(var_0_g_1)) self.assertAllClose(1 - (0.9 + 1.3) / 2 * 2.0, sessions[1].run(var_1_g_1)) # Worker 1 finished later and its gradients will now be dropped as it is # stale. sessions[1].run(train_ops[1]) # As shown in the previous test, the local_step for all workers should be # still 0 so their next computation will also be dropped. sessions[0].run(train_ops[0]) sessions[1].run(train_ops[1]) sessions[2].run(train_ops[2]) # Although the global step should still be 1 as explained above, the local # step should now be updated to 1. Just check worker 1 as an example. self.assertAllEqual(1, sessions[1].run(global_step)) self.assertAllEqual(1, sessions[1].run(local_step_1)) thread_0 = self.checkedThread( target=self._run, args=(train_ops[0], sessions[0])) thread_1 = self.checkedThread( target=self._run, args=(train_ops[1], sessions[1])) # Lets worker 0 execute first. # It will wait as we need 2 workers to finish this step and the global step # should be still 1. thread_0.start() self.assertAllEqual(1, sessions[1].run(global_step)) # Starts worker 1. thread_1.start() thread_1.join() thread_0.join() # The global step should now be 2 and the gradients should have been # applied again. 
self.assertAllEqual(2, sessions[1].run(global_step)) self.assertAllClose(-0.6 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1)) self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1)) class SyncReplicasOptimizerHookTest(test.TestCase): def testErrorIfUsedBeforeMinimizeCalled(self): opt = training.SyncReplicasOptimizer( opt=gradient_descent.GradientDescentOptimizer(1.0), replicas_to_aggregate=1, total_num_replicas=1) hook = opt.make_session_run_hook(True) with self.assertRaisesRegexp(ValueError, "apply_gradient should be called"): hook.begin() @test_util.run_v1_only("b/120545219") def testCanCreatedBeforeMinimizeCalled(self): """This behavior is required to be integrated with Estimators.""" opt = training.SyncReplicasOptimizer( opt=gradient_descent.GradientDescentOptimizer(1.0), replicas_to_aggregate=1, total_num_replicas=1) hook = opt.make_session_run_hook(True) v = variables.VariableV1([0.]) global_step = variables.VariableV1(0, name="global_step", trainable=False) opt.minimize(v, global_step=global_step) hook.begin() @test_util.run_v1_only("b/120545219") def testFetchVariableList(self): opt = training.SyncReplicasOptimizer( opt=adam.AdamOptimizer(0.01), replicas_to_aggregate=1, total_num_replicas=1) v = variables.VariableV1([0.], name="fetch_variable_test") global_step = variables.VariableV1(0, name="global_step", trainable=False) opt.minimize(v, global_step=global_step) opt_variables = opt.variables() beta1_power, beta2_power = opt._opt._get_beta_accumulators() self.assertIn(beta1_power, opt_variables) self.assertIn(beta2_power, opt_variables) if __name__ == "__main__": test.main()
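Note: the expected values asserted above are plain SGD arithmetic: with replicas_to_aggregate=2 and learning rate 2.0, each global step applies var -= lr * mean(grads). A pure-Python check of the numbers used in test2Workers:

def synced_sgd_step(var, grads, lr=2.0):
    # One aggregated step: average the replica gradients, apply SGD once.
    return var - lr * sum(grads) / len(grads)


# Worker gradients for v0 are 0.1 + worker_id * 0.2 (workers 0 and 1).
print(synced_sgd_step(0.0, [0.1, 0.3]))  # ~ -0.4, var_0 after the first step
print(synced_sgd_step(1.0, [0.9, 1.1]))  # ~ -1.0, var_1 after the first step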
kumarshivam675/Mobile10X-Hack
refs/heads/master
irctc.py
1
import urllib2
import hmac
from hashlib import sha1
import json


def MyPnrTestFn():
    # Setting credentials
    apiCreds = {}
    apiCreds["Key"] = "Your public key"
    apiCreds["Secret"] = "Your api secret/private key"

    pnr = "1234567890"  # Test PNR
    requestUrlTemplate = "http://railpnrapi.com/api/check_pnr/pnr/" + pnr + "/format/json/pbapikey/" + apiCreds["Key"] + "/pbapisign/"

    # Gather all the request parameters
    paramset = {}
    paramset["pnr"] = pnr
    paramset["format"] = "json"
    paramset["pbapikey"] = apiCreds["Key"]

    # Sort the keys and concatenate their values
    keylist = sorted(paramset.keys())
    inputString = ''
    for item in keylist:
        inputString = inputString + str(paramset[item])

    # Compute hash
    signature = GenerateHmac(inputString, apiCreds["Secret"])
    requestUrl = requestUrlTemplate + str(signature)
    response = urllib2.urlopen(requestUrl).read()

    # Parse JSON response
    jsonOut = json.loads(response)

    # Display response
    print "Response Code:", jsonOut["response_code"]
    print "PNR: ", jsonOut["pnr"]
    print "Train No.: ", jsonOut["train_num"]
    print "Train Name: ", jsonOut["train_name"]
    print "DOJ: ", jsonOut["doj"]
    print "From Station (Code/Name): " + jsonOut["from_station"]["code"] + "/" + jsonOut["from_station"]["name"]
    print "To Station (Code/Name): " + jsonOut["to_station"]["code"] + "/" + jsonOut["to_station"]["name"]
    print "Reservation Upto (Code/Name): " + jsonOut["reservation_upto"]["code"] + "/" + jsonOut["reservation_upto"]["name"]
    print "Boarding Point (Code/Name): " + jsonOut["boarding_point"]["code"] + "/" + jsonOut["boarding_point"]["name"]
    print "Class: ", jsonOut["class"]
    print "No. of Passengers: ", jsonOut["no_of_passengers"]
    print "Chart Status: ", jsonOut["chart_prepared"]
    print
    print "Passengers: "
    print "********************************"
    for passenger in jsonOut["passengers"]:
        print "Passenger #:" + str(passenger["sr"]) + ", Booking Status:" + passenger["booking_status"] + ", Current Status:" + passenger["current_status"]
    print
    print "********************************"


def GenerateHmac(input, bytekey):
    myHmacSha1 = hmac.new(bytekey, input, sha1)
    return myHmacSha1.hexdigest()


MyPnrTestFn()
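Note: the signing scheme concatenates the parameter values in sorted-key order and HMAC-SHA1s the result with the API secret. The script above is Python 2; a Python 3 sketch of the same computation (hmac there requires bytes, and the key/params below are dummies):

import hmac
from hashlib import sha1


def sign_request(params, secret):
    # Concatenate values in sorted-key order, then HMAC-SHA1 with the secret.
    payload = ''.join(str(params[k]) for k in sorted(params))
    return hmac.new(secret.encode(), payload.encode(), sha1).hexdigest()


params = {"pnr": "1234567890", "format": "json", "pbapikey": "dummy-key"}
print(sign_request(params, "dummy-secret"))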
codeaudit/pattern-1
refs/heads/master
pattern/web/soup/setup.py
50
from distutils.core import setup import unittest import warnings warnings.filterwarnings("ignore", "Unknown distribution option") import sys # patch distutils if it can't cope with the "classifiers" keyword if sys.version < '2.2.3': from distutils.dist import DistributionMetadata DistributionMetadata.classifiers = None DistributionMetadata.download_url = None from BeautifulSoup import __version__ #Make sure all the tests complete. import BeautifulSoupTests loader = unittest.TestLoader() result = unittest.TestResult() suite = loader.loadTestsFromModule(BeautifulSoupTests) suite.run(result) if not result.wasSuccessful(): print "Unit tests have failed!" for l in result.errors, result.failures: for case, error in l: print "-" * 80 desc = case.shortDescription() if desc: print desc print error print '''If you see an error like: "'ascii' codec can't encode character...", see\nthe Beautiful Soup documentation:\n http://www.crummy.com/software/BeautifulSoup/documentation.html#Why%20can't%20Beautiful%20Soup%20print%20out%20the%20non-ASCII%20characters%20I%20gave%20it?''' print "This might or might not be a problem depending on what you plan to do with\nBeautiful Soup." if sys.argv[1] == 'sdist': print print "I'm not going to make a source distribution since the tests don't pass." sys.exit(1) setup(name="BeautifulSoup", version=__version__, py_modules=['BeautifulSoup', 'BeautifulSoupTests'], description="HTML/XML parser for quick-turnaround applications like screen-scraping.", author="Leonard Richardson", author_email = "[email protected]", long_description="""Beautiful Soup parses arbitrarily invalid SGML and provides a variety of methods and Pythonic idioms for iterating and searching the parse tree.""", classifiers=["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Python Software Foundation License", "Programming Language :: Python", "Topic :: Text Processing :: Markup :: HTML", "Topic :: Text Processing :: Markup :: XML", "Topic :: Text Processing :: Markup :: SGML", "Topic :: Software Development :: Libraries :: Python Modules", ], url="http://www.crummy.com/software/BeautifulSoup/", license="BSD", download_url="http://www.crummy.com/software/BeautifulSoup/download/" ) # Send announce to: # [email protected] # [email protected]
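Note: the gatekeeping idea above (run the test suite inline and refuse to build an sdist while it fails) is reusable outside this setup.py. A self-contained Python 3 sketch; SmokeTests stands in for the package's real test module.

import sys
import unittest


class SmokeTests(unittest.TestCase):
    # Stand-in for the package's real test module.
    def test_truth(self):
        self.assertTrue(True)


suite = unittest.TestLoader().loadTestsFromTestCase(SmokeTests)
result = unittest.TestResult()
suite.run(result)

if not result.wasSuccessful():
    for case, traceback_text in result.errors + result.failures:
        print("-" * 80)
        print(case)
        print(traceback_text)
    if len(sys.argv) > 1 and sys.argv[1] == "sdist":
        sys.exit(1)  # refuse to package a failing build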
sjdv1982/seamless
refs/heads/master
docs/archive/spyder-like-silk/classes/primitives.py
1
# Copyright 2007-2016, Sjoerd de Vries # TODO: primitive arrays import ast import numpy as np from . import SilkObject, SilkStringLike class Float(float, SilkObject): """Wrapper class around a Python float Uses float32 as numpy representation""" dtype = np.float32 def json(self): return self def __eq__(self, other): return float(self) == other def _print(self, spaces): return str(self) class Integer(int, SilkObject): """Wrapper class around a Python int Uses int32 as numpy representation""" dtype = np.int32 def json(self): return self def __eq__(self, other): return int(self) == other def _print(self, spaces): return str(self) class String(str, SilkStringLike): """Wrapper class around a Python string Numpy representation is an UTF-8-encoded 255-length byte string""" dtype = '|S255' def __new__(self, s): if s is None: raise ValueError if isinstance(s, String): return str.__new__(self, s) if isinstance(s, bytes): return str.__new__(self, s.decode()) s = str(s) if len(s) and s[0] == s[-1]: if s[0] in ("'", '"'): try: astree = ast.parse(s) s = list(ast.iter_fields(astree))[0][1][0].value.s except Exception: pass ret = str.__new__(self, s) ret._validate() return ret def _validate(self): pass def json(self): return self def __eq__(self, other): return str.__eq__(self, other) def __hash__(self): return str.__hash__(self) def _print(self, spaces): return '"' + str.__str__(self) + '"' class Bool(int, SilkObject): """Class that emulates a Python bool Unlike bool, "True" is equivalent to True and "False" is equivalent to False""" dtype = np.bool def __new__(self, b): if b == "True" or b == "\'True\'" or b == "\"True\"": return int.__new__(self, True) elif b == "False" or b == "\'False\'" or b == "\"False\"": return int.__new__(self, False) else: return int.__new__(self, bool(b)) def __str__(self): if self is False: return "False" else: return "True" def json(self): if self: return True else: return False def __eq__(self, other): return bool(self) == other def _print(self, spaces): return str(self) class Double(Float): """Wrapper class around a Python float Uses float64 as binary representation""" dtype = np.float64 class Long(Integer): """Wrapper class around a Python integer Uses int64 as binary representation""" dtype = np.int64
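Note: every class above relies on overriding __new__ on an immutable built-in base. A stripped-down sketch of the same pattern for Bool, without the SilkObject machinery:

class Bool(int):
    # Accept the strings "True"/"False" (optionally quoted) as well as
    # ordinary truthy/falsy values, like the Silk Bool above.
    def __new__(cls, b):
        if b in ("True", "'True'", '"True"'):
            return int.__new__(cls, True)
        if b in ("False", "'False'", '"False"'):
            return int.__new__(cls, False)
        return int.__new__(cls, bool(b))

    def __str__(self):
        return "True" if self else "False"


print(Bool("False"), Bool(1))  # False True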
wdv4758h/ZipPy
refs/heads/master
lib-python/3/wsgiref/validate.py
51
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php # Also licenced under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php # Licensed to PSF under a Contributor Agreement """ Middleware to check for obedience to the WSGI specification. Some of the things this checks: * Signature of the application and start_response (including that keyword arguments are not used). * Environment checks: - Environment is a dictionary (and not a subclass). - That all the required keys are in the environment: REQUEST_METHOD, SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors, wsgi.multithread, wsgi.multiprocess, wsgi.run_once - That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the environment (these headers should appear as CONTENT_LENGTH and CONTENT_TYPE). - Warns if QUERY_STRING is missing, as the cgi module acts unpredictably in that case. - That CGI-style variables (that don't contain a .) have (non-unicode) string values - That wsgi.version is a tuple - That wsgi.url_scheme is 'http' or 'https' (@@: is this too restrictive?) - Warns if the REQUEST_METHOD is not known (@@: probably too restrictive). - That SCRIPT_NAME and PATH_INFO are empty or start with / - That at least one of SCRIPT_NAME or PATH_INFO are set. - That CONTENT_LENGTH is a positive integer. - That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should be '/'). - That wsgi.input has the methods read, readline, readlines, and __iter__ - That wsgi.errors has the methods flush, write, writelines * The status is a string, contains a space, starts with an integer, and that integer is in range (> 100). * That the headers is a list (not a subclass, not another kind of sequence). * That the items of the headers are tuples of strings. * That there is no 'status' header (that is used in CGI, but not in WSGI). * That the headers don't contain newlines or colons, end in _ or -, or contain characters codes below 037. * That Content-Type is given if there is content (CGI often has a default content type, but WSGI does not). * That no Content-Type is given when there is no content (@@: is this too restrictive?) * That the exc_info argument to start_response is a tuple or None. * That all calls to the writer are with strings, and no other methods on the writer are accessed. * That wsgi.input is used properly: - .read() is called with zero or one argument - That it returns a string - That readline, readlines, and __iter__ return strings - That .close() is not called - No other methods are provided * That wsgi.errors is used properly: - .write() and .writelines() is called with a string - That .close() is not called, and no other methods are provided. * The response iterator: - That it is not a string (it should be a list of a single string; a string will work, but perform horribly). - That .__next__() returns a string - That the iterator is not iterated over until start_response has been called (that can signal either a server or application error). - That .close() is called (doesn't raise exception, only prints to sys.stderr, because we only know it isn't called when the object is garbage collected). 
""" __all__ = ['validator'] import re import sys import warnings header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$') bad_header_value_re = re.compile(r'[\000-\037]') class WSGIWarning(Warning): """ Raised in response to WSGI-spec-related warnings """ def assert_(cond, *args): if not cond: raise AssertionError(*args) def check_string_type(value, title): if type (value) is str: return value raise AssertionError( "{0} must be of type str (got {1})".format(title, repr(value))) def validator(application): """ When applied between a WSGI server and a WSGI application, this middleware will check for WSGI compliancy on a number of levels. This middleware does not modify the request or response in any way, but will throw an AssertionError if anything seems off (except for a failure to close the application iterator, which will be printed to stderr -- there's no way to throw an exception at that point). """ def lint_app(*args, **kw): assert_(len(args) == 2, "Two arguments required") assert_(not kw, "No keyword arguments allowed") environ, start_response = args check_environ(environ) # We use this to check if the application returns without # calling start_response: start_response_started = [] def start_response_wrapper(*args, **kw): assert_(len(args) == 2 or len(args) == 3, ( "Invalid number of arguments: %s" % (args,))) assert_(not kw, "No keyword arguments allowed") status = args[0] headers = args[1] if len(args) == 3: exc_info = args[2] else: exc_info = None check_status(status) check_headers(headers) check_content_type(status, headers) check_exc_info(exc_info) start_response_started.append(None) return WriteWrapper(start_response(*args)) environ['wsgi.input'] = InputWrapper(environ['wsgi.input']) environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors']) iterator = application(environ, start_response_wrapper) assert_(iterator is not None and iterator != False, "The application must return an iterator, if only an empty list") check_iterator(iterator) return IteratorWrapper(iterator, start_response_started) return lint_app class InputWrapper: def __init__(self, wsgi_input): self.input = wsgi_input def read(self, *args): assert_(len(args) == 1) v = self.input.read(*args) assert_(type(v) is bytes) return v def readline(self, *args): assert_(len(args) <= 1) v = self.input.readline(*args) assert_(type(v) is bytes) return v def readlines(self, *args): assert_(len(args) <= 1) lines = self.input.readlines(*args) assert_(type(lines) is list) for line in lines: assert_(type(line) is bytes) return lines def __iter__(self): while 1: line = self.readline() if not line: return yield line def close(self): assert_(0, "input.close() must not be called") class ErrorWrapper: def __init__(self, wsgi_errors): self.errors = wsgi_errors def write(self, s): assert_(type(s) is str) self.errors.write(s) def flush(self): self.errors.flush() def writelines(self, seq): for line in seq: self.write(line) def close(self): assert_(0, "errors.close() must not be called") class WriteWrapper: def __init__(self, wsgi_writer): self.writer = wsgi_writer def __call__(self, s): assert_(type(s) is bytes) self.writer(s) class PartialIteratorWrapper: def __init__(self, wsgi_iterator): self.iterator = wsgi_iterator def __iter__(self): # We want to make sure __iter__ is called return IteratorWrapper(self.iterator, None) class IteratorWrapper: def __init__(self, wsgi_iterator, check_start_response): self.original_iterator = wsgi_iterator self.iterator = iter(wsgi_iterator) self.closed = False self.check_start_response = 
check_start_response def __iter__(self): return self def __next__(self): assert_(not self.closed, "Iterator read after closed") v = next(self.iterator) if type(v) is not bytes: assert_(False, "Iterator yielded non-bytestring (%r)" % (v,)) if self.check_start_response is not None: assert_(self.check_start_response, "The application returns and we started iterating over its body, but start_response has not yet been called") self.check_start_response = None return v def close(self): self.closed = True if hasattr(self.original_iterator, 'close'): self.original_iterator.close() def __del__(self): if not self.closed: sys.stderr.write( "Iterator garbage collected without being closed") assert_(self.closed, "Iterator garbage collected without being closed") def check_environ(environ): assert_(type(environ) is dict, "Environment is not of the right type: %r (environment: %r)" % (type(environ), environ)) for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT', 'wsgi.version', 'wsgi.input', 'wsgi.errors', 'wsgi.multithread', 'wsgi.multiprocess', 'wsgi.run_once']: assert_(key in environ, "Environment missing required key: %r" % (key,)) for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']: assert_(key not in environ, "Environment should not have the key: %s " "(use %s instead)" % (key, key[5:])) if 'QUERY_STRING' not in environ: warnings.warn( 'QUERY_STRING is not in the WSGI environment; the cgi ' 'module will use sys.argv when this variable is missing, ' 'so application errors are more likely', WSGIWarning) for key in environ.keys(): if '.' in key: # Extension, we don't care about its type continue assert_(type(environ[key]) is str, "Environmental variable %s is not a string: %r (value: %r)" % (key, type(environ[key]), environ[key])) assert_(type(environ['wsgi.version']) is tuple, "wsgi.version should be a tuple (%r)" % (environ['wsgi.version'],)) assert_(environ['wsgi.url_scheme'] in ('http', 'https'), "wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme']) check_input(environ['wsgi.input']) check_errors(environ['wsgi.errors']) # @@: these need filling out: if environ['REQUEST_METHOD'] not in ( 'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'): warnings.warn( "Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'], WSGIWarning) assert_(not environ.get('SCRIPT_NAME') or environ['SCRIPT_NAME'].startswith('/'), "SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME']) assert_(not environ.get('PATH_INFO') or environ['PATH_INFO'].startswith('/'), "PATH_INFO doesn't start with /: %r" % environ['PATH_INFO']) if environ.get('CONTENT_LENGTH'): assert_(int(environ['CONTENT_LENGTH']) >= 0, "Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH']) if not environ.get('SCRIPT_NAME'): assert_('PATH_INFO' in environ, "One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO " "should at least be '/' if SCRIPT_NAME is empty)") assert_(environ.get('SCRIPT_NAME') != '/', "SCRIPT_NAME cannot be '/'; it should instead be '', and " "PATH_INFO should be '/'") def check_input(wsgi_input): for attr in ['read', 'readline', 'readlines', '__iter__']: assert_(hasattr(wsgi_input, attr), "wsgi.input (%r) doesn't have the attribute %s" % (wsgi_input, attr)) def check_errors(wsgi_errors): for attr in ['flush', 'write', 'writelines']: assert_(hasattr(wsgi_errors, attr), "wsgi.errors (%r) doesn't have the attribute %s" % (wsgi_errors, attr)) def check_status(status): status = check_string_type(status, "Status") # Implicitly check that we can turn it into an integer: status_code = status.split(None, 1)[0] 
assert_(len(status_code) == 3, "Status codes must be three characters: %r" % status_code) status_int = int(status_code) assert_(status_int >= 100, "Status code is invalid: %r" % status_int) if len(status) < 4 or status[3] != ' ': warnings.warn( "The status string (%r) should be a three-digit integer " "followed by a single space and a status explanation" % status, WSGIWarning) def check_headers(headers): assert_(type(headers) is list, "Headers (%r) must be of type list: %r" % (headers, type(headers))) header_names = {} for item in headers: assert_(type(item) is tuple, "Individual headers (%r) must be of type tuple: %r" % (item, type(item))) assert_(len(item) == 2) name, value = item name = check_string_type(name, "Header name") value = check_string_type(value, "Header value") assert_(name.lower() != 'status', "The Status header cannot be used; it conflicts with CGI " "script, and HTTP status is not given through headers " "(value: %r)." % value) header_names[name.lower()] = None assert_('\n' not in name and ':' not in name, "Header names may not contain ':' or '\\n': %r" % name) assert_(header_re.search(name), "Bad header name: %r" % name) assert_(not name.endswith('-') and not name.endswith('_'), "Names may not end in '-' or '_': %r" % name) if bad_header_value_re.search(value): assert_(0, "Bad header value: %r (bad char: %r)" % (value, bad_header_value_re.search(value).group(0))) def check_content_type(status, headers): status = check_string_type(status, "Status") code = int(status.split(None, 1)[0]) # @@: need one more person to verify this interpretation of RFC 2616 # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html NO_MESSAGE_BODY = (204, 304) for name, value in headers: name = check_string_type(name, "Header name") if name.lower() == 'content-type': if code not in NO_MESSAGE_BODY: return assert_(0, ("Content-Type header found in a %s response, " "which must not return content.") % code) if code not in NO_MESSAGE_BODY: assert_(0, "No Content-Type header found in headers (%s)" % headers) def check_exc_info(exc_info): assert_(exc_info is None or type(exc_info) is tuple, "exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info))) # More exc_info checks? def check_iterator(iterator): # Technically a bytestring is legal, which is why it's a really bad # idea, because it may cause the response to be returned # character-by-character assert_(not isinstance(iterator, (str, bytes)), "You should not return a string as your application iterator, " "instead return a single-item list containing a bytestring.")
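Note: validator is applied by wrapping a WSGI application before handing it to a server. A minimal runnable sketch using the stdlib reference server (host and port are arbitrary):

from wsgiref.simple_server import make_server
from wsgiref.validate import validator


def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']  # body items must be bytes, as the checks above enforce


httpd = make_server('localhost', 8000, validator(app))
httpd.handle_request()  # serve one request; violations raise AssertionError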
laurent-george/bokeh
refs/heads/master
examples/charts/file/dots.py
37
from collections import OrderedDict

from bokeh.charts import Dot, show, output_file

# create some example data
xyvalues = OrderedDict(
    python=[2, 3, 7, 5, 26],
    pypy=[12, 33, 47, 15, 126],
    jython=[22, 43, 10, 25, 26],
)

# any of the following commented are also valid Dot inputs
#xyvalues = pd.DataFrame(xyvalues)
#xyvalues = list(xyvalues.values())
#xyvalues = np.array(list(xyvalues.values()))

output_file("dots.html")

dots = Dot(
    xyvalues, cat=['lists', 'loops', 'dicts', 'gen exp', 'exceptions'],
    title="Dots Example", ylabel='Performance', legend=True
)

show(dots)
liorvh/raspberry_pwn
refs/heads/master
src/pentest/metagoofil/hachoir_parser/video/asf.py
9
""" Advanced Streaming Format (ASF) parser, format used by Windows Media Video (WMF) and Windows Media Audio (WMA). Informations: - http://www.microsoft.com/windows/windowsmedia/forpros/format/asfspec.aspx - http://swpat.ffii.org/pikta/xrani/asf/index.fr.html Author: Victor Stinner Creation: 5 august 2006 """ from hachoir_parser import Parser from hachoir_core.field import (FieldSet, ParserError, UInt16, UInt32, UInt64, TimestampWin64, TimedeltaWin64, String, PascalString16, Enum, Bit, Bits, PaddingBits, PaddingBytes, NullBytes, RawBytes) from hachoir_core.endian import LITTLE_ENDIAN from hachoir_core.text_handler import ( textHandler, displayHandler, filesizeHandler) from hachoir_core.tools import humanBitRate from itertools import izip from hachoir_parser.video.fourcc import audio_codec_name, video_fourcc_name from hachoir_parser.common.win32 import BitmapInfoHeader, GUID MAX_HEADER_SIZE = 100 * 1024 # bytes class AudioHeader(FieldSet): guid = "F8699E40-5B4D-11CF-A8FD-00805F5C442B" def createFields(self): yield Enum(UInt16(self, "twocc"), audio_codec_name) yield UInt16(self, "channels") yield UInt32(self, "sample_rate") yield UInt32(self, "bit_rate") yield UInt16(self, "block_align") yield UInt16(self, "bits_per_sample") yield UInt16(self, "codec_specific_size") size = self["codec_specific_size"].value if size: yield RawBytes(self, "codec_specific", size) class BitrateMutualExclusion(FieldSet): guid = "D6E229DC-35DA-11D1-9034-00A0C90349BE" mutex_name = { "D6E22A00-35DA-11D1-9034-00A0C90349BE": "Language", "D6E22A01-35DA-11D1-9034-00A0C90349BE": "Bitrate", "D6E22A02-35DA-11D1-9034-00A0C90349BE": "Unknown", } def createFields(self): yield Enum(GUID(self, "exclusion_type"), self.mutex_name) yield UInt16(self, "nb_stream") for index in xrange(self["nb_stream"].value): yield UInt16(self, "stream[]") class VideoHeader(FieldSet): guid = "BC19EFC0-5B4D-11CF-A8FD-00805F5C442B" def createFields(self): if False: yield UInt32(self, "width0") yield UInt32(self, "height0") yield PaddingBytes(self, "reserved[]", 7) yield UInt32(self, "width") yield UInt32(self, "height") yield PaddingBytes(self, "reserved[]", 2) yield UInt16(self, "depth") yield Enum(String(self, "codec", 4, charset="ASCII"), video_fourcc_name) yield NullBytes(self, "padding", 20) else: yield UInt32(self, "width") yield UInt32(self, "height") yield PaddingBytes(self, "reserved[]", 1) yield UInt16(self, "format_data_size") if self["format_data_size"].value < 40: raise ParserError("Unknown format data size") yield BitmapInfoHeader(self, "bmp_info", use_fourcc=True) class FileProperty(FieldSet): guid = "8CABDCA1-A947-11CF-8EE4-00C00C205365" def createFields(self): yield GUID(self, "guid") yield filesizeHandler(UInt64(self, "file_size")) yield TimestampWin64(self, "creation_date") yield UInt64(self, "pckt_count") yield TimedeltaWin64(self, "play_duration") yield TimedeltaWin64(self, "send_duration") yield UInt64(self, "preroll") yield Bit(self, "broadcast", "Is broadcast?") yield Bit(self, "seekable", "Seekable stream?") yield PaddingBits(self, "reserved[]", 30) yield filesizeHandler(UInt32(self, "min_pckt_size")) yield filesizeHandler(UInt32(self, "max_pckt_size")) yield displayHandler(UInt32(self, "max_bitrate"), humanBitRate) class HeaderExtension(FieldSet): guid = "5FBF03B5-A92E-11CF-8EE3-00C00C205365" def createFields(self): yield GUID(self, "reserved[]") yield UInt16(self, "reserved[]") yield UInt32(self, "size") if self["size"].value: yield RawBytes(self, "data", self["size"].value) class Header(FieldSet): guid = 
"75B22630-668E-11CF-A6D9-00AA0062CE6C" def createFields(self): yield UInt32(self, "obj_count") yield PaddingBytes(self, "reserved[]", 2) for index in xrange(self["obj_count"].value): yield Object(self, "object[]") class Metadata(FieldSet): guid = "75B22633-668E-11CF-A6D9-00AA0062CE6C" names = ("title", "author", "copyright", "xxx", "yyy") def createFields(self): for index in xrange(5): yield UInt16(self, "size[]") for name, size in izip(self.names, self.array("size")): if size.value: yield String(self, name, size.value, charset="UTF-16-LE", strip=" \0") class Descriptor(FieldSet): """ See ExtendedContentDescription class. """ TYPE_BYTE_ARRAY = 1 TYPE_NAME = { 0: "Unicode", 1: "Byte array", 2: "BOOL (32 bits)", 3: "DWORD (32 bits)", 4: "QWORD (64 bits)", 5: "WORD (16 bits)" } def createFields(self): yield PascalString16(self, "name", "Name", charset="UTF-16-LE", strip="\0") yield Enum(UInt16(self, "type"), self.TYPE_NAME) yield UInt16(self, "value_length") type = self["type"].value size = self["value_length"].value name = "value" if type == 0 and (size % 2) == 0: yield String(self, name, size, charset="UTF-16-LE", strip="\0") elif type in (2, 3): yield UInt32(self, name) elif type == 4: yield UInt64(self, name) else: yield RawBytes(self, name, size) class ExtendedContentDescription(FieldSet): guid = "D2D0A440-E307-11D2-97F0-00A0C95EA850" def createFields(self): yield UInt16(self, "count") for index in xrange(self["count"].value): yield Descriptor(self, "descriptor[]") class Codec(FieldSet): """ See CodecList class. """ type_name = { 1: "video", 2: "audio" } def createFields(self): yield Enum(UInt16(self, "type"), self.type_name) yield UInt16(self, "name_len", "Name length in character (byte=len*2)") if self["name_len"].value: yield String(self, "name", self["name_len"].value*2, "Name", charset="UTF-16-LE", strip=" \0") yield UInt16(self, "desc_len", "Description length in character (byte=len*2)") if self["desc_len"].value: yield String(self, "desc", self["desc_len"].value*2, "Description", charset="UTF-16-LE", strip=" \0") yield UInt16(self, "info_len") if self["info_len"].value: yield RawBytes(self, "info", self["info_len"].value) class CodecList(FieldSet): guid = "86D15240-311D-11D0-A3A4-00A0C90348F6" def createFields(self): yield GUID(self, "reserved[]") yield UInt32(self, "count") for index in xrange(self["count"].value): yield Codec(self, "codec[]") class SimpleIndexEntry(FieldSet): """ See SimpleIndex class. """ def createFields(self): yield UInt32(self, "pckt_number") yield UInt16(self, "pckt_count") class SimpleIndex(FieldSet): guid = "33000890-E5B1-11CF-89F4-00A0C90349CB" def createFields(self): yield GUID(self, "file_id") yield TimedeltaWin64(self, "entry_interval") yield UInt32(self, "max_pckt_count") yield UInt32(self, "entry_count") for index in xrange(self["entry_count"].value): yield SimpleIndexEntry(self, "entry[]") class BitRate(FieldSet): """ See BitRateList class. 
""" def createFields(self): yield Bits(self, "stream_index", 7) yield PaddingBits(self, "reserved", 9) yield displayHandler(UInt32(self, "avg_bitrate"), humanBitRate) class BitRateList(FieldSet): guid = "7BF875CE-468D-11D1-8D82-006097C9A2B2" def createFields(self): yield UInt16(self, "count") for index in xrange(self["count"].value): yield BitRate(self, "bit_rate[]") class Data(FieldSet): guid = "75B22636-668E-11CF-A6D9-00AA0062CE6C" def createFields(self): yield GUID(self, "file_id") yield UInt64(self, "packet_count") yield PaddingBytes(self, "reserved", 2) size = (self.size - self.current_size) / 8 yield RawBytes(self, "data", size) class StreamProperty(FieldSet): guid = "B7DC0791-A9B7-11CF-8EE6-00C00C205365" def createFields(self): yield GUID(self, "type") yield GUID(self, "error_correction") yield UInt64(self, "time_offset") yield UInt32(self, "data_len") yield UInt32(self, "error_correct_len") yield Bits(self, "stream_index", 7) yield Bits(self, "reserved[]", 8) yield Bit(self, "encrypted", "Content is encrypted?") yield UInt32(self, "reserved[]") size = self["data_len"].value if size: tag = self["type"].value if tag in Object.TAG_INFO: name, parser = Object.TAG_INFO[tag][0:2] yield parser(self, name, size=size*8) else: yield RawBytes(self, "data", size) size = self["error_correct_len"].value if size: yield RawBytes(self, "error_correct", size) class Object(FieldSet): # This list is converted to a dictionnary later where the key is the GUID TAG_INFO = ( ("header", Header, "Header object"), ("file_prop", FileProperty, "File property"), ("header_ext", HeaderExtension, "Header extension"), ("codec_list", CodecList, "Codec list"), ("simple_index", SimpleIndex, "Simple index"), ("data", Data, "Data object"), ("stream_prop[]", StreamProperty, "Stream properties"), ("bit_rates", BitRateList, "Bit rate list"), ("ext_desc", ExtendedContentDescription, "Extended content description"), ("metadata", Metadata, "Metadata"), ("video_header", VideoHeader, "Video"), ("audio_header", AudioHeader, "Audio"), ("bitrate_mutex", BitrateMutualExclusion, "Bitrate mutual exclusion"), ) def __init__(self, *args, **kw): FieldSet.__init__(self, *args, **kw) tag = self["guid"].value if tag not in self.TAG_INFO: self.handler = None return info = self.TAG_INFO[tag] self._name = info[0] self.handler = info[1] def createFields(self): yield GUID(self, "guid") yield filesizeHandler(UInt64(self, "size")) size = self["size"].value - self.current_size/8 if 0 < size: if self.handler: yield self.handler(self, "content", size=size*8) else: yield RawBytes(self, "content", size) tag_info_list = Object.TAG_INFO Object.TAG_INFO = dict( (parser[1].guid, parser) for parser in tag_info_list ) class AsfFile(Parser): MAGIC = "\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C" PARSER_TAGS = { "id": "asf", "category": "video", "file_ext": ("wmv", "wma", "asf"), "mime": (u"video/x-ms-asf", u"video/x-ms-wmv", u"audio/x-ms-wma"), "min_size": 24*8, "description": "Advanced Streaming Format (ASF), used for WMV (video) and WMA (audio)", "magic": ((MAGIC, 0),), } FILE_TYPE = { "video/x-ms-wmv": (".wmv", u"Window Media Video (wmv)"), "video/x-ms-asf": (".asf", u"ASF container"), "audio/x-ms-wma": (".wma", u"Window Media Audio (wma)"), } endian = LITTLE_ENDIAN def validate(self): magic = self.MAGIC if self.stream.readBytes(0, len(magic)) != magic: return "Invalid magic" header = self[0] if not(30 <= header["size"].value <= MAX_HEADER_SIZE): return "Invalid header size (%u)" % header["size"].value return True def 
createMimeType(self): audio = False for prop in self.array("header/content/stream_prop"): guid = prop["content/type"].value if guid == VideoHeader.guid: return u"video/x-ms-wmv" if guid == AudioHeader.guid: audio = True if audio: return u"audio/x-ms-wma" else: return u"video/x-ms-asf" def createFields(self): while not self.eof: yield Object(self, "object[]") def createDescription(self): return self.FILE_TYPE[self.mime_type][1] def createFilenameSuffix(self): return self.FILE_TYPE[self.mime_type][0] def createContentSize(self): if self[0].name != "header": return None return self["header/content/file_prop/content/file_size"].value * 8
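Note: the parser's central mechanism is the GUID-to-handler dispatch table built from Object.TAG_INFO. The same pattern in miniature; the two GUIDs below are the real Header and Data GUIDs from the classes above, while the handlers and payloads are stand-ins.

def parse_header(payload):
    return ("header", payload)


def parse_data(payload):
    return ("data", payload)


# Keyed by GUID, like Object.TAG_INFO after its tuple list is converted
# to a dict; these two GUIDs are Header.guid and Data.guid above.
TAG_INFO = {
    "75B22630-668E-11CF-A6D9-00AA0062CE6C": ("header", parse_header),
    "75B22636-668E-11CF-A6D9-00AA0062CE6C": ("data", parse_data),
}


def parse_object(guid, payload):
    if guid in TAG_INFO:
        name, handler = TAG_INFO[guid]
        return handler(payload)
    return ("raw", payload)  # unknown objects stay opaque, like RawBytes


print(parse_object("75B22630-668E-11CF-A6D9-00AA0062CE6C", b"\x00"))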
40223108/w18
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/pydoc_data/topics.py
694
# -*- coding: utf-8 -*- # Autogenerated by Sphinx on Sat Mar 23 15:42:31 2013 topics = {'assert': '\nThe ``assert`` statement\n************************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, ``assert expression``, is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, ``assert expression1, expression2``, is equivalent\nto\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that ``__debug__`` and ``AssertionError``\nrefer to the built-in variables with those names. In the current\nimplementation, the built-in variable ``__debug__`` is ``True`` under\nnormal circumstances, ``False`` when optimization is requested\n(command line option -O). The current code generator emits no code\nfor an assert statement when optimization is requested at compile\ntime. Note that it is unnecessary to include the source code for the\nexpression that failed in the error message; it will be displayed as\npart of the stack trace.\n\nAssignments to ``__debug__`` are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': '\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an asterisk,\n called a "starred" target: The object must be a sequence with at\n least as many items as there are targets in the target list, minus\n one. The first items of the sequence are assigned, from left to\n right, to the targets before the starred target. The final items\n of the sequence are assigned to the targets after the starred\n target. 
A list of the remaining items in the sequence is then\n assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of items\n as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a ``global`` or ``nonlocal``\n statement in the current code block: the name is bound to the\n object in the current local namespace.\n\n * Otherwise: the name is bound to the object in the global namespace\n or the outer namespace determined by ``nonlocal``, respectively.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, ``TypeError`` is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily ``AttributeError``).\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n ``a.x`` can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target ``a.x`` is\n always set as an instance attribute, creating it if necessary.\n Thus, the two occurrences of ``a.x`` do not necessarily refer to the\n same attribute: if the RHS expression refers to a class attribute,\n the LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with ``property()``.\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, ``IndexError`` is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. 
This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the ``__setitem__()`` method is called\n with appropriate arguments.\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to integers. If either bound is\n negative, the sequence\'s length is added to it. The resulting\n bounds are clipped to lie between zero and the sequence\'s length,\n inclusive. Finally, the sequence object is asked to replace the\n slice with the items of the assigned sequence. The length of the\n slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample ``a, b = b, a`` swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! For instance, the\nfollowing program prints ``[0, 2]``:\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print(x)\n\nSee also:\n\n **PEP 3132** - Extended Iterable Unpacking\n The specification for the ``*target`` feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. 
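# Illustrative sketch (not part of the generated topic text): the
# in-place behavior of augmented assignment described above, shown with
# a list. ``+=`` mutates the existing object via ``__iadd__``, while
# ``x = x + y`` rebinds the name to a brand-new object.
a = [1, 2]
b = a            # b refers to the same list object as a
a += [3]         # in-place: extends the existing list
print(b)         # [1, 2, 3] -- the shared object was mutated
a = a + [4]      # creates a new list and rebinds a
print(b)         # [1, 2, 3] -- b still refers to the old object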
Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'atom-identifiers': '\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a ``NameError`` exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name in front of the name, with leading underscores removed, and\na single underscore inserted in front of the class name. For example,\nthe identifier ``__spam`` occurring in a class named ``Ham`` will be\ntransformed to ``_Ham__spam``. This transformation is independent of\nthe syntactical context in which the identifier is used. If the\ntransformed name is extremely long (longer than 255 characters),\nimplementation defined truncation may happen. If the class name\nconsists only of underscores, no transformation is done.\n', 'atom-literals': "\nLiterals\n********\n\nPython supports string and bytes literals and various numeric\nliterals:\n\n literal ::= stringliteral | bytesliteral\n | integer | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\nbytes, integer, floating point number, complex number) with the given\nvalue. The value may be approximated in the case of floating point\nand imaginary (complex) literals. See section *Literals* for details.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", 'attribute-access': '\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). 
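# A minimal, hedged sketch of the ``__getattr__()`` hook described above
# (class and attribute names are invented for the demo): the hook runs
# only when normal attribute lookup fails.
class Proxy:
    def __init__(self):
        self.real = 42                 # found by the normal mechanism
    def __getattr__(self, name):       # consulted only on lookup failure
        return "computed:" + name

p = Proxy()
print(p.real)     # 42 -- __getattr__ is never called
print(p.missing)  # computed:missing -- falls back to __getattr__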
See the\n ``__getattribute__()`` method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when ``dir()`` is called on the object. A sequence must be\n returned. ``dir()`` converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. 
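# Hedged sketch of the descriptor protocol methods listed above: a data
# descriptor (defining both __get__ and __set__) that validates writes.
# The names Positive, Order and quantity are invented for this example.
class Positive:
    def __init__(self, name):
        self.name = name                      # key used in the instance dict
    def __get__(self, instance, owner):
        if instance is None:                  # accessed on the class itself
            return self
        return instance.__dict__[self.name]
    def __set__(self, instance, value):
        if value <= 0:
            raise ValueError("must be positive")
        instance.__dict__[self.name] = value  # store without re-triggering __set__

class Order:
    quantity = Positive('quantity')           # descriptor lives on the owner class

o = Order()
o.quantity = 5          # routed through Positive.__set__
print(o.quantity)       # 5 -- routed through Positive.__get__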
For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to an object instance, ``a.x`` is transformed into the\n call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a class, ``A.x`` is transformed into the call:\n ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
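# Illustrative sketch of the *__slots__* declaration being described
# (the Point class is invented for the demo): undeclared attribute names
# are rejected because no per-instance __dict__ is created.
class Point:
    __slots__ = ('x', 'y')    # only these attribute names are allowed

pt = Point()
pt.x, pt.y = 1.0, 2.0
try:
    pt.z = 3.0                # not listed in __slots__
except AttributeError as exc:
    print(exc)                # 'Point' object has no attribute 'z'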
If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n--------------------------\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raise ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to their instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``int``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n', 'attribute-references': '\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, which most objects do. This object is then\nasked to produce the attribute whose name is the identifier (which can\nbe customized by overriding the ``__getattr__()`` method). If this\nattribute is not available, the exception ``AttributeError`` is\nraised. Otherwise, the type and value of the object produced is\ndetermined by the object. 
Multiple evaluations of the same attribute\nreference may yield different objects.\n', 'augassign': '\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'binary': '\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe ``*`` (multiplication) operator yields the product of its\narguments. The arguments must either both be numbers, or one argument\nmust be an integer and the other must be a sequence. In the former\ncase, the numbers are converted to a common type and then multiplied\ntogether. In the latter case, sequence repetition is performed; a\nnegative repetition factor yields an empty sequence.\n\nThe ``/`` (division) and ``//`` (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Integer division yields a float, while\nfloor division of integers results in an integer; the result is that\nof mathematical division with the \'floor\' function applied to the\nresult. Division by zero raises the ``ZeroDivisionError`` exception.\n\nThe ``%`` (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n``ZeroDivisionError`` exception. The arguments may be floating point\nnumbers, e.g., ``3.14%0.7`` equals ``0.34`` (since ``3.14`` equals\n``4*0.7 + 0.34``.) 
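# A small, illustrative check of the modulo example above and the
# floor-division/modulo relationship discussed in this topic (the values
# are floating point, so the printed results are approximate).
x, y = 3.14, 0.7
print(x % y)         # approximately 0.34
print(x // y)        # 4.0
print(divmod(x, y))  # (4.0, ~0.34), i.e. (x // y, x % y)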
The modulo operator always yields a result with\nthe same sign as its second operand (or zero); the absolute value of\nthe result is strictly smaller than the absolute value of the second\noperand [1].\n\nThe floor division and modulo operators are connected by the following\nidentity: ``x == (x//y)*y + (x%y)``. Floor division and modulo are\nalso connected with the built-in function ``divmod()``: ``divmod(x, y)\n== (x//y, x%y)``. [2].\n\nIn addition to performing the modulo operation on numbers, the ``%``\noperator is also overloaded by string objects to perform old-style\nstring formatting (also known as interpolation). The syntax for\nstring formatting is described in the Python Library Reference,\nsection *printf-style String Formatting*.\n\nThe floor division operator, the modulo operator, and the ``divmod()``\nfunction are not defined for complex numbers. Instead, convert to a\nfloating point number using the ``abs()`` function if appropriate.\n\nThe ``+`` (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe ``-`` (subtraction) operator yields the difference of its\narguments. The numeric arguments are first converted to a common\ntype.\n', 'bitwise': '\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe ``&`` operator yields the bitwise AND of its arguments, which must\nbe integers.\n\nThe ``^`` operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be integers.\n\nThe ``|`` operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be integers.\n', 'bltin-code-objects': '\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin ``compile()`` function and can be extracted from function objects\nthrough their ``__code__`` attribute. See also the ``code`` module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the ``exec()`` or ``eval()`` built-in functions.\n\nSee *The standard type hierarchy* for more information.\n', 'bltin-ellipsis-object': '\nThe Ellipsis Object\n*******************\n\nThis object is commonly used by slicing (see *Slicings*). It supports\nno special operations. There is exactly one ellipsis object, named\n``Ellipsis`` (a built-in name). ``type(Ellipsis)()`` produces the\n``Ellipsis`` singleton.\n\nIt is written as ``Ellipsis`` or ``...``.\n', 'bltin-null-object': "\nThe Null Object\n***************\n\nThis object is returned by functions that don't explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named ``None`` (a built-in name). ``type(None)()`` produces\nthe same singleton.\n\nIt is written as ``None``.\n", 'bltin-type-objects': "\nType Objects\n************\n\nType objects represent the various object types. An object's type is\naccessed by the built-in function ``type()``. There are no special\noperations on types. 
The standard module ``types`` defines names for\nall standard built-in types.\n\nTypes are written like this: ``<class 'int'>``.\n", 'booleans': '\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: ``False``, ``None``, numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. User-defined objects can customize their truth value by\nproviding a ``__bool__()`` method.\n\nThe operator ``not`` yields ``True`` if its argument is false,\n``False`` otherwise.\n\nThe expression ``x and y`` first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression ``x or y`` first evaluates *x*; if *x* is true, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\n(Note that neither ``and`` nor ``or`` restrict the value and type they\nreturn to ``False`` and ``True``, but rather return the last evaluated\nargument. This is sometimes useful, e.g., if ``s`` is a string that\nshould be replaced by a default value if it is empty, the expression\n``s or \'foo\'`` yields the desired value. Because ``not`` has to\ninvent a value anyway, it does not bother to return a value of the\nsame type as its argument, so e.g., ``not \'foo\'`` yields ``False``,\nnot ``\'\'``.)\n', 'break': '\nThe ``break`` statement\n***********************\n\n break_stmt ::= "break"\n\n``break`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition\nwithin that loop.\n\nIt terminates the nearest enclosing loop, skipping the optional\n``else`` clause if the loop has one.\n\nIf a ``for`` loop is terminated by ``break``, the loop control target\nkeeps its current value.\n\nWhen ``break`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the loop.\n', 'callable-types': '\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n', 'calls': '\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n``__call__()`` 
method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a ``TypeError`` exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is ``None``, it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a\n``TypeError`` exception is raised. Otherwise, the list of filled\nslots is used as the argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use ``PyArg_ParseTuple()`` to\nparse their arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``*identifier`` is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``**identifier`` is present; in this case, that\nformal parameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax ``*expression`` appears in the function call,\n``expression`` must evaluate to an iterable. Elements from this\niterable are treated as if they were additional positional arguments;\nif there are positional arguments *x1*, ..., *xN*, and ``expression``\nevaluates to a sequence *y1*, ..., *yM*, this is equivalent to a call\nwith M+N positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the ``*expression`` syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the ``**expression`` argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... 
print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the ``*expression``\nsyntax to be used in the same call, so in practice this confusion does\nnot arise.\n\nIf the syntax ``**expression`` appears in the function call,\n``expression`` must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both ``expression`` and as an explicit keyword argument,\na ``TypeError`` exception is raised.\n\nFormal parameters using the syntax ``*identifier`` or ``**identifier``\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly ``None``, unless it raises\nan exception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. When the code block executes a ``return``\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a ``__call__()`` method; the effect is then\n the same as if that method was called.\n', 'class': '\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class ``object``; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. 
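# Hedged sketch of the class-decorator equivalence shown above; the
# ``register`` decorator and ``registry`` list are invented for the demo.
registry = []

def register(cls):
    registry.append(cls)       # record the class object
    return cls                 # the result is bound to the class name

@register
class Foo:
    pass

print(Foo is registry[0])      # True -- the same class object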
The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with ``self.name = value``. Both class and\ninstance attributes are accessible through the notation\n"``self.name``", and an instance attribute hides a class attribute\nwith the same name when accessed in this way. Class attributes can be\nused as defaults for instance attributes, but using mutable values\nthere can lead to unexpected results. *Descriptors* can be used to\ncreate instance variables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'comparisons': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nthe ``==`` and ``!=`` operators *always* consider objects of different\ntypes to be unequal, while the ``<``, ``>``, ``>=`` and ``<=``\noperators raise a ``TypeError`` when comparing objects of different\ntypes that do not implement these operators for the given pair of\ntypes. You can control comparison behavior of objects of non-built-in\ntypes by defining rich comparison methods like ``__gt__()``, described\nin section *Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values ``float(\'NaN\')`` and ``Decimal(\'NaN\')`` are special. 
They\n are identical to themselves, ``x is x`` but are not equal to\n themselves, ``x != x``. Additionally, comparing any value to a\n not-a-number value will return ``False``. For example, both ``3 <\n float(\'NaN\')`` and ``float(\'NaN\') < 3`` will return ``False``.\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n [3] String and bytes objects can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``[1,2,x] <= [1,2,y]`` has the\n same value as ``x <= y``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same ``(key, value)`` pairs. Order comparisons ``(\'<\', \'<=\', \'>=\',\n \'>\')`` raise ``TypeError``.\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets ``{1,2}`` and ``{2,3}`` are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, ``min()``, ``max()``, and ``sorted()`` produce\n undefined results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nComparison of objects of differing types depends on whether either\nof the types provide explicit support for the comparison. Most\nnumeric types can be compared with one another. When cross-type\ncomparison is not supported, the comparison method returns\n``NotImplemented``.\n\nThe operators ``in`` and ``not in`` test for membership. ``x in s``\nevaluates to true if *x* is a member of *s*, and false otherwise. ``x\nnot in s`` returns the negation of ``x in s``. All built-in sequences\nand set types support this as well as dictionary, for which ``in``\ntests whether the dictionary has a given key. For container types\nsuch as list, tuple, set, frozenset, dict, or collections.deque, the\nexpression ``x in y`` is equivalent to ``any(x is e or x == e for e in\ny)``.\n\nFor the string and bytes types, ``x in y`` is true if and only if *x*\nis a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nEmpty strings are always considered to be a substring of any other\nstring, so ``"" in "abc"`` will return ``True``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. 
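# Illustrative sketch of the membership hooks just described (both
# classes are invented for the demo): ``in`` uses __contains__() when it
# is defined, and otherwise falls back to iteration via __iter__().
class Evens:
    def __contains__(self, x):
        return isinstance(x, int) and x % 2 == 0

class Counted:
    def __iter__(self):        # no __contains__, so 'in' iterates
        return iter((1, 2, 3))

print(4 in Evens())    # True  -- answered by __contains__
print(2 in Counted())  # True  -- a matching value was produced
print(9 in Counted())  # False -- the iterator was exhausted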
If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [4]\n', 'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements, while the ``with`` statement\nallows the execution of initialization and finalization code around a\nblock of code. Function and class definitions are also syntactically\ncompound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print()`` calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. 
Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a ``StopIteration``\nexception), the suite in the ``else`` clause, if present, is executed,\nand the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. Hint: the built-in function ``range()`` returns an\niterator of integers suitable to emulate the effect of Pascal\'s ``for\ni := a to b do``; e.g., ``list(range(3))`` returns the list ``[0, 1,\n2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). 
An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the ``as`` keyword in that except clause,\nif present, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using ``as target``, it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the ``sys`` module and can be accessed via\n``sys.exc_info()``. 
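# A minimal, hedged demonstration of ``sys.exc_info()`` inside an except
# clause, as described above.
import sys

try:
    1 / 0
except ZeroDivisionError:
    exc_class, exc_instance, tb = sys.exc_info()
    print(exc_class.__name__)   # ZeroDivisionError
    print(exc_instance)         # division by zero
    print(tb.tb_lineno)         # line where the exception occurred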
``sys.exc_info()`` returns a 3-tuple consisting of\nthe exception class, the exception instance and a traceback object\n(see section *The standard type hierarchy*) identifying the point in\nthe program where the exception occurred. ``sys.exc_info()`` values\nare restored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception it is re-raised at the end of\nthe ``finally`` clause. If the ``finally`` clause raises another\nexception, the saved exception is set as the context of the new\nexception. If the ``finally`` clause executes a ``return`` or\n``break`` statement, the saved exception is discarded:\n\n def f():\n try:\n 1/0\n finally:\n return 42\n\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
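# Hedged sketch of the ``__exit__()`` semantics being described (the
# Suppress class is invented for the demo): a true return value from
# __exit__ suppresses the exception raised in the suite.
class Suppress:
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        return exc_type is ValueError   # suppress only ValueError

with Suppress():
    raise ValueError("swallowed")
print("execution continues after the with statement")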
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the "``*``" must also have a default value ---\nthis is a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after "``*``" or "``*identifier``" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "``: expression``"\nfollowing the parameter name. Any parameter may have an annotation\neven those of the form ``*identifier`` or ``**identifier``. Functions\nmay have "return" annotation of the form "``-> expression``" after the\nparameter list. These annotations can be any valid Python expression\nand are evaluated when the function definition is executed.\nAnnotations may be evaluated in a different order than they appear in\nthe source code. The presence of annotations does not change the\nsemantics of a function. The annotation values are available as\nvalues of a dictionary keyed by the parameters\' names in the\n``__annotations__`` attribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also:\n\n **PEP 3107** - Function Annotations\n The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class ``object``; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. 
(Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with ``self.name = value``. Both class and\ninstance attributes are accessible through the notation\n"``self.name``", and an instance attribute hides a class attribute\nwith the same name when accessed in this way. Class attributes can be\nused as defaults for instance attributes, but using mutable values\nthere can lead to unexpected results. *Descriptors* can be used to\ncreate instance variables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'context-managers': '\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. 
If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'continue': '\nThe ``continue`` statement\n**************************\n\n continue_stmt ::= "continue"\n\n``continue`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition or\n``finally`` clause within that loop. It continues with the next cycle\nof the nearest enclosing loop.\n\nWhen ``continue`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nstarting the next loop cycle.\n', 'conversions': '\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works that way:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the other\n is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions must define their own\nconversion behavior.\n', 'customization': '\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. 
If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_info()[2]`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.last_traceback``. Circular references which are garbage are\n detected when the option cycle detector is enabled (it\'s on by\n default), but can only be cleaned up if there are no Python-\n level ``__del__()`` methods involved. Refer to the documentation\n for the ``gc`` module for more information about how\n ``__del__()`` methods are handled by the cycle detector,\n particularly the description of the ``garbage`` value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function to compute the\n "official" string representation of an object. 
If at all possible,\n this should look like a valid Python expression that could be used\n to recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n ``<...some useful description...>`` should be returned. The return\n value must be a string object. If a class defines ``__repr__()``\n but not ``__str__()``, then ``__repr__()`` is also used when an\n "informal" string representation of instances of that class is\n required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by ``str(object)`` and the built-in functions ``format()``\n and ``print()`` to compute the "informal" or nicely printable\n string representation of an object. The return value must be a\n *string* object.\n\n This method differs from ``object.__repr__()`` in that there is no\n expectation that ``__str__()`` return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type ``object``\n calls ``object.__repr__()``.\n\nobject.__bytes__(self)\n\n Called by ``bytes()`` to compute a byte-string representation of an\n object. This should return a ``bytes`` object.\n\nobject.__format__(self, format_spec)\n\n Called by the ``format()`` built-in function (and by extension, the\n ``str.format()`` method of class ``str``) to produce a "formatted"\n string representation of an object. The ``format_spec`` argument is\n a string that contains a description of the formatting options\n desired. The interpretation of the ``format_spec`` argument is up\n to the type implementing ``__format__()``, however most classes\n will either delegate formatting to one of the built-in types, or\n use a similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` calls\n ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and ``x>=y`` calls\n ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. 
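As an illustrative sketch of this recommendation (the class below is a
made-up example, not part of the language reference):

   class Point:
       def __init__(self, x, y):
           self.x, self.y = x, y

       def __eq__(self, other):
           if not isinstance(other, Point):
               return NotImplemented   # let the other operand handle it
           return (self.x, self.y) == (other.x, other.y)

       def __ne__(self, other):
           result = self.__eq__(other)
           return result if result is NotImplemented else not result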
See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define an ``__eq__()`` method it should not\n define a ``__hash__()`` operation either; if it defines\n ``__eq__()`` but not ``__hash__()``, its instances will not be\n usable as items in hashable collections. If a class defines\n mutable objects and implements an ``__eq__()`` method, it should\n not implement ``__hash__()``, since the implementation of hashable\n collections requires that a key\'s hash value is immutable (if the\n object\'s hash value changes, it will be in the wrong hash bucket).\n\n User-defined classes have ``__eq__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns an appropriate value such\n that ``x == y`` implies both that ``x is y`` and ``hash(x) ==\n hash(y)``.\n\n A class that overrides ``__eq__()`` and does not define\n ``__hash__()`` will have its ``__hash__()`` implicitly set to\n ``None``. When the ``__hash__()`` method of a class is ``None``,\n instances of the class will raise an appropriate ``TypeError`` when\n a program attempts to retrieve their hash value, and will also be\n correctly identified as unhashable when checking ``isinstance(obj,\n collections.Hashable``).\n\n If a class that overrides ``__eq__()`` needs to retain the\n implementation of ``__hash__()`` from a parent class, the\n interpreter must be told this explicitly by setting ``__hash__ =\n <ParentClass>.__hash__``.\n\n If a class that does not override ``__eq__()`` wishes to suppress\n hash support, it should include ``__hash__ = None`` in the class\n definition. 
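Both idioms can be sketched as follows (the class names are invented):

   class Base:
       pass

   class WithCustomEq(Base):
       def __eq__(self, other):
           return isinstance(other, WithCustomEq)
       __hash__ = Base.__hash__   # explicitly retain the parent's hashing

   class Unhashable:
       __hash__ = None            # hash(Unhashable()) raises TypeError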
A class which defines its own ``__hash__()`` that\n explicitly raises a ``TypeError`` would be incorrectly identified\n as hashable by an ``isinstance(obj, collections.Hashable)`` call.\n\n Note: By default, the ``__hash__()`` values of str, bytes and datetime\n objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also ``PYTHONHASHSEED``.\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``. When this method\n is not defined, ``__len__()`` is called, if it is defined, and the\n object is considered true if its result is nonzero. If a class\n defines neither ``__len__()`` nor ``__bool__()``, all its instances\n are considered true.\n', 'debugger': '\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n``Pdb``. This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\nand ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the ``readline`` module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the ``print`` command.\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: ``pdb.py`` now accepts a ``-c`` option that\nexecutes commands as if given in a ``.pdbrc`` file, see *Debugger\nCommands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. 
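For example (a made-up function, used only to illustrate placement):

   def divide(a, b):
       import pdb; pdb.set_trace()   # execution pauses here at a (Pdb) prompt
       return a / b

   divide(10, 2)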
You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the ``continue`` command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type ``continue``, or you can\n step through the statement using ``step`` or ``next`` (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module ``__main__`` is used. (See\n the explanation of the built-in ``exec()`` or ``eval()``\n functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. When ``runeval()`` returns, it returns the\n value of the expression. Otherwise this function is similar to\n ``run()``.\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When ``runcall()`` returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n ``sys.last_traceback``.\n\nThe ``run*`` functions and ``set_trace()`` are aliases for\ninstantiating the ``Pdb`` class and calling the method of the same\nname. If you want to access further features, you have to do this\nyourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n ``Pdb`` is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying ``cmd.Cmd`` class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses Ctrl-C on the console) when you give a\n ``continue`` command. This allows you to break into the debugger\n again by pressing Ctrl-C. If you want Pdb not to touch the SIGINT\n handler, set *nosigint* tot true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. 
Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n``h(elp)`` means that either ``h`` or ``help`` can be used to enter\nthe help command (but not ``he`` or ``hel``, nor ``H`` or ``Help`` or\n``HELP``). Arguments to commands must be separated by whitespace\n(spaces or tabs). Optional arguments are enclosed in square brackets\n(``[]``) in the command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n(``|``).\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a ``list`` command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint (``!``). This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by\n``;;``. (A single ``;`` is not used as it is the separator for\nmultiple commands in a line that is passed to the Python parser.) No\nintelligence is applied to separating the commands; the input is split\nat the first ``;;`` pair, even if it is in the middle of a quoted\nstring.\n\nIf a file ``.pdbrc`` exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ``.pdbrc`` can now contain commands that\ncontinue debugging, such as ``continue`` or ``next``. Previously,\nthese commands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. ``help pdb``\n displays the full documentation (the docstring of the ``pdb``\n module). Since the *command* argument must be an identifier,\n ``help exec`` must be entered to get help on the ``!`` command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. 
The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on ``sys.path``. Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for ``break``.\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just ``end`` to terminate the commands. An example:\n\n (Pdb) commands 1\n (com) print some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with ``end``; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. 
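A hypothetical session combining these pieces might look like:

   (Pdb) commands 2
   (com) silent
   (com) print some_variable
   (com) continue
   (com) end
   (Pdb)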
If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between ``next`` and\n ``step`` is that ``step`` stops inside a called function, while\n ``next`` executes called functions at (nearly) full speed, only\n stopping at the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a ``for`` loop or out\n of a ``finally`` clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With ``.`` as argument, list 11 lines around the current line.\n With one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by ``->``. If\n an exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ``>>``, if it\n differs from the current line.\n\n New in version 3.2: The ``>>`` marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for ``list``.\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np(rint) expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\npp expression\n\n Like the ``print`` command, except the value of the expression is\n pretty-printed using the ``pprint`` module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interative interpreter (using the ``code`` module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. 
Replaceable parameters can be\n indicated by ``%1``, ``%2``, and so on, while ``%*`` is replaced by\n all the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ``.pdbrc`` file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. To set a\n global variable, you can prefix the assignment command with a\n ``global`` statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with ``shlex`` and the result is used as the new\n ``sys.argv``. History, breakpoints, actions and debugger options\n are preserved. ``restart`` is an alias for ``run``.\n\nq(uit)\n\n Quit from the debugger. The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module is\n determined by the ``__name__`` in the frame globals.\n', 'del': '\nThe ``del`` statement\n*********************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a ``global``\nstatement in the same code block. If the name is unbound, a\n``NameError`` exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2: Previously it was illegal to delete a name\nfrom the local namespace if it occurs as a free variable in a nested\nblock.\n', 'dict': '\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. 
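For example (the ordering of the resulting entries is not guaranteed in
this version of Python):

   d = {"a": 1, "b": 2}                    # a key/datum list
   squares = {x: x**2 for x in range(4)}   # a dict comprehension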
This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', 'dynamic-features': '\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nThe ``eval()`` and ``exec()`` functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe ``exec()`` and ``eval()`` functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', 'else': '\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'exceptions': '\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. 
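A deliberately trivial sketch of both clauses:

   try:
       result = 1 / 0
   except ZeroDivisionError as exc:
       print("handled:", exc)
   finally:
       print("cleanup runs whether or not an exception occurred")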
The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nNote: Exception messages are not part of the Python API. Their contents\n may change from one version of Python to the next without warning\n and should not be relied on by code which will run under multiple\n versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'execmodel': '\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions ``eval()`` and ``exec()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. 
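For instance (an invented snippet):

   x = "global"

   def outer():
       x = "enclosing"
       def inner():
           return x       # found in outer()'s scope, not the module's
       return inner()

   # outer() returns "enclosing"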
The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as ``nonlocal``. If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, or\nafter ``as`` in a ``with`` statement or ``except`` clause. The\n``import`` statement of the form ``from ... import *`` binds all names\ndefined in the imported module, except those beginning with an\nunderscore. This form may only be used at the module level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the ``global`` statement occurs within a block, all uses of the\nname specified in the statement refer to the binding of that name in\nthe top-level namespace. Names are resolved in the top-level\nnamespace by searching the global namespace, i.e. the namespace of the\nmodule containing the code block, and the builtins namespace, the\nnamespace of the module ``builtins``. The global namespace is\nsearched first. If the name is not found there, the builtins\nnamespace is searched. The global statement must precede all uses of\nthe name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module\'s dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``builtins``; when in any other module, ``__builtins__`` is an alias\nfor the dictionary of the ``builtins`` module itself.\n``__builtins__`` can be set to a user-created dictionary to create a\nweak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``builtins`` module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. 
The main module for a script is always called\n``__main__``.\n\nThe ``global`` statement has the same scope as a name binding\noperation in the same block. If the nearest enclosing scope for a\nfree variable contains a global statement, the free variable is\ntreated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nThe ``eval()`` and ``exec()`` functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe ``exec()`` and ``eval()`` functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nNote: Exception messages are not part of the Python API. 
Their contents\n may change from one version of Python to the next without warning\n and should not be relied on by code which will run under multiple\n versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', 'exprlists': '\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: ``()``.)\n', 'floating': '\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, ``077e010`` is legal, and denotes the same\nnumber as ``77e10``. The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator ``-`` and the\nliteral ``1``.\n', 'for': '\nThe ``for`` statement\n*********************\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a ``StopIteration``\nexception), the suite in the ``else`` clause, if present, is executed,\nand the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. 
Hint: the built-in function ``range()`` returns an\niterator of integers suitable to emulate the effect of Pascal\'s ``for\ni := a to b do``; e.g., ``list(range(3))`` returns the list ``[0, 1,\n2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', 'formatstrings': '\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s" | "a"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point ``\'!\'``, and a *format_spec*, which\nis preceded by a colon ``\':\'``. These specify a non-default format\nfor the replacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings ``\'10\'`` or\n``\':-]\'``) within a format string. The *arg_name* can be followed by\nany number of index or attribute expressions. 
An expression of the\nform ``\'.name\'`` selects the named attribute using ``getattr()``,\nwhile an expression of the form ``\'[index]\'`` does an index lookup\nusing ``__getitem__()``.\n\nChanged in version 3.1: The positional argument specifiers can be\nomitted, so ``\'{} {}\'`` is equivalent to ``\'{0} {1}\'``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nThree conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, ``\'!r\'`` which calls ``repr()`` and ``\'!a\'``\nwhich calls ``ascii()``.\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n "More {!a}" # Calls ascii() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in ``format()`` function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value. 
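For instance, this convention can be checked at the interactive prompt:

   >>> format(3.5, '') == str(3.5)
   True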
A non-\nempty format string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <a character other than \'{\' or \'}\'>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nThe *fill* character can be any character other than \'{\' or \'}\'. The\npresence of a fill character is signaled by the character following\nit, which must be one of the alignment options. If the second\ncharacter of *format_spec* is not a valid alignment option, then it is\nassumed that both the fill character and the alignment option are\nabsent.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option causes the "alternate form" to be used for the\nconversion. The alternate form is defined differently for different\ntypes. This option is only valid for integer, float, complex and\nDecimal types. For integers, when binary, octal, or hexadecimal output\nis used, this option adds the prefix respective ``\'0b\'``, ``\'0o\'``, or\n``\'0x\'`` to the output value. For floats, complex and Decimal the\nalternate form causes the result of the conversion to always contain a\ndecimal-point character, even if no digits follow it. 
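A brief sketch of the alternate form (the values are illustrative):

   >>> format(255, '#x')     # prefix added for hexadecimal output
   '0xff'
   >>> format(1.0, '#.0f')   # decimal point kept even with no digits after it
   '1.'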
Normally, a\ndecimal-point character appears in the result of these conversions\nonly if a digit follows it. In addition, for ``\'g\'`` and ``\'G\'``\nconversions, trailing zeros are not removed from the result.\n\nThe ``\',\'`` option signals the use of a comma for a thousands\nseparator. For a locale aware separator, use the ``\'n\'`` integer\npresentation type instead.\n\nChanged in version 3.1: Added the ``\',\'`` option (see also **PEP\n378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero (``\'0\'``) character enables\nsign-aware zero-padding for numeric types. This is equivalent to a\n*fill* character of ``\'0\'`` with an *alignment* type of ``\'=\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'s\'`` | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'s\'``. |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. 
|\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except\n``\'n\'`` and None). When doing so, ``float()`` is used to convert the\ninteger to a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``, but converts ``nan`` to |\n | | ``NAN`` and ``inf`` to ``INF``. |\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. For a given precision ``p >= 1``, this |\n | | rounds the number to ``p`` significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1`` would have exponent ``exp``. Then if ``-4 <= exp |\n | | < p``, the number is formatted with presentation type |\n | | ``\'f\'`` and precision ``p-1-exp``. Otherwise, the number |\n | | is formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1``. In both cases insignificant trailing zeros are |\n | | removed from the significand, and the decimal point is |\n | | also removed if there are no remaining digits following |\n | | it. Positive and negative infinity, positive and negative |\n | | zero, and nans, are formatted as ``inf``, ``-inf``, ``0``, |\n | | ``-0`` and ``nan`` respectively, regardless of the |\n | | precision. A precision of ``0`` is treated as equivalent |\n | | to a precision of ``1``. |\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | Similar to ``\'g\'``, except with at least one digit past |\n | | the decimal point and a default precision of 12. This is |\n | | intended to match ``str()``, except you can add the other |\n | | format modifiers. 
|\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old ``%``-formatting.\n\nIn most of the cases the syntax is similar to the old\n``%``-formatting, with the addition of the ``{}`` and with ``:`` used\ninstead of ``%``. For example, ``\'%03.2f\'`` can be translated to\n``\'{:03.2f}\'``.\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 3.1+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point:\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing ``%s`` and ``%r``:\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing ``%+f``, ``%-f``, and ``% f`` and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing ``%x`` and ``%o`` and converting the value to different\nbases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 86.36%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> 
\'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12): #doctest: +NORMALIZE_WHITESPACE\n ... for base in \'dXob\':\n ... print(\'{0:{width}{base}}\'.format(num, base=base, width=width), end=\' \')\n ... print()\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', 'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the "``*``" must also have a default value ---\nthis is a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
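A minimal demonstration of the pitfall (the function name is illustrative):

   >>> def append_to(item, seq=[]):   # one list, created when the def executes
   ...     seq.append(item)
   ...     return seq
   ...
   >>> append_to(1)
   [1]
   >>> append_to(2)                   # the same list object is reused
   [1, 2]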
A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from positional arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after "``*``" or "``*identifier``" are\nkeyword-only parameters and may only be passed by keyword arguments.\n\nParameters may have annotations of the form "``: expression``"\nfollowing the parameter name. Any parameter may have an annotation,\neven those of the form ``*identifier`` or ``**identifier``. Functions\nmay have a "return" annotation of the form "``-> expression``" after the\nparameter list. These annotations can be any valid Python expression\nand are evaluated when the function definition is executed.\nAnnotations may be evaluated in a different order than they appear in\nthe source code. The presence of annotations does not change the\nsemantics of a function. The annotation values are available as\nvalues of a dictionary keyed by the parameters\' names in the\n``__annotations__`` attribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also:\n\n **PEP 3107** - Function Annotations\n The original specification for function annotations.\n', 'global': '\nThe ``global`` statement\n************************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe ``global`` statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. 
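A minimal sketch of the effect (the names are illustrative):

   >>> counter = 0
   >>> def bump():
   ...     global counter   # without this, the assignment would create a local
   ...     counter = counter + 1
   ...
   >>> bump()
   >>> counter
   1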
It would be impossible to assign to a global\nvariable without ``global``, although free variables may refer to\nglobals without being declared global.\n\nNames listed in a ``global`` statement must not be used in the same\ncode block textually preceding that ``global`` statement.\n\nNames listed in a ``global`` statement must not be defined as formal\nparameters or in a ``for`` loop control target, ``class`` definition,\nfunction definition, or ``import`` statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the ``global`` is a directive to the parser.\nIt applies only to code parsed at the same time as the ``global``\nstatement. In particular, a ``global`` statement contained in a string\nor code object supplied to the built-in ``exec()`` function does not\naffect the code block *containing* the function call, and code\ncontained in such a string is unaffected by ``global`` statements in\nthe code containing the function call. The same applies to the\n``eval()`` and ``compile()`` functions.\n', 'id-classes': '\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``builtins`` module. When\n not in interactive mode, ``_`` has no special meaning and is not\n defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', 'identifiers': '\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions.\n\nThe syntax of identifiers in Python is based on the Unicode standard\nannex UAX-31, with elaboration and changes as defined below; see also\n**PEP 3131** for further details.\n\nWithin the ASCII range (U+0001..U+007F), the valid characters for\nidentifiers are the same as in Python 2.x: the uppercase and lowercase\nletters ``A`` through ``Z``, the underscore ``_`` and, except for the\nfirst character, the digits ``0`` through ``9``.\n\nPython 3.0 introduces additional characters from outside the ASCII\nrange (see **PEP 3131**). 
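For example, an identifier drawn from outside the ASCII range:

   >>> π = 3.14159   # GREEK SMALL LETTER PI is a valid identifier character
   >>> π * 2
   6.28318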
For these characters, the classification\nuses the version of the Unicode Character Database as included in the\n``unicodedata`` module.\n\nIdentifiers are unlimited in length. Case is significant.\n\n identifier ::= xid_start xid_continue*\n id_start ::= <all characters in general categories Lu, Ll, Lt, Lm, Lo, Nl, the underscore, and characters with the Other_ID_Start property>\n id_continue ::= <all characters in id_start, plus characters in the categories Mn, Mc, Nd, Pc and others with the Other_ID_Continue property>\n xid_start ::= <all characters in id_start whose NFKC normalization is in "id_start xid_continue*">\n xid_continue ::= <all characters in id_continue whose NFKC normalization is in "id_continue*">\n\nThe Unicode category codes mentioned above stand for:\n\n* *Lu* - uppercase letters\n\n* *Ll* - lowercase letters\n\n* *Lt* - titlecase letters\n\n* *Lm* - modifier letters\n\n* *Lo* - other letters\n\n* *Nl* - letter numbers\n\n* *Mn* - nonspacing marks\n\n* *Mc* - spacing combining marks\n\n* *Nd* - decimal numbers\n\n* *Pc* - connector punctuations\n\n* *Other_ID_Start* - explicit list of characters in PropList.txt to\n support backwards compatibility\n\n* *Other_ID_Continue* - likewise\n\nAll identifiers are converted into the normal form NFKC while parsing;\ncomparison of identifiers is based on NFKC.\n\nA non-normative HTML file listing all valid identifier characters for\nUnicode 4.1 can be found at http://www.dcl.hpi.uni-\npotsdam.de/home/loewis/table-3131.html.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n False class finally is return\n None continue for lambda try\n True def from nonlocal while\n and del global not with\n as elif if or yield\n assert else import pass\n break except in raise\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``builtins`` module. When\n not in interactive mode, ``_`` has no special meaning and is not\n defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. 
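A short sketch of the mangling (the class and attribute names are illustrative):

   >>> class Widget:
   ...     def __init__(self):
   ...         self.__token = 1   # stored under the mangled name _Widget__token
   ...
   >>> Widget()._Widget__token
   1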
See section *Identifiers (Names)*.\n', 'if': '\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n', 'imaginary': '\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., ``(3+4j)``. Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', 'import': '\nThe ``import`` statement\n************************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nThe basic import statement (no ``from`` clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope where\n the ``import`` statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individual import statements.\n\nThe details of the first step, finding and loading modules, are\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by ``as``, then the name following\n ``as`` is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n top level module, the module\'s name is bound in the local namespace\n as a reference to the imported module.\n\n* If the module being imported is *not* a top level module, then the\n name of the top level package that contains the module is bound in\n the local namespace as a reference to the top level package. The\n imported module must be accessed using its fully qualified name\n rather than directly.\n\nThe ``from`` form uses a slightly more complex process:\n\n1. find the module specified in the ``from`` clause, loading and\n initializing it if necessary;\n\n2. 
for each of the identifiers specified in the ``import`` clauses:\n\n 1. check if the imported module has an attribute by that name\n\n 2. if not, attempt to import a submodule with that name and then\n check the imported module again for that attribute\n\n 3. if the attribute is not found, ``ImportError`` is raised.\n\n 4. otherwise, a reference to that value is bound in the local\n namespace, using the name in the ``as`` clause if it is present,\n otherwise using the attribute name\n\nExamples:\n\n import foo # foo imported and bound locally\n import foo.bar.baz # foo.bar.baz imported, foo bound locally\n import foo.bar.baz as fbb # foo.bar.baz imported and bound as fbb\n from foo.bar import baz # foo.bar.baz imported and bound as baz\n from foo import attr # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star (``\'*\'``), all public\nnames defined in the module are bound in the local namespace for the\nscope where the ``import`` statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope: the\nwild card form of import --- ``import *`` --- is only allowed at the\nmodule level. Attempting to use it in class or function definitions\nwill raise a ``SyntaxError``.\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after ``from``\nyou can specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n``from . import mod`` from a module in the ``pkg`` package then you\nwill end up importing ``pkg.mod``. 
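A hypothetical package layout makes this concrete (all names below are illustrative):

   pkg/
       __init__.py
       mod.py
       helper.py    # 'from . import mod' here is equivalent to importing pkg.mod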
If you execute ``from ..subpkg2\nimport mod`` from within ``pkg.subpkg1`` you will import\n``pkg.subpkg2.mod``. The specification for relative imports is\ncontained within **PEP 328**.\n\n``importlib.import_module()`` is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are ``absolute_import``,\n``division``, ``generators``, ``unicode_literals``,\n``print_function``, ``nested_scopes`` and ``with_statement``. They\nare all redundant because they are always enabled, and only kept for\nbackwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module ``__future__``, described later, and it\nwill be imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions ``exec()`` and\n``compile()`` that occur in a module ``M`` containing a future\nstatement will, by default, use the new syntax or semantics associated\nwith the future statement. This can be controlled by optional\narguments to ``compile()`` --- see the documentation of that function\nfor details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. 
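The standard ``__future__`` module mentioned above records each feature; for instance, in CPython:

   >>> import __future__
   >>> __future__.division.getMandatoryRelease()
   (3, 0, 0, 'alpha', 0)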
If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n', 'in': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nthe ``==`` and ``!=`` operators *always* consider objects of different\ntypes to be unequal, while the ``<``, ``>``, ``>=`` and ``<=``\noperators raise a ``TypeError`` when comparing objects of different\ntypes that do not implement these operators for the given pair of\ntypes. You can control comparison behavior of objects of non-built-in\ntypes by defining rich comparison methods like ``__gt__()``, described\nin section *Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values ``float(\'NaN\')`` and ``Decimal(\'NaN\')`` are special. They\n are identical to themselves, ``x is x``, but are not equal to\n themselves, ``x != x``. Additionally, comparing any value to a\n not-a-number value will return ``False``. For example, both ``3 <\n float(\'NaN\')`` and ``float(\'NaN\') < 3`` will return ``False``.\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n [3] String and bytes objects can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``[1,2,x] <= [1,2,y]`` has the\n same value as ``x <= y``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same ``(key, value)`` pairs. 
Order comparisons ``(\'<\', \'<=\', \'>=\',\n \'>\')`` raise ``TypeError``.\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets ``{1,2}`` and ``{2,3}`` are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, ``min()``, ``max()``, and ``sorted()`` produce\n undefined results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nComparison of objects of differing types depends on whether either\nof the types provides explicit support for the comparison. Most\nnumeric types can be compared with one another. When cross-type\ncomparison is not supported, the comparison method returns\n``NotImplemented``.\n\nThe operators ``in`` and ``not in`` test for membership. ``x in s``\nevaluates to true if *x* is a member of *s*, and false otherwise. ``x\nnot in s`` returns the negation of ``x in s``. All built-in sequences\nand set types support this as well as dictionary, for which ``in``\ntests whether the dictionary has a given key. For container types\nsuch as list, tuple, set, frozenset, dict, or collections.deque, the\nexpression ``x in y`` is equivalent to ``any(x is e or x == e for e in\ny)``.\n\nFor the string and bytes types, ``x in y`` is true if and only if *x*\nis a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nEmpty strings are always considered to be a substring of any other\nstring, so ``"" in "abc"`` will return ``True``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise an ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse truth value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. 
[4]\n', 'integers': '\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"+\n nonzerodigit ::= "1"..."9"\n digit ::= "0"..."9"\n octinteger ::= "0" ("o" | "O") octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n octdigit ::= "0"..."7"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n bindigit ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n 7 2147483647 0o177 0b100110111\n 3 79228162514264337593543950336 0o377 0x100000000\n 79228162514264337593543950336 0xdeadbeef\n', 'lambda': '\nLambdas\n*******\n\n lambda_form ::= "lambda" [parameter_list]: expression\n lambda_form_nocond ::= "lambda" [parameter_list]: expression_nocond\n\nLambda forms (lambda expressions) have the same syntactic position as\nexpressions. They are a shorthand to create anonymous functions; the\nexpression ``lambda arguments: expression`` yields a function object.\nThe unnamed object behaves like a function object defined with\n\n def <lambda>(arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda forms cannot contain\nstatements or annotations.\n', 'lists': '\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | comprehension] "]"\n\nA list display yields a new list object, the contents being specified\nby either a list of expressions or a comprehension. When a comma-\nseparated list of expressions is supplied, its elements are evaluated\nfrom left to right and placed into the list object in that order.\nWhen a comprehension is supplied, the list is constructed from the\nelements resulting from the comprehension.\n', 'naming': "\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the '**-c**' option) is a code block. The string argument passed\nto the built-in functions ``eval()`` and ``exec()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block's execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. 
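A minimal sketch of a free variable resolved in an enclosing function block (the names are illustrative):

   >>> def outer():
   ...     x = 'enclosing'
   ...     def inner():
   ...         return x   # free in inner(); bound in outer()'s block
   ...     return inner()
   ...
   >>> outer()
   'enclosing'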
If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block's *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as ``nonlocal``. If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, or\nafter ``as`` in a ``with`` statement or ``except`` clause. The\n``import`` statement of the form ``from ... import *`` binds all names\ndefined in the imported module, except those beginning with an\nunderscore. This form may only be used at the module level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the ``global`` statement occurs within a block, all uses of the\nname specified in the statement refer to the binding of that name in\nthe top-level namespace. Names are resolved in the top-level\nnamespace by searching the global namespace, i.e. the namespace of the\nmodule containing the code block, and the builtins namespace, the\nnamespace of the module ``builtins``. The global namespace is\nsearched first. If the name is not found there, the builtins\nnamespace is searched. The global statement must precede all uses of\nthe name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module's dictionary is used). 
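For instance, the ``builtins`` module can be inspected directly:

   >>> import builtins
   >>> builtins.len is len   # assuming len has not been rebound
   True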
By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``builtins``; when in any other module, ``__builtins__`` is an alias\nfor the dictionary of the ``builtins`` module itself.\n``__builtins__`` can be set to a user-created dictionary to create a\nweak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``builtins`` module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe ``global`` statement has the same scope as a name binding\noperation in the same block. If the nearest enclosing scope for a\nfree variable contains a global statement, the free variable is\ntreated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nThe ``eval()`` and ``exec()`` functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe ``exec()`` and ``eval()`` functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n", 'nonlocal': '\nThe ``nonlocal`` statement\n**************************\n\n nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe ``nonlocal`` statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope. This is\nimportant because the default behavior for binding is to search the\nlocal namespace first. The statement allows encapsulated code to\nrebind variables outside of the local scope besides the global\n(module) scope.\n\nNames listed in a ``nonlocal`` statement, unlike those listed in a\n``global`` statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a ``nonlocal`` statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also:\n\n **PEP 3104** - Access to Names in Outer Scopes\n The specification for the ``nonlocal`` statement.\n', 'numbers': "\nNumeric literals\n****************\n\nThere are three types of numeric literals: integers, floating point\nnumbers, and imaginary numbers. 
There are no complex literals\n(complex numbers can be formed by adding a real number and an\nimaginary number).\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator '``-``' and\nthe literal ``1``.\n", 'numeric-types': "\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``). For instance, to evaluate the expression ``x + y``, where\n *x* is an instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()``. Note that\n ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``) with reflected (swapped) operands. These functions are only\n called if the left operand does not support the corresponding\n operation and the operands are of different types. [2] For\n instance, to evaluate the expression ``x - y``, where *y* is an\n instance of a class that has an ``__rsub__()`` method,\n ``y.__rsub__(x)`` is called if ``x.__sub__(y)`` returns\n *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand's type is a subclass of the left operand's\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand's\n non-reflected method. 
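A hedged sketch of this protocol (``Metres`` is an invented example class); returning ``NotImplemented`` is what gives the right-hand operand its chance:

    class Metres:
        def __init__(self, value):
            self.value = value
        def __add__(self, other):
            if isinstance(other, Metres):
                return Metres(self.value + other.value)
            return NotImplemented          # let Python try other.__radd__()
        def __radd__(self, other):
            if other == 0:                 # so that sum(), which starts at 0, works
                return Metres(self.value)
            return NotImplemented

    total = sum([Metres(1), Metres(2)])    # 0 + Metres(1) invokes __radd__
    assert total.value == 3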
This behavior allows subclasses to\n override their ancestors' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``float()`` and ``round()``. Should return a value of\n the appropriate type.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing, or in the\n built-in ``bin()``, ``hex()`` and ``oct()`` functions). Must return\n an integer.\n", 'objects': '\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'``is``\' operator compares the\nidentity of two objects; the ``id()`` function returns an integer\nrepresenting its identity.\n\n**CPython implementation detail:** For CPython, ``id(x)`` is the\nmemory address where ``x`` is stored.\n\nAn object\'s type determines the operations that the object supports\n(e.g., "does it have a length?") and also defines the possible values\nfor objects of that type. The ``type()`` function returns an object\'s\ntype (which is an object itself). Like its identity, an object\'s\n*type* is also unchangeable. [1]\n\nThe *value* of some objects can change. Objects whose value can\nchange are said to be *mutable*; objects whose value is unchangeable\nonce they are created are called *immutable*. (The value of an\nimmutable container object that contains a reference to a mutable\nobject can change when the latter\'s value is changed; however the\ncontainer is still considered immutable, because the collection of\nobjects it contains cannot be changed. 
So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the ``gc`` module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'``try``...``except``\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a ``close()`` method. Programs\nare strongly recommended to explicitly close such objects. The\n\'``try``...``finally``\' statement and the \'``with``\' statement provide\nconvenient ways to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after ``a = 1; b =\n1``, ``a`` and ``b`` may or may not refer to the same object with the\nvalue one, depending on the implementation, but after ``c = []; d =\n[]``, ``c`` and ``d`` are guaranteed to refer to two different,\nunique, newly created empty lists. (Note that ``c = d = []`` assigns\nthe same object to both ``c`` and ``d``.)\n', 'operator-summary': '\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. 
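These identity rules are easy to check interactively; a short sketch:

    a = 1; b = 1
    print(a is b)        # unspecified: may be True (CPython caches small ints)

    c = []; d = []
    assert c is not d    # two distinct, newly created lists

    e = f = []
    assert e is f        # one list bound to two names
    e.append(1)
    assert f == [1]      # the change is visible through both names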
Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| ``lambda`` | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| ``if`` -- ``else`` | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| ``or`` | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| ``and`` | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| ``not`` ``x`` | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``in``, ``not in``, ``is``, ``is not``, ``<``, | Comparisons, including membership |\n| ``<=``, ``>``, ``>=``, ``!=``, ``==`` | tests and identity tests, |\n+-------------------------------------------------+---------------------------------------+\n| ``|`` | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| ``^`` | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| ``&`` | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| ``<<``, ``>>`` | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| ``+``, ``-`` | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| ``*``, ``/``, ``//``, ``%`` | Multiplication, division, remainder |\n| | [5] |\n+-------------------------------------------------+---------------------------------------+\n| ``+x``, ``-x``, ``~x`` | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``**`` | Exponentiation [6] |\n+-------------------------------------------------+---------------------------------------+\n| ``x[index]``, ``x[index:index]``, | Subscription, slicing, call, |\n| ``x(arguments...)``, ``x.attribute`` | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| ``(expressions...)``, ``[expressions...]``, | Binding or tuple display, list |\n| ``{key: value...}``, ``{expressions...}`` | display, dictionary display, set |\n| | display |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it\n may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that ``-1e-100 % 1e100`` have the same\n sign as ``1e100``, the computed result is ``-1e-100 + 1e100``,\n which is numerically exactly equal to ``1e100``. The function\n ``math.fmod()`` returns a result whose sign matches the sign of\n the first argument instead, and so returns ``-1e-100`` in this\n case. 
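The difference between ``%`` and ``math.fmod()`` noted in this footnote can be observed directly:

    import math

    print(-1e-100 % 1e100)             # 1e+100: sign follows the divisor
    print(math.fmod(-1e-100, 1e100))   # -1e-100: sign follows the dividend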
Which approach is more appropriate depends on the\n application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n possible for ``x//y`` to be one larger than ``(x-x%y)//y`` due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that ``divmod(x,y)[0] * y + x % y`` be very\n close to ``x``.\n\n[3] While comparisons between strings make sense at the byte level,\n they may be counter-intuitive to users. For example, the strings\n ``"\\u00C7"`` and ``"\\u0327\\u0043"`` compare differently, even\n though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using ``unicodedata.normalize()``.\n\n[4] Due to automatic garbage-collection, free lists, and the dynamic\n nature of descriptors, you may notice seemingly unusual behaviour\n in certain uses of the ``is`` operator, like those involving\n comparisons between instance methods, or constants. Check their\n documentation for more info.\n\n[5] The ``%`` operator is also used for string formatting; the same\n precedence applies.\n\n[6] The power operator ``**`` binds less tightly than an arithmetic or\n bitwise unary operator on its right, that is, ``2**-1`` is\n ``0.5``.\n', 'pass': '\nThe ``pass`` statement\n**********************\n\n pass_stmt ::= "pass"\n\n``pass`` is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', 'power': '\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): ``-1**2`` results in ``-1``.\n\nThe power operator has the same semantics as the built-in ``pow()``\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type, and the result is of that type.\n\nFor int operands, the result has the same type as the operands unless\nthe second argument is negative; in that case, all arguments are\nconverted to float and a float result is delivered. For example,\n``10**2`` returns ``100``, but ``10**-2`` returns ``0.01``.\n\nRaising ``0.0`` to a negative power results in a\n``ZeroDivisionError``. Raising a negative number to a fractional power\nresults in a ``complex`` number. (In earlier versions it raised a\n``ValueError``.)\n', 'raise': '\nThe ``raise`` statement\n***********************\n\n raise_stmt ::= "raise" [expression ["from" expression]]\n\nIf no expressions are present, ``raise`` re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a ``RuntimeError`` exception is raised indicating\nthat this is an error.\n\nOtherwise, ``raise`` evaluates the first expression as the exception\nobject. It must be either a subclass or an instance of\n``BaseException``. 
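Both accepted forms can be seen in a short sketch:

    try:
        raise ValueError                # a class: instantiated with no arguments
    except ValueError as exc:
        assert exc.args == ()

    try:
        raise ValueError("bad value")   # an instance is used as-is
    except ValueError as exc:
        assert exc.args == ("bad value",)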
If it is a class, the exception instance will be\nobtained when needed by instantiating the class with no arguments.\n\nThe *type* of the exception is the exception instance\'s class, the\n*value* is the instance itself.\n\nA traceback object is normally created automatically when an exception\nis raised and attached to it as the ``__traceback__`` attribute, which\nis writable. You can create an exception and set your own traceback in\none step using the ``with_traceback()`` exception method (which\nreturns the same exception instance, with its traceback set to its\nargument), like so:\n\n raise Exception("foo occurred").with_traceback(tracebackobj)\n\nThe ``from`` clause is used for exception chaining: if given, the\nsecond *expression* must be another exception class or instance, which\nwill then be attached to the raised exception as the ``__cause__``\nattribute (which is writable). If the raised exception is not\nhandled, both exceptions will be printed:\n\n >>> try:\n ... print(1 / 0)\n ... except Exception as exc:\n ... raise RuntimeError("Something bad happened") from exc\n ...\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n ZeroDivisionError: int division or modulo by zero\n\n The above exception was the direct cause of the following exception:\n\n Traceback (most recent call last):\n File "<stdin>", line 4, in <module>\n RuntimeError: Something bad happened\n\nA similar mechanism works implicitly if an exception is raised inside\nan exception handler: the previous exception is then attached as the\nnew exception\'s ``__context__`` attribute:\n\n >>> try:\n ... print(1 / 0)\n ... except:\n ... raise RuntimeError("Something bad happened")\n ...\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n ZeroDivisionError: int division or modulo by zero\n\n During handling of the above exception, another exception occurred:\n\n Traceback (most recent call last):\n File "<stdin>", line 4, in <module>\n RuntimeError: Something bad happened\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n', 'return': '\nThe ``return`` statement\n************************\n\n return_stmt ::= "return" [expression_list]\n\n``return`` may only occur syntactically nested in a function\ndefinition, not within a nested class definition.\n\nIf an expression list is present, it is evaluated, else ``None`` is\nsubstituted.\n\n``return`` leaves the current function call with the expression list\n(or ``None``) as return value.\n\nWhen ``return`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the function.\n\nIn a generator function, the ``return`` statement indicates that the\ngenerator is done and will cause ``StopIteration`` to be raised. The\nreturned value (if any) is used as an argument to construct\n``StopIteration`` and becomes the ``StopIteration.value`` attribute.\n', 'sequence-types': "\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. 
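Explicit chaining with the ``from`` clause, as described under the ``raise`` statement above, looks like this in application code (``load_port`` is an invented helper):

    def load_port(text):
        try:
            return int(text)
        except ValueError as exc:
            # the original error becomes __cause__ of the new one
            raise RuntimeError("invalid port setting") from exc

    try:
        load_port("not a number")
    except RuntimeError as exc:
        assert isinstance(exc.__cause__, ValueError)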
The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``get()``,\n``clear()``, ``setdefault()``, ``pop()``, ``popitem()``, ``copy()``,\nand ``update()`` behaving similar to those for Python's standard\ndictionary objects. The ``collections`` module provides a\n``MutableMapping`` abstract base class to help create those methods\nfrom a base set of ``__getitem__()``, ``__setitem__()``,\n``__delitem__()``, and ``keys()``. Mutable sequences should provide\nmethods ``append()``, ``count()``, ``index()``, ``extend()``,\n``insert()``, ``pop()``, ``remove()``, ``reverse()`` and ``sort()``,\nlike Python standard list objects. Finally, sequence types should\nimplement addition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods ``__add__()``, ``__radd__()``,\n``__iadd__()``, ``__mul__()``, ``__rmul__()`` and ``__imul__()``\ndescribed below; they should not define other numerical operators. It\nis recommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should search the mapping's keys; for\nsequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``keys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn't define a ``__bool__()`` method and whose ``__len__()``\n method returns zero is considered to be false in a Boolean context.\n\nNote: Slicing is done exclusively with the following three methods. A\n call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with\n ``None``.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. 
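A minimal read-only sequence (``Squares`` is invented for illustration) shows how ``__len__()`` and ``__getitem__()`` cooperate with iteration, membership tests, reverse iteration and slicing:

    class Squares:
        def __init__(self, n):
            self.n = n
        def __len__(self):
            return self.n
        def __getitem__(self, index):
            if isinstance(index, slice):
                return [self[i] for i in range(*index.indices(self.n))]
            if index < 0:                # negative indexes are up to us
                index += self.n
            if not 0 <= index < self.n:
                raise IndexError(index)  # lets 'for' detect the end
            return index * index

    s = Squares(5)
    assert list(s) == [0, 1, 4, 9, 16]   # iteration falls back to __getitem__
    assert 9 in s                        # membership via the same protocol
    assert s[1:4] == [1, 4, 9]
    assert list(reversed(s)) == [16, 9, 4, 1, 0]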
The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``keys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don't define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n", 'shifting': '\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by ``pow(2,n)``. A\nleft shift by *n* bits is defined as multiplication with ``pow(2,n)``.\n\nNote: In the current implementation, the right-hand operand is required to\n be at most ``sys.maxsize``. If the right-hand operand is larger\n than ``sys.maxsize`` an ``OverflowError`` exception is raised.\n', 'slicings': '\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or ``del`` statements. 
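The shift definitions above are easy to verify; any small integers will do:

    x, n = 37, 2
    assert x >> n == x // 2**n    # right shift: floor division by pow(2, n)
    assert x << n == x * 2**n     # left shift: multiplication by pow(2, n)
    assert -5 >> 1 == -5 // 2     # also holds for negative values: both are -3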
The syntax for a\nslicing:\n\n slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice\n proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice).\n\nThe semantics for a slicing are as follows. The primary must evaluate\nto a mapping object, and it is indexed (using the same\n``__getitem__()`` method as normal subscription) with a key that is\nconstructed from the slice list, as follows. If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of a proper slice is a\nslice object (see section *The standard type hierarchy*) whose\n``start``, ``stop`` and ``step`` attributes are the values of the\nexpressions given as lower bound, upper bound and stride,\nrespectively, substituting ``None`` for missing expressions.\n', 'specialattrs': '\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the ``dir()`` built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nclass.__qualname__\n\n The *qualified name* of the class or type.\n\n New in version 3.3.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in ``__mro__``.\n\nclass.__subclasses__()\n\n Each class keeps a list of weak references to its immediate\n subclasses. This method returns a list of all those references\n still alive. 
Example:\n\n >>> int.__subclasses__()\n [<class \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found in\n the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list ``[1, 2]`` is considered equal to\n ``[1.0, 2.0]``, and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property being\n one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase), or "Lt"\n (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a singleton\n tuple whose only element is the tuple to be formatted.\n', 'specialnames': '\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named ``__getitem__()``, and ``x`` is an instance of this\nclass, then ``x[i]`` is roughly equivalent to ``type(x).__getitem__(x,\ni)``. Except where mentioned, attempts to execute an operation raise\nan exception when no appropriate method is defined (typically\n``AttributeError`` or ``TypeError``).\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n``NodeList`` interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. 
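``__new__()`` customizing an immutable type, in a minimal sketch (``UpperStr`` is an invented class):

    class UpperStr(str):
        def __new__(cls, value):
            # str is immutable: the value must be chosen in __new__,
            # because by __init__ time the instance already exists
            return super().__new__(cls, value.upper())

    s = UpperStr("hello")
    assert s == "HELLO" and isinstance(s, str)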
If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_info()[2]`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.last_traceback``. Circular references which are garbage are\n detected when the option cycle detector is enabled (it\'s on by\n default), but can only be cleaned up if there are no Python-\n level ``__del__()`` methods involved. Refer to the documentation\n for the ``gc`` module for more information about how\n ``__del__()`` methods are handled by the cycle detector,\n particularly the description of the ``garbage`` value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function to compute the\n "official" string representation of an object. 
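The explicit base-class call mentioned above, spelled out in a short sketch (names invented):

    class Base:
        def __init__(self, name):
            self.name = name

    class Derived(Base):
        def __init__(self, name, extra):
            Base.__init__(self, name)   # initialize the base class part
            self.extra = extra

    d = Derived("x", 42)
    assert (d.name, d.extra) == ("x", 42)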
If at all possible,\n this should look like a valid Python expression that could be used\n to recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n ``<...some useful description...>`` should be returned. The return\n value must be a string object. If a class defines ``__repr__()``\n but not ``__str__()``, then ``__repr__()`` is also used when an\n "informal" string representation of instances of that class is\n required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by ``str(object)`` and the built-in functions ``format()``\n and ``print()`` to compute the "informal" or nicely printable\n string representation of an object. The return value must be a\n *string* object.\n\n This method differs from ``object.__repr__()`` in that there is no\n expectation that ``__str__()`` return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type ``object``\n calls ``object.__repr__()``.\n\nobject.__bytes__(self)\n\n Called by ``bytes()`` to compute a byte-string representation of an\n object. This should return a ``bytes`` object.\n\nobject.__format__(self, format_spec)\n\n Called by the ``format()`` built-in function (and by extension, the\n ``str.format()`` method of class ``str``) to produce a "formatted"\n string representation of an object. The ``format_spec`` argument is\n a string that contains a description of the formatting options\n desired. The interpretation of the ``format_spec`` argument is up\n to the type implementing ``__format__()``, however most classes\n will either delegate formatting to one of the built-in types, or\n use a similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` calls\n ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and ``x>=y`` calls\n ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. 
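A sketch of a totally ordered class built from ``__eq__()`` and ``__lt__()`` with ``functools.total_ordering()`` (``Version`` is an invented class):

    import functools

    @functools.total_ordering
    class Version:
        def __init__(self, major, minor):
            self.major, self.minor = major, minor
        def __eq__(self, other):
            if not isinstance(other, Version):
                return NotImplemented
            return (self.major, self.minor) == (other.major, other.minor)
        def __lt__(self, other):
            if not isinstance(other, Version):
                return NotImplemented
            return (self.major, self.minor) < (other.major, other.minor)

    assert Version(1, 2) < Version(1, 10)
    assert Version(1, 2) >= Version(1, 1)   # supplied by total_ordering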
See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define an ``__eq__()`` method it should not\n define a ``__hash__()`` operation either; if it defines\n ``__eq__()`` but not ``__hash__()``, its instances will not be\n usable as items in hashable collections. If a class defines\n mutable objects and implements an ``__eq__()`` method, it should\n not implement ``__hash__()``, since the implementation of hashable\n collections requires that a key\'s hash value is immutable (if the\n object\'s hash value changes, it will be in the wrong hash bucket).\n\n User-defined classes have ``__eq__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns an appropriate value such\n that ``x == y`` implies both that ``x is y`` and ``hash(x) ==\n hash(y)``.\n\n A class that overrides ``__eq__()`` and does not define\n ``__hash__()`` will have its ``__hash__()`` implicitly set to\n ``None``. When the ``__hash__()`` method of a class is ``None``,\n instances of the class will raise an appropriate ``TypeError`` when\n a program attempts to retrieve their hash value, and will also be\n correctly identified as unhashable when checking ``isinstance(obj,\n collections.Hashable``).\n\n If a class that overrides ``__eq__()`` needs to retain the\n implementation of ``__hash__()`` from a parent class, the\n interpreter must be told this explicitly by setting ``__hash__ =\n <ParentClass>.__hash__``.\n\n If a class that does not override ``__eq__()`` wishes to suppress\n hash support, it should include ``__hash__ = None`` in the class\n definition. 
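A hashable value class following these rules (``Point`` is invented): hash exactly the components that take part in ``__eq__()``.

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __eq__(self, other):
            if not isinstance(other, Point):
                return NotImplemented
            return (self.x, self.y) == (other.x, other.y)
        def __hash__(self):
            return hash((self.x, self.y))   # mix the compared components

    assert Point(1, 2) == Point(1, 2)
    assert len({Point(1, 2), Point(1, 2)}) == 1   # usable in hashed collections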
A class which defines its own ``__hash__()`` that\n explicitly raises a ``TypeError`` would be incorrectly identified\n as hashable by an ``isinstance(obj, collections.Hashable)`` call.\n\n Note: By default, the ``__hash__()`` values of str, bytes and datetime\n objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also ``PYTHONHASHSEED``.\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``. When this method\n is not defined, ``__len__()`` is called, if it is defined, and the\n object is considered true if its result is nonzero. If a class\n defines neither ``__len__()`` nor ``__bool__()``, all its instances\n are considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. 
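A common use of ``__getattr__()`` is delegation; a minimal sketch (``Wrapper`` is an invented class):

    class Wrapper:
        def __init__(self, wrapped):
            self.wrapped = wrapped
        def __getattr__(self, name):
            # called only when normal lookup fails, so accessing
            # self.wrapped here does not recurse
            return getattr(self.wrapped, name)

    w = Wrapper([1, 2, 3])
    w.append(4)                        # found on the wrapped list
    assert w.wrapped == [1, 2, 3, 4]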
This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when ``dir()`` is called on the object. A sequence must be\n returned. ``dir()`` converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. 
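A small data descriptor that validates assignment (``Positive`` and ``Order`` are invented names); defining ``__set__()`` is what makes it take precedence over the instance dictionary:

    class Positive:
        def __init__(self, name):
            self.name = name
        def __get__(self, instance, owner):
            if instance is None:
                return self             # accessed on the class itself
            return instance.__dict__[self.name]
        def __set__(self, instance, value):
            if value <= 0:
                raise ValueError(self.name + " must be positive")
            instance.__dict__[self.name] = value

    class Order:
        quantity = Positive("quantity")

    o = Order()
    o.quantity = 3           # routed through Positive.__set__()
    assert o.quantity == 3   # routed through Positive.__get__()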
Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to an object instance, ``a.x`` is transformed into the\n call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a class, ``A.x`` is transformed into the call:\n ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. 
If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``int``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using ``type()``. The class body\nis executed in a new namespace and the class name is bound locally to\nthe result of ``type(name, bases, namespace)``.\n\nThe class creation process can be customised by passing the\n``metaclass`` keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. In\nthe following example, both ``MyClass`` and ``MySubclass`` are\ninstances of ``Meta``:\n\n class Meta(type):\n pass\n\n class MyClass(metaclass=Meta):\n pass\n\n class MySubclass(MyClass):\n pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then ``type()`` is\n used\n\n* if an explicit metaclass is given and it is *not* an instance of\n ``type()``, then it is used directly as the metaclass\n\n* if an instance of ``type()`` is given as the explicit metaclass, or\n bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. ``type(cls)``) of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. 
If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with ``TypeError``.\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. If the metaclass has a ``__prepare__``\nattribute, it is called as ``namespace = metaclass.__prepare__(name,\nbases, **kwds)`` (where the additional keyword arguments, if any, come\nfrom the class definition).\n\nIf the metaclass has no ``__prepare__`` attribute, then the class\nnamespace is initialised as an empty ``dict()`` instance.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3000\n Introduced the ``__prepare__`` namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as ``exec(body, globals(),\nnamespace)``. The key difference from a normal call to ``exec()`` is\nthat lexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling ``metaclass(name, bases,\nnamespace, **kwds)`` (the additional keywords passed here are the same\nas those passed to ``__prepare__``).\n\nThis class object is the one that will be referenced by the zero-\nargument form of ``super()``. ``__class__`` is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either ``__class__`` or ``super``. This allows the zero argument\nform of ``super()`` to correctly identify the class being defined\nbased on lexical scoping, while the class or instance that was used to\nmake the current call is identified based on the first argument passed\nto the method.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also:\n\n **PEP 3135** - New super\n Describes the implicit ``__class__`` closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. 
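The implicit ``__class__`` closure just described is what makes the zero-argument form of ``super()`` work; a minimal sketch (names invented):

    class Base:
        def greet(self):
            return "Base"

    class Derived(Base):
        def greet(self):
            # the compiler creates an implicit __class__ reference here,
            # which zero-argument super() uses to find the defining class
            return "Derived(" + super().greet() + ")"

    assert Derived().greet() == "Derived(Base)"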
Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses a\n``collections.OrderedDict`` to remember the order in which class\nmembers were defined:\n\n   class OrderedClass(type):\n\n       @classmethod\n       def __prepare__(metacls, name, bases, **kwds):\n           return collections.OrderedDict()\n\n       def __new__(cls, name, bases, namespace, **kwds):\n           result = type.__new__(cls, name, bases, dict(namespace))\n           result.members = tuple(namespace)\n           return result\n\n   class A(metaclass=OrderedClass):\n       def one(self): pass\n       def two(self): pass\n       def three(self): pass\n       def four(self): pass\n\n   >>> A.members\n   (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s ``__prepare__()`` method which returns an\nempty ``collections.OrderedDict``. That mapping records the methods\nand attributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s ``__new__()`` method gets\ninvoked. That method builds the new type and it saves the ordered\ndictionary keys in an attribute called ``members``.\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n``isinstance()`` and ``issubclass()`` built-in functions.\n\nIn particular, the metaclass ``abc.ABCMeta`` implements these methods\nin order to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n   Return true if *instance* should be considered a (direct or\n   indirect) instance of *class*. If defined, called to implement\n   ``isinstance(instance, class)``.\n\nclass.__subclasscheck__(self, subclass)\n\n   Return true if *subclass* should be considered a (direct or\n   indirect) subclass of *class*. If defined, called to implement\n   ``issubclass(subclass, class)``.\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n   **PEP 3119** - Introducing Abstract Base Classes\n      Includes the specification for customizing ``isinstance()`` and\n      ``issubclass()`` behavior through ``__instancecheck__()`` and\n      ``__subclasscheck__()``, with motivation for this functionality\n      in the context of adding Abstract Base Classes (see the ``abc``\n      module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n   Called when the instance is "called" as a function; if this method\n   is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n   ``x.__call__(arg1, arg2, ...)``.\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. 
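As a running illustration for several of the methods described below, here is a minimal sketch of a mapping-like class (``LowerDict`` is an illustrative name, not a standard type):\n\n   class LowerDict:\n       """Mapping that treats its string keys case-insensitively."""\n\n       def __init__(self):\n           self._data = {}\n\n       def __len__(self):\n           return len(self._data)\n\n       def __getitem__(self, key):\n           return self._data[key.lower()]\n\n       def __setitem__(self, key, value):\n           self._data[key.lower()] = value\n\n       def __delitem__(self, key):\n           del self._data[key.lower()]\n\n       def __iter__(self):\n           return iter(self._data)\n\n       def __contains__(self, key):\n           return key.lower() in self._data\n\n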
The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``get()``,\n``clear()``, ``setdefault()``, ``pop()``, ``popitem()``, ``copy()``,\nand ``update()`` behaving similar to those for Python\'s standard\ndictionary objects. The ``collections`` module provides a\n``MutableMapping`` abstract base class to help create those methods\nfrom a base set of ``__getitem__()``, ``__setitem__()``,\n``__delitem__()``, and ``keys()``. Mutable sequences should provide\nmethods ``append()``, ``count()``, ``index()``, ``extend()``,\n``insert()``, ``pop()``, ``remove()``, ``reverse()`` and ``sort()``,\nlike Python standard list objects. Finally, sequence types should\nimplement addition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods ``__add__()``, ``__radd__()``,\n``__iadd__()``, ``__mul__()``, ``__rmul__()`` and ``__imul__()``\ndescribed below; they should not define other numerical operators. It\nis recommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should search the mapping\'s keys; for\nsequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``keys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn\'t define a ``__bool__()`` method and whose ``__len__()``\n method returns zero is considered to be false in a Boolean context.\n\nNote: Slicing is done exclusively with the following three methods. A\n call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with\n ``None``.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. 
The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``keys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``). For instance, to evaluate the expression ``x + y``, where\n *x* is an instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. 
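A minimal\n   sketch of such a method (``Vec`` is an illustrative name; a real\n   class would usually define the related methods described below as\n   well):\n\n      class Vec:\n          def __init__(self, x, y):\n              self.x, self.y = x, y\n\n          def __add__(self, other):\n              if not isinstance(other, Vec):\n                  # Defer to the other operand (see __radd__ below)\n                  return NotImplemented\n              return Vec(self.x + other.x, self.y + other.y)\n\n   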
The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()``. Note that\n ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``,\n ``divmod()``, ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``,\n ``|``) with reflected (swapped) operands. These functions are only\n called if the left operand does not support the corresponding\n operation and the operands are of different types. [2] For\n instance, to evaluate the expression ``x - y``, where *y* is an\n instance of a class that has an ``__rsub__()`` method,\n ``y.__rsub__(x)`` is called if ``x.__sub__(y)`` returns\n *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``float()`` and ``round()``. Should return a value of\n the appropriate type.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. 
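A minimal sketch (``Seat`` is an\n   illustrative name):\n\n      class Seat:\n          def __init__(self, number):\n              self.number = number\n\n          def __index__(self):\n              # Lets a Seat be used wherever an integer is expected\n              return self.number\n\n      >>> [\'a\', \'b\', \'c\'][Seat(1)]\n      \'b\'\n\n   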
Also called whenever\n Python needs an integer object (such as in slicing, or in the\n built-in ``bin()``, ``hex()`` and ``oct()`` functions). Must return\n an integer.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception:\n\n >>> class C:\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as ``__hash__()`` and ``__repr__()`` that are implemented\nby all objects, including type objects. If the implicit lookup of\nthese methods used the conventional lookup process, they would fail\nwhen invoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe ``__getattribute__()`` method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... 
print("Metaclass getattribute invoked")\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object, metaclass=Meta):\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print("Class getattribute invoked")\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the ``__getattribute__()`` machinery in this fashion\nprovides significant scope for speed optimisations within the\ninterpreter, at the cost of some flexibility in the handling of\nspecial methods (the special method *must* be set on the class object\nitself in order to be consistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as ``__add__()``) fails the operation is\n not supported, which is why the reflected method is not called.\n', 'string-methods': '\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see ``str.format()``,\n*Format String Syntax* and *String Formatting*) and the other based on\nC ``printf`` style formatting that handles a narrower range of types\nand is slightly harder to use correctly, but is often faster for the\ncases it can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the ``re`` module).\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter ``\'\xc3\x9f\'`` is equivalent to\n ``"ss"``. Since it is already lowercase, ``lower()`` would do\n nothing to ``\'\xc3\x9f\'``; ``casefold()`` converts it to ``"ss"``.\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is ``\'utf-8\'``. *errors* may be given to set a different\n error handling scheme. The default for *errors* is ``\'strict\'``,\n meaning that encoding errors raise a ``UnicodeError``. 
Other\n possible values are ``\'ignore\'``, ``\'replace\'``,\n ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and any other name\n registered via ``codecs.register_error()``, see section *Codec Base\n Classes*. For a list of possible encodings, see section *Standard\n Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by zero or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to ``str.format(**mapping)``, except that ``mapping`` is\n used directly and not copied to a ``dict`` . This is useful if for\n example ``mapping`` is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character\n ``c`` is alphanumeric if one of the following returns ``True``:\n ``c.isalpha()``, ``c.isdecimal()``, ``c.isdigit()``, or\n ``c.isnumeric()``.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". 
Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when ``repr()`` is\n invoked on a string. It has no bearing on the handling of strings\n written to ``sys.stdout`` or ``sys.stderr``.)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. A ``TypeError`` will be raised if there are\n any non-string values in *iterable*, including ``bytes`` objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). 
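For example:\n\n      >>> \'py\'.ljust(6, \'.\')\n      \'py....\'\n\n   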
The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n ``str.translate()``.\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified or ``-1``, then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, ``\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()`` returns\n ``[\'ab c\', \'\', \'de fg\', \'kl\']``, while the same call with\n ``splitlines(True)`` returns ``[\'ab c\\n\', \'\\n\', \'de fg\\r\',\n \'kl\\r\\n\']``.\n\n Unlike ``split()`` when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. Note that it is not necessarily true that\n ``s.swapcase().swapcase() == s``.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n Return a copy of the *s* where all characters have been mapped\n through the *map* which must be a dictionary of Unicode ordinals\n (integers) to Unicode ordinals, strings or ``None``. Unmapped\n characters are left untouched. Characters mapped to ``None`` are\n deleted.\n\n You can use ``str.maketrans()`` to create a translation map from\n character-to-character mappings in different formats.\n\n Note: An even more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see\n ``encodings.cp1251`` for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that ``str.upper().isupper()`` might\n be ``False`` if ``s`` contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to ``len(s)``.\n', 'strings': '\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "R" | "U"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | stringescapeseq\n longstringitem ::= longstringchar | stringescapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n stringescapeseq ::= "\\" <any source character>\n\n bytesliteral ::= bytesprefix(shortbytes | longbytes)\n bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n shortbytesitem ::= shortbyteschar | bytesescapeseq\n longbytesitem ::= longbyteschar | bytesescapeseq\n shortbyteschar ::= <any ASCII character except "\\" or newline or the quote>\n longbyteschar ::= <any ASCII character except "\\">\n bytesescapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the ``stringprefix`` or\n``bytesprefix`` and the rest of the literal. 
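A few quick\nillustrations of these prefixes (a minimal sketch; the prefixes\nthemselves are described further below):\n\n   >>> b\'abc\'        # bytes literal\n   b\'abc\'\n   >>> r\'C:\\new\'     # raw string: the backslash is literal\n   \'C:\\\\new\'\n   >>> rb\'\\x00\'      # raw bytes literal\n   b\'\\\\x00\'\n\n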
The source character set\nis defined by the encoding declaration; it is UTF-8 if no encoding\ndeclaration is given in the source file; see section *Encoding\ndeclarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes (``\'``) or double quotes (``"``). They can also be\nenclosed in matching groups of three single or double quotes (these\nare generally referred to as *triple-quoted strings*). The backslash\n(``\\``) character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with ``\'b\'`` or ``\'B\'``; they\nproduce an instance of the ``bytes`` type instead of the ``str`` type.\nThey may only contain ASCII characters; bytes with a numeric value of\n128 or greater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix unicode strings with a\n``u`` prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter ``\'r\'`` or ``\'R\'``; such strings are called *raw strings* and\ntreat backslashes as literal characters. As a result, in string\nliterals, ``\'\\U\'`` and ``\'\\u\'`` escapes in raw strings are not treated\nspecially. Given that Python 2.x\'s raw unicode literals behave\ndifferently than Python 3.x\'s the ``\'ur\'`` syntax is not supported.\n\n New in version 3.3: The ``\'rb\'`` prefix of raw bytes literals has\n been added as a synonym of ``\'br\'``.\n\n New in version 3.3: Support for the unicode legacy literal\n (``u\'value\'``) was reintroduced to simplify the maintenance of dual\n Python 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either ``\'`` or ``"``.)\n\nUnless an ``\'r\'`` or ``\'R\'`` prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\newline`` | Backslash and newline ignored | |\n+-------------------+-----------------------------------+---------+\n| ``\\\\`` | Backslash (``\\``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\\'`` | Single quote (``\'``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\"`` | Double quote (``"``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\a`` | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| ``\\b`` | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| ``\\f`` | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\n`` | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\r`` | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| ``\\t`` | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| ``\\v`` | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| ``\\ooo`` | Character with octal value *ooo* | (1,3) |\n+-------------------+-----------------------------------+---------+\n| ``\\xhh`` | Character with hex value *hh* | (2,3) |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\N{name}`` | Character named *name* in the | (4) |\n| | Unicode database | |\n+-------------------+-----------------------------------+---------+\n| ``\\uxxxx`` | Character with 16-bit hex value | (5) |\n| | *xxxx* | |\n+-------------------+-----------------------------------+---------+\n| ``\\Uxxxxxxxx`` | Character with 32-bit hex value | (6) |\n| | *xxxxxxxx* | |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the byte\n with the given value. In a string literal, these escapes denote a\n Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n added.\n\n5. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence. Exactly four hex digits are\n required.\n\n6. Any Unicode character can be encoded this way. Exactly eight hex\n digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
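For example (each second line\nshows the *repr* of the resulting value):\n\n   >>> \'\\q\'         # not a recognized escape sequence\n   \'\\\\q\'\n   >>> b\'\\u0041\'    # \\u is only recognized in string literals\n   b\'\\\\u0041\'\n\n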
It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw string, string quotes can be escaped with a backslash,\nbut the backslash remains in the string; for example, ``r"\\""`` is a\nvalid string literal consisting of two characters: a backslash and a\ndouble quote; ``r"\\"`` is not a valid string literal (even a raw\nstring cannot end in an odd number of backslashes). Specifically, *a\nraw string cannot end in a single backslash* (since the backslash\nwould escape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n', 'subscriptions': '\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object that supports subscription,\ne.g. a list or dictionary. User-defined objects can support\nsubscription by defining a ``__getitem__()`` method.\n\nFor built-in objects, there are two types of objects that support\nsubscription:\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to\nan integer or a slice (as discussed in the following section).\n\nThe formal syntax makes no special provision for negative indices in\nsequences; however, built-in sequences all provide a ``__getitem__()``\nmethod that interprets negative indices by adding the length of the\nsequence to the index (so that ``x[-1]`` selects the last item of\n``x``). The resulting value must be a nonnegative integer less than\nthe number of items in the sequence, and the subscription selects the\nitem whose index is that value (counting from zero). Since the support\nfor negative indices and slicing occurs in the object\'s\n``__getitem__()`` method, subclasses overriding this method will need\nto explicitly add that support.\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', 'truth': "\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an ``if`` or\n``while`` condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* ``None``\n\n* ``False``\n\n* zero of any numeric type, for example, ``0``, ``0.0``, ``0j``.\n\n* any empty sequence, for example, ``''``, ``()``, ``[]``.\n\n* any empty mapping, for example, ``{}``.\n\n* instances of user-defined classes, if the class defines a\n ``__bool__()`` or ``__len__()`` method, when that method returns the\n integer zero or ``bool`` value ``False``. [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn ``0`` or ``False`` for false and ``1`` or ``True`` for true,\nunless otherwise stated. 
(Important exception: the Boolean operations\n``or`` and ``and`` always return one of their operands.)\n", 'try': '\nThe ``try`` statement\n*********************\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n   try_stmt  ::= try1_stmt | try2_stmt\n   try1_stmt ::= "try" ":" suite\n                 ("except" [expression ["as" target]] ":" suite)+\n                 ["else" ":" suite]\n                 ["finally" ":" suite]\n   try2_stmt ::= "try" ":" suite\n                 "finally" ":" suite\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the ``as`` keyword in that except clause,\nif present, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using ``as target``, it is cleared\nat the end of the except clause. This is as if\n\n   except E as N:\n       foo\n\nwas translated to\n\n   except E as N:\n       try:\n           foo\n       finally:\n           del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the ``sys`` module and can be accessed via\n``sys.exc_info()``. ``sys.exc_info()`` returns a 3-tuple consisting of\nthe exception class, the exception instance and a traceback object\n(see section *The standard type hierarchy*) identifying the point in\nthe program where the exception occurred. ``sys.exc_info()`` values\nare restored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. 
[2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception it is re-raised at the end of\nthe ``finally`` clause. If the ``finally`` clause raises another\nexception, the saved exception is set as the context of the new\nexception. If the ``finally`` clause executes a ``return`` or\n``break`` statement, the saved exception is discarded:\n\n def f():\n try:\n 1/0\n finally:\n return 42\n\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n', 'types': '\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the literal ``...`` or the\n built-in name ``Ellipsis``. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are two types of integers:\n\n Integers (``int``)\n\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans (``bool``)\n These represent the truth values False and True. The two\n objects representing the values False and True are the only\n Boolean objects. The Boolean type is a subtype of the integer\n type, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ``"False"`` or\n ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex`` (``complex``)\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. 
(If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n A string is a sequence of values that represent Unicode\n codepoints. All the codepoints in range ``U+0000 - U+10FFFF``\n can be represented in a string. Python doesn\'t have a\n ``chr`` type, and every character in the string is\n represented as a string object with length ``1``. The built-\n in function ``ord()`` converts a character to its codepoint\n (as an integer); ``chr()`` converts an integer in range ``0 -\n 10FFFF`` to the corresponding character. ``str.encode()`` can\n be used to convert a ``str`` to ``bytes`` using the given\n encoding, and ``bytes.decode()`` can be used to achieve the\n opposite.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Bytes\n A bytes object is an immutable array. The items are 8-bit\n bytes, represented by integers in the range 0 <= x < 256.\n Bytes literals (like ``b\'abc\'``) and the built-in function\n ``bytes()`` can be used to construct bytes objects. Also,\n bytes objects can be decoded to strings via the ``decode()``\n method.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in ``bytearray()`` constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type, as does the ``collections`` module.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. 
They are created by the\n built-in ``frozenset()`` constructor. As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm.ndbm`` and ``dbm.gnu`` provide\n additional examples of mapping types, as does the\n ``collections`` module.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +---------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +===========================+=================================+=============+\n | ``__doc__`` | The function\'s documentation | Writable |\n | | string, or ``None`` if | |\n | | unavailable | |\n +---------------------------+---------------------------------+-------------+\n | ``__name__`` | The function\'s name | Writable |\n +---------------------------+---------------------------------+-------------+\n | ``__qualname__`` | The function\'s *qualified name* | Writable |\n | | New in version 3.3. | |\n +---------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. | |\n +---------------------------+---------------------------------+-------------+\n | ``__defaults__`` | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value | |\n +---------------------------+---------------------------------+-------------+\n | ``__code__`` | The code object representing | Writable |\n | | the compiled function body. | |\n +---------------------------+---------------------------------+-------------+\n | ``__globals__`` | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. 
| |\n +---------------------------+---------------------------------+-------------+\n | ``__dict__`` | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +---------------------------+---------------------------------+-------------+\n | ``__closure__`` | ``None`` or a tuple of cells | Read-only |\n | | that contain bindings for the | |\n | | function\'s free variables. | |\n +---------------------------+---------------------------------+-------------+\n | ``__annotations__`` | A dict containing annotations | Writable |\n | | of parameters. The keys of the | |\n | | dict are the parameter names, | |\n | | or ``\'return\'`` for the return | |\n | | annotation, if provided. | |\n +---------------------------+---------------------------------+-------------+\n | ``__kwdefaults__`` | A dict containing defaults for | Writable |\n | | keyword-only parameters. | |\n +---------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n Instance methods\n An instance method object combines a class, a class instance and\n any callable object (normally a user-defined function).\n\n Special read-only attributes: ``__self__`` is the class instance\n object, ``__func__`` is the function object; ``__doc__`` is the\n method\'s documentation (same as ``__func__.__doc__``);\n ``__name__`` is the method name (same as ``__func__.__name__``);\n ``__module__`` is the name of the module the method was defined\n in, or ``None`` if unavailable.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object or a class\n method object.\n\n When an instance method object is created by retrieving a user-\n defined function object from a class via one of its instances,\n its ``__self__`` attribute is the instance, and the method\n object is said to be bound. The new method\'s ``__func__``\n attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``__func__``\n attribute of the new instance is not the original method object\n but its ``__func__`` attribute.\n\n When an instance method object is created by retrieving a class\n method object from a class or instance, its ``__self__``\n attribute is the class itself, and its ``__func__`` attribute is\n the function object underlying the class method.\n\n When an instance method object is called, the underlying\n function (``__func__``) is called, inserting the class instance\n (``__self__``) in front of the argument list. 
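As an illustrative sketch (the names ``C``, ``f`` and ``c`` are arbitrary, chosen to match the prose that follows):\n\n      >>> class C:\n      ...     def f(self, x):\n      ...         return x\n      ...\n      >>> c = C()\n      >>> c.f.__self__ is c                  # bound to the instance\n      True\n      >>> c.f.__func__ is C.__dict__[\'f\']    # wraps the plain function\n      True\n\n      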
For instance,\n      when ``C`` is a class which contains a definition for a function\n      ``f()``, and ``x`` is an instance of ``C``, calling ``x.f(1)``\n      is equivalent to calling ``C.f(x, 1)``.\n\n      When an instance method object is derived from a class method\n      object, the "class instance" stored in ``__self__`` will\n      actually be the class itself, so that calling either ``x.f(1)``\n      or ``C.f(1)`` is equivalent to calling ``f(C, 1)`` where ``f`` is\n      the underlying function.\n\n      Note that the transformation from function object to instance\n      method object happens each time the attribute is retrieved from\n      the instance. In some cases, a fruitful optimization is to\n      assign the attribute to a local variable and call that local\n      variable. Also notice that this transformation only happens for\n      user-defined functions; other callable objects (and all non-\n      callable objects) are retrieved without transformation. It is\n      also important to note that user-defined functions which are\n      attributes of a class instance are not converted to bound\n      methods; this *only* happens when the function is an attribute\n      of the class.\n\n   Generator functions\n      A function or method which uses the ``yield`` statement (see\n      section *The yield statement*) is called a *generator function*.\n      Such a function, when called, always returns an iterator object\n      which can be used to execute the body of the function: calling\n      the iterator\'s ``__next__()`` method will cause the\n      function to execute until it provides a value using the\n      ``yield`` statement. When the function executes a ``return``\n      statement or falls off the end, a ``StopIteration`` exception is\n      raised and the iterator will have reached the end of the set of\n      values to be returned.\n\n   Built-in functions\n      A built-in function object is a wrapper around a C function.\n      Examples of built-in functions are ``len()`` and ``math.sin()``\n      (``math`` is a standard built-in module). The number and type of\n      the arguments are determined by the C function. Special read-\n      only attributes: ``__doc__`` is the function\'s documentation\n      string, or ``None`` if unavailable; ``__name__`` is the\n      function\'s name; ``__self__`` is set to ``None`` (but see the\n      next item); ``__module__`` is the name of the module the\n      function was defined in or ``None`` if unavailable.\n\n   Built-in methods\n      This is really a different disguise of a built-in function, this\n      time containing an object passed to the C function as an\n      implicit extra argument. An example of a built-in method is\n      ``alist.append()``, assuming *alist* is a list object. In this\n      case, the special read-only attribute ``__self__`` is set to the\n      object denoted by *alist*.\n\n   Classes\n      Classes are callable. These objects normally act as factories\n      for new instances of themselves, but variations are possible for\n      class types that override ``__new__()``. The arguments of the\n      call are passed to ``__new__()`` and, in the typical case, to\n      ``__init__()`` to initialize the new instance.\n\n   Class Instances\n      Instances of arbitrary classes can be made callable by defining\n      a ``__call__()`` method in their class.\n\nModules\n   Modules are a basic organizational unit of Python code, and are\n   created by the *import system* as invoked either by the ``import``\n   statement (see ``import``), or by calling functions such as\n   ``importlib.import_module()`` and built-in ``__import__()``. 
A\n   module object has a namespace implemented by a dictionary object\n   (this is the dictionary referenced by the ``__globals__`` attribute\n   of functions defined in the module). Attribute references are\n   translated to lookups in this dictionary, e.g., ``m.x`` is\n   equivalent to ``m.__dict__["x"]``. A module object does not contain\n   the code object used to initialize the module (since it isn\'t\n   needed once the initialization is done).\n\n   Attribute assignment updates the module\'s namespace dictionary,\n   e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n   Special read-only attribute: ``__dict__`` is the module\'s namespace\n   as a dictionary object.\n\n   **CPython implementation detail:** Because of the way CPython\n   clears module dictionaries, the module dictionary will be cleared\n   when the module falls out of scope even if the dictionary still has\n   live references. To avoid this, copy the dictionary or keep the\n   module around while using its dictionary directly.\n\n   Predefined (writable) attributes: ``__name__`` is the module\'s\n   name; ``__doc__`` is the module\'s documentation string, or ``None``\n   if unavailable; ``__file__`` is the pathname of the file from which\n   the module was loaded, if it was loaded from a file. The\n   ``__file__`` attribute may be missing for certain types of modules,\n   such as C modules that are statically linked into the interpreter;\n   for extension modules loaded dynamically from a shared library, it\n   is the pathname of the shared library file.\n\nCustom classes\n   Custom class types are typically created by class definitions (see\n   section *Class definitions*). A class has a namespace implemented\n   by a dictionary object. Class attribute references are translated\n   to lookups in this dictionary, e.g., ``C.x`` is translated to\n   ``C.__dict__["x"]`` (although there are a number of hooks which\n   allow for other means of locating attributes). When the attribute\n   name is not found there, the attribute search continues in the base\n   classes. This search of the base classes uses the C3 method\n   resolution order which behaves correctly even in the presence of\n   \'diamond\' inheritance structures where there are multiple\n   inheritance paths leading back to a common ancestor. Additional\n   details on the C3 MRO used by Python can be found in the\n   documentation accompanying the 2.3 release at\n   http://www.python.org/download/releases/2.3/mro/.\n\n   When a class attribute reference (for class ``C``, say) would yield\n   a class method object, it is transformed into an instance method\n   object whose ``__self__`` attribute is ``C``. When it would yield\n   a static method object, it is transformed into the object wrapped\n   by the static method object. 
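A small interactive sketch of both transformations (the names ``C``, ``cm`` and ``sm`` are arbitrary samples):\n\n   >>> class C:\n   ...     @classmethod\n   ...     def cm(cls):\n   ...         return cls\n   ...     @staticmethod\n   ...     def sm():\n   ...         return \'the wrapped function, as-is\'\n   ...\n   >>> C.cm.__self__ is C     # class method: transformed, bound to the class\n   True\n   >>> C.sm()                 # static method: the plain wrapped function\n   \'the wrapped function, as-is\'\n\n   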
See section *Implementing Descriptors*\n for another way in which attributes retrieved from a class may\n differ from those actually contained in its ``__dict__``.\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object, it is transformed into an instance method object\n whose ``__self__`` attribute is the instance. Static method and\n class method objects are also transformed; see above under\n "Classes". See section *Implementing Descriptors* for another way\n in which attributes of a class retrieved via its instances may\n differ from the objects actually stored in the class\'s\n ``__dict__``. If no class attribute is found, and the object\'s\n class has a ``__getattr__()`` method, that is called to satisfy the\n lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nI/O objects (also known as file objects)\n A *file object* represents an open file. Various shortcuts are\n available to create file objects: the ``open()`` built-in function,\n and also ``os.popen()``, ``os.fdopen()``, and the ``makefile()``\n method of socket objects (and perhaps by other functions or methods\n provided by extension modules).\n\n The objects ``sys.stdin``, ``sys.stdout`` and ``sys.stderr`` are\n initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams; they are all open in text\n mode and therefore follow the interface defined by the\n ``io.TextIOBase`` abstract class.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. 
The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_lasti`` gives the precise instruction (this is an index into\n the bytecode string of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_lineno`` is the current line number\n of the frame --- writing to this from within a trace function\n jumps to the given line (only for the bottom-most frame). 
A\n debugger can implement a Jump command (aka Set Next Statement)\n by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as the third item of the\n tuple returned by ``sys.exc_info()``. When the program contains\n no suitable handler, the stack trace is written (nicely\n formatted) to the standard error stream; if the interpreter is\n interactive, it is also made available to the user as\n ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices for ``__getitem__()``\n methods. They are also created by the built-in ``slice()``\n function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the slice that the slice object\n would describe if applied to a sequence of *length* items.\n It returns a tuple of three integers; respectively these are\n the *start* and *stop* indices and the *step* or stride\n length of the slice. Missing or out-of-bounds indices are\n handled in a manner consistent with regular slices.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in ``classmethod()`` constructor.\n', 'typesfunctions': '\nFunctions\n*********\n\nFunction objects are created by function definitions. 
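For example (an illustrative definition; the name ``greet`` is arbitrary):\n\n   >>> def greet(name):\n   ...     return \'Hello, \' + name\n   ...\n   >>> type(greet)              # the definition created a function object\n   <class \'function\'>\n\n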
The only\noperation on a function object is to call it: ``func(argument-list)``.\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', 'typesmapping': '\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built-\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass dict(**kwarg)\nclass dict(mapping, **kwarg)\nclass dict(iterable, **kwarg)\n\n   Return a new dictionary initialized from an optional positional\n   argument and a possibly empty set of keyword arguments.\n\n   If no positional argument is given, an empty dictionary is created.\n   If a positional argument is given and it is a mapping object, a\n   dictionary is created with the same key-value pairs as the mapping\n   object. Otherwise, the positional argument must be an *iterable*\n   object. Each item in the iterable must itself be an iterable with\n   exactly two objects. The first object of each item becomes a key\n   in the new dictionary, and the second object the corresponding\n   value. If a key occurs more than once, the last value for that key\n   becomes the corresponding value in the new dictionary.\n\n   If keyword arguments are given, the keyword arguments and their\n   values are added to the dictionary created from the positional\n   argument. If a key being added is already present, the value from\n   the keyword argument replaces the value from the positional\n   argument.\n\n   To illustrate, the following examples all return a dictionary equal\n   to ``{"one": 1, "two": 2, "three": 3}``:\n\n   >>> a = dict(one=1, two=2, three=3)\n   >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n   >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n   >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n   >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n   >>> a == b == c == d == e\n   True\n\n   Providing keyword arguments as in the first example only works for\n   keys that are valid Python identifiers. Otherwise, any valid keys\n   can be used.\n\n   These are the operations that dictionaries support (and therefore,\n   custom mapping types should support too):\n\n   len(d)\n\n      Return the number of items in the dictionary *d*.\n\n   d[key]\n\n      Return the item of *d* with key *key*. 
Raises a ``KeyError`` if\n *key* is not in the map.\n\n If a subclass of dict defines a method ``__missing__()``, if the\n key *key* is not present, the ``d[key]`` operation calls that\n method with the key *key* as argument. The ``d[key]`` operation\n then returns or raises whatever is returned or raised by the\n ``__missing__(key)`` call if the key is not present. No other\n operations or methods invoke ``__missing__()``. If\n ``__missing__()`` is not defined, ``KeyError`` is raised.\n ``__missing__()`` must be a method; it cannot be an instance\n variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n See ``collections.Counter`` for a complete implementation\n including other methods helpful for accumulating and managing\n tallies.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for ``iter(d.keys())``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n items()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See the *documentation of view objects*.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See the\n *documentation of view objects*.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n values()\n\n Return a new view of the dictionary\'s values. See the\n *documentation of view objects*.\n\nSee also:\n\n ``types.MappingProxyType`` can be used to create a read-only view\n of a ``dict``.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.keys()``, ``dict.values()`` and\n``dict.items()`` are *view objects*. 
They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that ``(key, value)`` pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class ``collections.abc.Set`` are available (for example, ``==``,\n``<``, or ``^``).\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n', 'typesmethods': '\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as ``append()`` on\nlists) and class instance methods. Built-in methods are described\nwith the types that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the\n``self`` argument to the argument list. Bound methods have two\nspecial read-only attributes: ``m.__self__`` is the object on which\nthe method operates, and ``m.__func__`` is the function implementing\nthe method. Calling ``m(arg-1, arg-2, ..., arg-n)`` is completely\nequivalent to calling ``m.__func__(m.__self__, arg-1, arg-2, ...,\narg-n)``.\n\nLike function objects, bound method objects support getting arbitrary\nattributes. 
However, since method attributes are actually stored on\nthe underlying function object (``meth.__func__``), setting method\nattributes on bound methods is disallowed. Attempting to set an\nattribute on a method results in an ``AttributeError`` being raised.\nIn order to set a method attribute, you need to explicitly set it on\nthe underlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n AttributeError: \'method\' object has no attribute \'whoami\'\n >>> c.method.__func__.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n', 'typesmodules': "\nModules\n*******\n\nThe only special operation on a module is attribute access:\n``m.name``, where *m* is a module and *name* accesses a name defined\nin *m*'s symbol table. Module attributes can be assigned to. (Note\nthat the ``import`` statement is not, strictly speaking, an operation\non a module object; ``import foo`` does not require a module object\nnamed *foo* to exist, rather it requires an (external) *definition*\nfor a module named *foo* somewhere.)\n\nA special attribute of every module is ``__dict__``. This is the\ndictionary containing the module's symbol table. Modifying this\ndictionary will actually change the module's symbol table, but direct\nassignment to the ``__dict__`` attribute is not possible (you can\nwrite ``m.__dict__['a'] = 1``, which defines ``m.a`` to be ``1``, but\nyou can't write ``m.__dict__ = {}``). Modifying ``__dict__`` directly\nis not recommended.\n\nModules built into the interpreter are written like this: ``<module\n'sys' (built-in)>``. If loaded from a file, they are written as\n``<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>``.\n", 'typesseq': '\nSequence Types --- ``list``, ``tuple``, ``range``\n*************************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The ``collections.abc.Sequence``\nABC is provided to make it easier to correctly implement these\noperations on custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). In the table,\n*s* and *t* are sequences of the same type, *n*, *i*, *j* and *k* are\nintegers and *x* is an arbitrary object that meets any type and value\nrestrictions imposed by *s*.\n\nThe ``in`` and ``not in`` operations have the same priorities as the\ncomparison operations. 
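One practical consequence of this grouping (an illustrative aside): like the comparisons, ``in`` also participates in comparison chaining, which can be surprising:\n\n   >>> 3 in [1, 2, 3] == True     # chained: (3 in [1, 2, 3]) and ([1, 2, 3] == True)\n   False\n   >>> (3 in [1, 2, 3]) == True   # parenthesized, the intended test\n   True\n\n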
The ``+`` (concatenation) and ``*``\n(repetition) operations have the same priority as the corresponding\nnumeric operations.\n\n+----------------------------+----------------------------------+------------+\n| Operation                  | Result                           | Notes      |\n+============================+==================================+============+\n| ``x in s``                 | ``True`` if an item of *s* is    | (1)        |\n|                            | equal to *x*, else ``False``     |            |\n+----------------------------+----------------------------------+------------+\n| ``x not in s``             | ``False`` if an item of *s* is   | (1)        |\n|                            | equal to *x*, else ``True``      |            |\n+----------------------------+----------------------------------+------------+\n| ``s + t``                  | the concatenation of *s* and *t* | (6)(7)     |\n+----------------------------+----------------------------------+------------+\n| ``s * n`` or ``n * s``     | *n* shallow copies of *s*        | (2)(7)     |\n|                            | concatenated                     |            |\n+----------------------------+----------------------------------+------------+\n| ``s[i]``                   | *i*th item of *s*, origin 0      | (3)        |\n+----------------------------+----------------------------------+------------+\n| ``s[i:j]``                 | slice of *s* from *i* to *j*     | (3)(4)     |\n+----------------------------+----------------------------------+------------+\n| ``s[i:j:k]``               | slice of *s* from *i* to *j*     | (3)(5)     |\n|                            | with step *k*                    |            |\n+----------------------------+----------------------------------+------------+\n| ``len(s)``                 | length of *s*                    |            |\n+----------------------------+----------------------------------+------------+\n| ``min(s)``                 | smallest item of *s*             |            |\n+----------------------------+----------------------------------+------------+\n| ``max(s)``                 | largest item of *s*              |            |\n+----------------------------+----------------------------------+------------+\n| ``s.index(x[, i[, j]])``   | index of the first occurrence of | (8)        |\n|                            | *x* in *s* (at or after index    |            |\n|                            | *i* and before index *j*)        |            |\n+----------------------------+----------------------------------+------------+\n| ``s.count(x)``             | total number of occurrences of   |            |\n|                            | *x* in *s*                       |            |\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons. In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length. (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. While the ``in`` and ``not in`` operations are used only for simple\n   containment testing in the general case, some specialised sequences\n   (such as ``str``, ``bytes`` and ``bytearray``) also use them for\n   subsequence testing:\n\n   >>> "gg" in "eggs"\n   True\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n   empty sequence of the same type as *s*). Note also that the copies\n   are shallow; nested structures are not copied. This often haunts\n   new Python programmers; consider:\n\n   >>> lists = [[]] * 3\n   >>> lists\n   [[], [], []]\n   >>> lists[0].append(3)\n   >>> lists\n   [[3], [3], [3]]\n\n   What has happened is that ``[[]]`` is a one-element list containing\n   an empty list, so all three elements of ``[[]] * 3`` are (pointers\n   to) this single empty list. Modifying any of the elements of\n   ``lists`` modifies this single list. 
You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. Concatenating immutable sequences always results in a new object.\n This means that building up a sequence by repeated concatenation\n will have a quadratic runtime cost in the total sequence length.\n To get a linear runtime cost, you must switch to one of the\n alternatives below:\n\n * if concatenating ``str`` objects, you can build a list and use\n ``str.join()`` at the end or else write to a ``io.StringIO``\n instance and retrieve its value when complete\n\n * if concatenating ``bytes`` objects, you can similarly use\n ``bytes.join()`` or ``io.BytesIO``, or you can do in-place\n concatenation with a ``bytearray`` object. ``bytearray`` objects\n are mutable and have an efficient overallocation mechanism\n\n * if concatenating ``tuple`` objects, extend a ``list`` instead\n\n * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as ``range``) only support item sequences\n that follow specific patterns, and hence don\'t support sequence\n concatenation or repetition.\n\n8. ``index`` raises ``ValueError`` when *x* is not found in *s*. When\n supported, the additional arguments to the index method allow\n efficient searching of subsections of the sequence. Passing the\n extra arguments is roughly equivalent to using ``s[i:j].index(x)``,\n only without copying any data and with the returned index being\n relative to the start of the sequence rather than the start of the\n slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe ``hash()`` built-in.\n\nThis support allows immutable sequences, such as ``tuple`` instances,\nto be used as ``dict`` keys and stored in ``set`` and ``frozenset``\ninstances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in ``TypeError``.\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
The ``collections.abc.MutableSequence`` ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, ``bytearray`` only\naccepts integers that meet the value restriction ``0 <= x <= 255``).\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | appends *x* to the end of the | |\n| | sequence (same as | |\n| | ``s[len(s):len(s)] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.clear()`` | removes all items from ``s`` | (5) |\n| | (same as ``del s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.copy()`` | creates a shallow copy of ``s`` | (5) |\n| | (same as ``s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(t)`` | extends *s* with the contents of | |\n| | *t* (same as ``s[len(s):len(s)] | |\n| | = t``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | ``s[i:i] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | remove the first item from *s* | (3) |\n| | where ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n3. ``remove`` raises ``ValueError`` when *x* is not found in *s*.\n\n4. The ``reverse()`` method modifies the sequence in place for economy\n of space when reversing a large sequence. 
To remind users that it\n   operates by side effect, it does not return the reversed sequence.\n\n5. ``clear()`` and ``copy()`` are included for consistency with the\n   interfaces of mutable containers that don\'t support slicing\n   operations (such as ``dict`` and ``set``)\n\n   New in version 3.3: ``clear()`` and ``copy()`` methods.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass list([iterable])\n\n   Lists may be constructed in several ways:\n\n   * Using a pair of square brackets to denote the empty list: ``[]``\n\n   * Using square brackets, separating items with commas: ``[a]``,\n     ``[a, b, c]``\n\n   * Using a list comprehension: ``[x for x in iterable]``\n\n   * Using the type constructor: ``list()`` or ``list(iterable)``\n\n   The constructor builds a list whose items are the same and in the\n   same order as *iterable*\'s items. *iterable* may be either a\n   sequence, a container that supports iteration, or an iterator\n   object. If *iterable* is already a list, a copy is made and\n   returned, similar to ``iterable[:]``. For example, ``list(\'abc\')``\n   returns ``[\'a\', \'b\', \'c\']`` and ``list( (1, 2, 3) )`` returns ``[1,\n   2, 3]``. If no argument is given, the constructor creates a new\n   empty list, ``[]``.\n\n   Many other operations also produce lists, including the\n   ``sorted()`` built-in.\n\n   Lists implement all of the *common* and *mutable* sequence\n   operations. Lists also provide the following additional method:\n\n   sort(*, key=None, reverse=False)\n\n      This method sorts the list in place, using only ``<``\n      comparisons between items. Exceptions are not suppressed - if\n      any comparison operations fail, the entire sort operation will\n      fail (and the list will likely be left in a partially modified\n      state).\n\n      *key* specifies a function of one argument that is used to\n      extract a comparison key from each list element (for example,\n      ``key=str.lower``). The key corresponding to each item in the\n      list is calculated once and then used for the entire sorting\n      process. The default value of ``None`` means that list items are\n      sorted directly without calculating a separate key value.\n\n      The ``functools.cmp_to_key()`` utility is available to convert a\n      2.x style *cmp* function to a *key* function.\n\n      *reverse* is a boolean value. If set to ``True``, then the list\n      elements are sorted as if each comparison were reversed.\n\n      This method modifies the sequence in place for economy of space\n      when sorting a large sequence. To remind users that it operates\n      by side effect, it does not return the sorted sequence (use\n      ``sorted()`` to explicitly request a new sorted list instance).\n\n      The ``sort()`` method is guaranteed to be stable. A sort is\n      stable if it guarantees not to change the relative order of\n      elements that compare equal --- this is helpful for sorting in\n      multiple passes (for example, sort by department, then by salary\n      grade).\n\n      **CPython implementation detail:** While a list is being sorted,\n      the effect of attempting to mutate, or even inspect, the list is\n      undefined. The C implementation of Python makes the list appear\n      empty for the duration, and raises ``ValueError`` if it can\n      detect that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the\n``enumerate()`` built-in). 
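For instance (a quick illustrative call):\n\n   >>> list(enumerate([\'spam\', \'eggs\']))   # each item is an (index, value) 2-tuple\n   [(0, \'spam\'), (1, \'eggs\')]\n\n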
Tuples are also used for cases where an\nimmutable sequence of homogeneous data is needed (such as allowing\nstorage in a ``set`` or ``dict`` instance).\n\nclass tuple([iterable])\n\n   Tuples may be constructed in a number of ways:\n\n   * Using a pair of parentheses to denote the empty tuple: ``()``\n\n   * Using a trailing comma for a singleton tuple: ``a,`` or ``(a,)``\n\n   * Separating items with commas: ``a, b, c`` or ``(a, b, c)``\n\n   * Using the ``tuple()`` built-in: ``tuple()`` or\n     ``tuple(iterable)``\n\n   The constructor builds a tuple whose items are the same and in the\n   same order as *iterable*\'s items. *iterable* may be either a\n   sequence, a container that supports iteration, or an iterator\n   object. If *iterable* is already a tuple, it is returned\n   unchanged. For example, ``tuple(\'abc\')`` returns ``(\'a\', \'b\',\n   \'c\')`` and ``tuple( [1, 2, 3] )`` returns ``(1, 2, 3)``. If no\n   argument is given, the constructor creates a new empty tuple,\n   ``()``.\n\n   Note that it is actually the comma which makes a tuple, not the\n   parentheses. The parentheses are optional, except in the empty\n   tuple case, or when they are needed to avoid syntactic ambiguity.\n   For example, ``f(a, b, c)`` is a function call with three\n   arguments, while ``f((a, b, c))`` is a function call with a 3-tuple\n   as the sole argument.\n\n   Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, ``collections.namedtuple()`` may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe ``range`` type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in ``for`` loops.\n\nclass range(stop)\nclass range(start, stop[, step])\n\n   The arguments to the range constructor must be integers (either\n   built-in ``int`` or any object that implements the ``__index__``\n   special method). If the *step* argument is omitted, it defaults to\n   ``1``. If the *start* argument is omitted, it defaults to ``0``. If\n   *step* is zero, ``ValueError`` is raised.\n\n   For a positive *step*, the contents of a range ``r`` are determined\n   by the formula ``r[i] = start + step*i`` where ``i >= 0`` and\n   ``r[i] < stop``.\n\n   For a negative *step*, the contents of the range are still\n   determined by the formula ``r[i] = start + step*i``, but the\n   constraints are ``i >= 0`` and ``r[i] > stop``.\n\n   A range object will be empty if ``r[0]`` does not meet the value\n   constraint. 
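To make the emptiness rule concrete (illustrative values):\n\n   >>> list(range(5, 0))       # positive step: r[0] would be 5, but 5 < 0 fails\n   []\n   >>> list(range(5, 0, -1))   # negative step: 5 > 0 holds, so items exist\n   [5, 4, 3, 2, 1]\n\n   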
Ranges do support negative indices, but these are\n interpreted as indexing from the end of the sequence determined by\n the positive indices.\n\n Ranges containing absolute values larger than ``sys.maxsize`` are\n permitted but some features (such as ``len()``) may raise\n ``OverflowError``.\n\n Range examples:\n\n >>> list(range(10))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> list(range(1, 11))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n >>> list(range(0, 30, 5))\n [0, 5, 10, 15, 20, 25]\n >>> list(range(0, 10, 3))\n [0, 3, 6, 9]\n >>> list(range(0, -10, -1))\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n >>> list(range(0))\n []\n >>> list(range(1, 0))\n []\n\n Ranges implement all of the *common* sequence operations except\n concatenation and repetition (due to the fact that range objects\n can only represent sequences that follow a strict pattern and\n repetition and concatenation will usually violate that pattern).\n\nThe advantage of the ``range`` type over a regular ``list`` or\n``tuple`` is that a ``range`` object will always take the same (small)\namount of memory, no matter the size of the range it represents (as it\nonly stores the ``start``, ``stop`` and ``step`` values, calculating\nindividual items and subranges as needed).\n\nRange objects implement the ``collections.Sequence`` ABC, and provide\nfeatures such as containment tests, element index lookup, slicing and\nsupport for negative indices (see *Sequence Types --- list, tuple,\nrange*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with ``==`` and ``!=`` compares\nthem as sequences. That is, two range objects are considered equal if\nthey represent the same sequence of values. (Note that two range\nobjects that compare equal might have different ``start``, ``stop``\nand ``step`` attributes, for example ``range(0) == range(2, 1, 3)`` or\n``range(0, 3, 2) == range(0, 4, 2)``.)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test ``int`` objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The ``start``, ``stop`` and ``step`` attributes.\n', 'typesseq-mutable': "\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
The ``collections.abc.MutableSequence`` ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, ``bytearray`` only\naccepts integers that meet the value restriction ``0 <= x <= 255``).\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | appends *x* to the end of the | |\n| | sequence (same as | |\n| | ``s[len(s):len(s)] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.clear()`` | removes all items from ``s`` | (5) |\n| | (same as ``del s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.copy()`` | creates a shallow copy of ``s`` | (5) |\n| | (same as ``s[:]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(t)`` | extends *s* with the contents of | |\n| | *t* (same as ``s[len(s):len(s)] | |\n| | = t``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | ``s[i:i] = [x]``) | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | remove the first item from *s* | (3) |\n| | where ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n3. ``remove`` raises ``ValueError`` when *x* is not found in *s*.\n\n4. The ``reverse()`` method modifies the sequence in place for economy\n of space when reversing a large sequence. 
To remind users that it\n operates by side effect, it does not return the reversed sequence.\n\n5. ``clear()`` and ``copy()`` are included for consistency with the\n interfaces of mutable containers that don't support slicing\n operations (such as ``dict`` and ``set``)\n\n New in version 3.3: ``clear()`` and ``copy()`` methods.\n", 'unary': '\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary ``-`` (minus) operator yields the negation of its numeric\nargument.\n\nThe unary ``+`` (plus) operator yields its numeric argument unchanged.\n\nThe unary ``~`` (invert) operator yields the bitwise inversion of its\ninteger argument. The bitwise inversion of ``x`` is defined as\n``-(x+1)``. It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n``TypeError`` exception is raised.\n', 'while': '\nThe ``while`` statement\n***********************\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n', 'with': '\nThe ``with`` statement\n**********************\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'yield': '\nThe ``yield`` statement\n***********************\n\n yield_stmt ::= yield_expression\n\nThe ``yield`` statement is only used when defining a generator\nfunction, and is only used in the body of the generator function.\nUsing a ``yield`` statement in a function definition is sufficient to\ncause that definition to create a generator function instead of a\nnormal function.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the ``next()`` function on\nthe generator repeatedly until it raises an exception.\n\nWhen a ``yield`` statement is executed, the state of the generator is\nfrozen and the value of ``expression_list`` is returned to\n``next()``\'s caller. By "frozen" we mean that all local state is\nretained, including the current bindings of local variables, the\ninstruction pointer, and the internal evaluation stack: enough\ninformation is saved so that the next time ``next()`` is invoked, the\nfunction can proceed exactly as if the ``yield`` statement were just\nanother external call.\n\nThe ``yield`` statement is allowed in the ``try`` clause of a ``try``\n... ``finally`` construct. If the generator is not resumed before it\nis finalized (by reaching a zero reference count or by being garbage\ncollected), the generator-iterator\'s ``close()`` method will be\ncalled, allowing any pending ``finally`` clauses to execute.\n\nWhen ``yield from <expr>`` is used, it treats the supplied expression\nas a subiterator, producing values from it until the underlying\niterator is exhausted.\n\n Changed in version 3.3: Added ``yield from <expr>`` to delegate\n control flow to a subiterator\n\nFor full details of ``yield`` semantics, refer to the *Yield\nexpressions* section.\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the ``yield`` statement\n to Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal to enhance the API and syntax of generators, making\n them usable as simple coroutines.\n\n **PEP 0380** - Syntax for Delegating to a Subgenerator\n The proposal to introduce the ``yield_from`` syntax, making\n delegation to sub-generators easy.\n'}
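The ``with`` entry above walks the context-manager protocol step by step. A minimal sketch of that protocol in code; the class name ``Managed`` and its bodies are invented for illustration, not taken from the quoted documentation:

class Managed:
    def __enter__(self):
        # step 3: invoked on entry; whatever it returns is bound by ``as``
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # step 6: gets (None, None, None) on a clean exit, the exception
        # triple otherwise; a true return value suppresses the exception
        return False

with Managed() as m:
    pass  # the suite (step 5) runs with ``m`` bound to __enter__'s result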
hongzhouye/frankenstein
refs/heads/master
tests/be_test.py
1
""" This script tests the scf module. """ import os import numpy as np import pytest from frankenstein.be import BE @pytest.mark.parametrize( "geom, basis, fsites, frank, incore, nibath, solver, jac, B0, bad_con, " "good_con, e_ps_ref", [ # Frankenstein BE, FCI solver ("geom/h10.zmat", "sto-3g", [0,1,2], True, True, False, "FCI", None, None, [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127889, -2.8149386, -2.8127889]), # frank/FCI/QN/eye ("geom/h10.zmat", "sto-3g", [0,1,2], True, True, False, "FCI", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127889, -2.8149386, -2.8127889]), # frank/FCI/QN/scf ("geom/h10.zmat", "sto-3g", [0,1,2], True, True, False, "FCI", 2, None, [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127889, -2.8149386, -2.8127889]), # frank/FCI/NR/eye ("geom/h10.zmat", "sto-3g", [0,1,2], True, True, False, "FCI", 2, "mp2", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127889, -2.8149386, -2.8127889]), # frank/FCI/NR/mp2 # Frankenstein BE, FCI solver, nibath ("geom/h10.zmat", "sto-3g", [0,1,2], True, True, True, "FCI", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8207132177, -2.8129821608, -2.8207131811]), # PySCF BE, FCI solver ("geom/h10.zmat", "sto-3g", [0,1,2], False, True, False, "FCI", None, None, [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127889, -2.8149386, -2.8127889]), # pyscf/FCI/QN/eye ("geom/h10.zmat", "sto-3g", [0,1,2], False, True, False, "FCI", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127889, -2.8149386, -2.8127889]), # pyscf/FCI/QN/scf ("geom/h10.zmat", "sto-3g", [0,1,2], False, True, False, "FCI", 2, None, [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127889, -2.8149386, -2.8127889]), # pyscf/FCI/NR/eye ("geom/h10.zmat", "sto-3g", [0,1,2], False, True, False, "FCI", 2, "mp2", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127889, -2.8149386, -2.8127889]), # pyscf/FCI/NR/mp2 # PySCF BE, different solvers ("geom/h10.zmat", "sto-3g", [0,1,2], False, True, False, "mp2", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8123768, -2.8133190, -2.8123768]), # pyscf/mp2/QN/scf ("geom/h10.zmat", "sto-3g", [0,1,2], False, True, False, "ccsd", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127582, -2.8149148, -2.8127582]), # pyscf/ccsd/QN/scf ("geom/h10.zmat", "sto-3g", [0,1,2], False, True, False, "cisd", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127194, -2.8148940, -2.8127194]), # pyscf/cisd/QN/scf # PySCF BE, different fragment size ("geom/h10.zmat", "sto-3g", [0,1,2,3], False, True, False, "fci", None, "scf", [[[0,0],[3,3]],[[1,1],[2,2]], [[0,1],[2,3]]], [0.5, 0.5, [1,2]], [-2.8137710, -2.8142425, -2.8142424, -2.8137710]), # pyscf/FCI/QN/scf # PySCF BE, not incore ("geom/h10.zmat", "sto-3g", [0,1,2], False, False, False, "FCI", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127889, -2.8149386, -2.8127889]), # pyscf/FCI/QN/scf ("geom/h10.zmat", "sto-3g", [0,1,2], False, False, False, "mp2", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8123768, -2.8133190, -2.8123768]), # pyscf/mp2/QN/scf ("geom/h10.zmat", "sto-3g", [0,1,2], False, False, False, "ccsd", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127582, -2.8149148, -2.8127582]), # pyscf/ccsd/QN/scf ("geom/h10.zmat", "sto-3g", [0,1,2], False, False, False, "cisd", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8127194, -2.8148940, -2.8127194]), # pyscf/cisd/QN/scf # PySCF BE, nibath ("geom/h10.zmat", "sto-3g", [0,1,2], False, True, True, "FCI", None, "scf", [[[0,0],[2,2]],[[1,1]]], [0.5, 0.5], [-2.8207132177, -2.8129821608, -2.8207131811]), ]) def 
test_be(geom, basis, fsites, frank, incore, nibath, solver, jac, B0, bad_con, good_con, e_ps_ref): verbose = 2 if frank: from frankenstein import molecule, scf from frankenstein.be import SD mol = molecule.MOL(geom, basis, ao_direct=False) mf = scf.RHF(mol, opt="diis") mf.kernel() msd = SD(mf, fsites, nibath=nibath, verbose=verbose) else: from frankenstein.tools.pyscf_utils import get_pymol from pyscf import scf as pyscf_scf from frankenstein.pyscf_be import pySD pymol = get_pymol(geom, basis) pymf = pyscf_scf.RHF(pymol) pymf.kernel() msd = pySD(pymf, fsites, incore=incore, nibath=nibath, verbose=verbose) mb = BE(msd, imp_sol=solver, jac=jac, B0=B0, bad_con=bad_con, good_con=good_con) mb.kernel() print(np.diag(mb.rdm1s)) mb.msd.delete_eri() mb.msd.delete_erifile() assert(np.allclose(mb.e_persite, e_ps_ref)) @pytest.mark.parametrize( "geom, basis, natm, incore, nibath, frzcmo, fast_ao2mo, auxbasis, mtl," "imp_sol, B0, ec_ref", [ ("geom/c6h6.xyz", "sto-3g", 2, True, False, False, False, None, [["intra", "1epop"]], "mp2", "scf", -0.3766784783), # incore, int_bath ("geom/c6h6.xyz", "sto-3g", 2, False, False, False, False, None, [["intra", "1epop"]], "mp2", "scf", -0.3766784783), # outcore, int_bath ("geom/c6h6.xyz", "sto-3g", 2, True, False, True, True, None, [["intra", "1epop"]], "mp2", "scf", -0.3769771553), # incore, int_bath ("geom/c6h6.xyz", "sto-3g", 2, True, False, True, True, "def2-svp-c", [["intra", "1epop"]], "mp2", "scf", -0.3769766986), # incore, int_bath ("geom/si10h16.xyz", "sto-3g", 1, True, False, True, True, "def2-svp-c", [["intra", "1epop"]], "mp2", "scf", -0.4488356791), # incore, int_bath ]) def test_beatm(geom, basis, natm, incore, nibath, frzcmo, fast_ao2mo, auxbasis, mtl, imp_sol, B0, ec_ref): from frankenstein.tools.pyscf_utils import get_pymol from pyscf import scf as pyscf_scf from frankenstein.pyscf_be.pysd_atom import pySDATOM from frankenstein.be.bemol import BEatom pymol = get_pymol(geom, basis) pymf = pyscf_scf.RHF(pymol) pymf.kernel() msdatm = pySDATOM(pymf, natm, incore=incore, nibath=nibath, frzcore_mo=frzcmo, matchtypelist=mtl, auxbasis=auxbasis, fast_ao2mo=fast_ao2mo, verbose=2) mbatm = BEatom(msdatm, imp_sol=imp_sol, B0=B0) mbatm.kernel() assert(np.abs(mbatm.e_corr-ec_ref) < 1.E-4) @pytest.mark.parametrize( "geom, basis, natm, rdm_type, mtl, ec_ref", [("geom/c6h6.xyz", "sto-3g", 1, "relaxed", [["intra", "1epop"]], -0.5042406923), ("geom/c6h6.xyz", "sto-3g", 1, "unrelaxed1", [["intra", "1epop"]], -0.5068129669), ("geom/c6h6.xyz", "sto-3g", 1, "unrelaxed2", [["intra", "1epop"]], -0.4264953732)] ) def test_ccsd_rdm(geom, basis, natm, rdm_type, mtl, ec_ref): from frankenstein.tools.pyscf_utils import get_pymol from pyscf import scf as pyscf_scf from frankenstein.pyscf_be.pysd_atom import pySDATOM from frankenstein.be.bemol import BEatom pymol = get_pymol(geom, basis) pymf = pyscf_scf.RHF(pymol) pymf.kernel() msdatm = pySDATOM(pymf, natm, fast_ao2mo=True, frzcore=False, frzcore_mo=True, matchtypelist=mtl, verbose=2) mbatm = BEatom(msdatm, B0="scf", imp_sol="ccsd", sol_params={"rdm":rdm_type},) mbatm.kernel() assert(np.abs(mbatm.e_corr-ec_ref) < 1.E-4)
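The test module above drives a single test function across many solver/fragment combinations through ``pytest.mark.parametrize``. A stripped-down sketch of the same pattern; the parameter names and numbers below are placeholders, not the project's reference energies:

import pytest

@pytest.mark.parametrize("solver, e_ref", [
    ("FCI", -2.81),  # placeholder tuple: each one becomes its own test case
    ("mp2", -2.81),
])
def test_solver_sketch(solver, e_ref):
    assert solver in ("FCI", "mp2") and e_ref < 0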
AthinaB/synnefo
refs/heads/feature-newui-pithos
snf-pithos-backend/pithos/backends/lib/sqlalchemy/alembic/versions/2efddde15abf_diefferentiate_hashm.py
10
"""Differentiate hashmap from mapfile Revision ID: 2efddde15abf Revises: e6edec1b499 Create Date: 2014-06-11 10:46:04.116321 """ # revision identifiers, used by Alembic. revision = '2efddde15abf' down_revision = 'e6edec1b499' from alembic import op import sqlalchemy as sa def upgrade(): op.execute(sa.schema.CreateSequence(sa.schema.Sequence("mapfile_seq"))) op.add_column('versions', sa.Column('mapfile', sa.String(256))) op.add_column('versions', sa.Column('is_snapshot', sa.Boolean, nullable=False, default=False, server_default='False')) v = sa.sql.table( 'versions', sa.sql.column('hash', sa.String), sa.sql.column('mapfile', sa.String), sa.sql.column('is_snapshot', sa.Boolean)) u = v.update().values({'mapfile': v.c.hash, 'is_snapshot': sa.case([(v.c.hash.like('archip:%'), True)], else_=False)}) op.execute(u) def downgrade(): op.drop_column('versions', 'is_snapshot') op.drop_column('versions', 'mapfile') op.execute(sa.schema.DropSequence(sa.schema.Sequence("mapfile_seq")))
lucassid/ns-3-dev-git
refs/heads/master
src/buildings/bindings/callbacks_list.py
664
callback_classes = [
    ['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>',
     'unsigned short', 'ns3::Address const&', 'ns3::Address const&',
     'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
jholster/django-birdie
refs/heads/master
appearance/tests.py
1940
""" This file demonstrates two different styles of tests (one doctest and one unittest). These will both pass when you run "manage.py test". Replace these with more appropriate tests for your application. """ from django.test import TestCase class SimpleTest(TestCase): def test_basic_addition(self): """ Tests that 1 + 1 always equals 2. """ self.failUnlessEqual(1 + 1, 2) __test__ = {"doctest": """ Another way to test that 1 + 1 is equal to 2. >>> 1 + 1 == 2 True """}
GbalsaC/bitnamiP
refs/heads/master
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py
1776
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants


class SJISProber(MultiByteCharSetProber):
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return self._mContextAnalyzer.get_charset_name()

    def feed(self, aBuf):
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(
                        aBuf[i + 1 - charLen:i + 3 - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
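``SJISProber`` is normally reached through chardet's public entry point rather than instantiated directly. A small usage sketch of that top-level API; the sample bytes spell a short Shift-JIS string and are only illustrative:

import chardet

sample = b'\x82\xb1\x82\xf1\x82\xc9\x82\xbf\x82\xcd'  # Shift-JIS text
result = chardet.detect(sample)
# detect() returns a dict like {'encoding': ..., 'confidence': ...};
# for Shift-JIS input the prober above supplies the winning guess
print(result)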
eugena/django
refs/heads/master
django/http/request.py
66
from __future__ import unicode_literals import copy import re import sys from io import BytesIO from itertools import chain from django.conf import settings from django.core import signing from django.core.exceptions import DisallowedHost, ImproperlyConfigured from django.core.files import uploadhandler from django.http.multipartparser import MultiPartParser, MultiPartParserError from django.utils import six from django.utils.datastructures import ImmutableList, MultiValueDict from django.utils.encoding import ( escape_uri_path, force_bytes, force_str, force_text, iri_to_uri, ) from django.utils.six.moves.urllib.parse import ( parse_qsl, quote, urlencode, urljoin, urlsplit, ) RAISE_ERROR = object() host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$") class UnreadablePostError(IOError): pass class RawPostDataException(Exception): """ You cannot access raw_post_data from a request that has multipart/* POST data if it has been accessed via POST, FILES, etc.. """ pass class HttpRequest(object): """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. _encoding = None _upload_handlers = [] def __init__(self): # WARNING: The `WSGIRequest` subclass doesn't call `super`. # Any variable assignment made here should also happen in # `WSGIRequest.__init__()`. self.GET = QueryDict(mutable=True) self.POST = QueryDict(mutable=True) self.COOKIES = {} self.META = {} self.FILES = MultiValueDict() self.path = '' self.path_info = '' self.method = None self.resolver_match = None self._post_parse_error = False def __repr__(self): if self.method is None or not self.get_full_path(): return force_str('<%s>' % self.__class__.__name__) return force_str( '<%s: %s %r>' % (self.__class__.__name__, self.method, force_str(self.get_full_path())) ) def get_host(self): """Returns the HTTP host using the environment or request headers.""" # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = self.get_port() if server_port != ('443' if self.is_secure() else '80'): host = '%s:%s' % (host, server_port) # There is no hostname validation when DEBUG=True if settings.DEBUG: return host domain, port = split_domain_port(host) if domain and validate_host(domain, settings.ALLOWED_HOSTS): return host else: msg = "Invalid HTTP_HOST header: %r." % host if domain: msg += " You may need to add %r to ALLOWED_HOSTS." % domain else: msg += " The domain name provided is not valid according to RFC 1034/1035." raise DisallowedHost(msg) def get_port(self): """Return the port number for the request as a string.""" if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META: port = self.META['HTTP_X_FORWARDED_PORT'] else: port = self.META['SERVER_PORT'] return str(port) def get_full_path(self, force_append_slash=False): # RFC 3986 requires query string arguments to be in the ASCII range. # Rather than crash if this doesn't happen, we encode defensively. return '%s%s%s' % ( escape_uri_path(self.path), '/' if force_append_slash and not self.path.endswith('/') else '', ('?' 
+ iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else '' ) def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None): """ Attempts to return a signed cookie. If the signature fails or the cookie has expired, raises an exception... unless you provide the default argument in which case that value will be returned instead. """ try: cookie_value = self.COOKIES[key] except KeyError: if default is not RAISE_ERROR: return default else: raise try: value = signing.get_cookie_signer(salt=key + salt).unsign( cookie_value, max_age=max_age) except signing.BadSignature: if default is not RAISE_ERROR: return default else: raise return value def build_absolute_uri(self, location=None): """ Builds an absolute URI from the location and the variables available in this request. If no ``location`` is specified, the absolute URI is built on ``request.get_full_path()``. Anyway, if the location is absolute, it is simply converted to an RFC 3987 compliant URI and returned and if location is relative or is scheme-relative (i.e., ``//example.com/``), it is urljoined to a base URL constructed from the request variables. """ if location is None: # Make it an absolute url (but schemeless and domainless) for the # edge case that the path starts with '//'. location = '//%s' % self.get_full_path() bits = urlsplit(location) if not (bits.scheme and bits.netloc): current_uri = '{scheme}://{host}{path}'.format(scheme=self.scheme, host=self.get_host(), path=self.path) # Join the constructed URL with the provided location, which will # allow the provided ``location`` to apply query strings to the # base path as well as override the host, if it begins with // location = urljoin(current_uri, location) return iri_to_uri(location) def _get_scheme(self): """ Hook for subclasses like WSGIRequest to implement. Returns 'http' by default. """ return 'http' @property def scheme(self): if settings.SECURE_PROXY_SSL_HEADER: try: header, value = settings.SECURE_PROXY_SSL_HEADER except ValueError: raise ImproperlyConfigured( 'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.' ) if self.META.get(header) == value: return 'https' return self._get_scheme() def is_secure(self): return self.scheme == 'https' def is_ajax(self): return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' @property def encoding(self): return self._encoding @encoding.setter def encoding(self, val): """ Sets the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, it is removed and recreated on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, '_get'): del self._get if hasattr(self, '_post'): del self._post def _initialize_handlers(self): self._upload_handlers = [uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS] @property def upload_handlers(self): if not self._upload_handlers: # If there are no upload handlers defined, initialize them from settings. 
self._initialize_handlers() return self._upload_handlers @upload_handlers.setter def upload_handlers(self, upload_handlers): if hasattr(self, '_files'): raise AttributeError("You cannot set the upload handlers after the upload has been processed.") self._upload_handlers = upload_handlers def parse_file_upload(self, META, post_data): """Returns a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning="You cannot alter upload handlers after the upload has been processed." ) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() @property def body(self): if not hasattr(self, '_body'): if self._read_started: raise RawPostDataException("You cannot access body after reading from request's data stream") try: self._body = self.read() except IOError as e: six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2]) self._stream = BytesIO(self._body) return self._body def _mark_post_parse_error(self): self._post = QueryDict('') self._files = MultiValueDict() self._post_parse_error = True def _load_post_and_files(self): """Populate self._post and self._files if the content-type is a form type""" if self.method != 'POST': self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict() return if self._read_started and not hasattr(self, '_body'): self._mark_post_parse_error() return if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'): if hasattr(self, '_body'): # Use already read data data = BytesIO(self._body) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except MultiPartParserError: # An error occurred while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. # Mark that an error occurred. This allows self.__repr__ to # be explicit about it instead of simply representing an # empty POST self._mark_post_parse_error() raise elif self.META.get('CONTENT_TYPE', '').startswith('application/x-www-form-urlencoded'): self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict() else: self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict() def close(self): if hasattr(self, '_files'): for f in chain.from_iterable(l[1] for l in self._files.lists()): f.close() # File-like and iterator interface. # # Expects self._stream to be set to an appropriate source of bytes by # a corresponding request subclass (e.g. WSGIRequest). # Also when request data has already been read by request.POST or # request.body, self._stream points to a BytesIO instance # containing that data. def read(self, *args, **kwargs): self._read_started = True try: return self._stream.read(*args, **kwargs) except IOError as e: six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2]) def readline(self, *args, **kwargs): self._read_started = True try: return self._stream.readline(*args, **kwargs) except IOError as e: six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2]) def xreadlines(self): while True: buf = self.readline() if not buf: break yield buf __iter__ = xreadlines def readlines(self): return list(iter(self)) class QueryDict(MultiValueDict): """ A specialized MultiValueDict which represents a query string. A QueryDict can be used to represent GET or POST data. 
It subclasses MultiValueDict since keys in such data can be repeated, for instance in the data from a form with a <select multiple> field. By default QueryDicts are immutable, though the copy() method will always return a mutable copy. Both keys and values set on this class are converted from the given encoding (DEFAULT_CHARSET by default) to unicode. """ # These are both reset in __init__, but is specified here at the class # level so that unpickling will have valid values _mutable = True _encoding = None def __init__(self, query_string=None, mutable=False, encoding=None): super(QueryDict, self).__init__() if not encoding: encoding = settings.DEFAULT_CHARSET self.encoding = encoding if six.PY3: if isinstance(query_string, bytes): # query_string normally contains URL-encoded data, a subset of ASCII. try: query_string = query_string.decode(encoding) except UnicodeDecodeError: # ... but some user agents are misbehaving :-( query_string = query_string.decode('iso-8859-1') for key, value in parse_qsl(query_string or '', keep_blank_values=True, encoding=encoding): self.appendlist(key, value) else: for key, value in parse_qsl(query_string or '', keep_blank_values=True): try: value = value.decode(encoding) except UnicodeDecodeError: value = value.decode('iso-8859-1') self.appendlist(force_text(key, encoding, errors='replace'), value) self._mutable = mutable @property def encoding(self): if self._encoding is None: self._encoding = settings.DEFAULT_CHARSET return self._encoding @encoding.setter def encoding(self, value): self._encoding = value def _assert_mutable(self): if not self._mutable: raise AttributeError("This QueryDict instance is immutable") def __setitem__(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super(QueryDict, self).__setitem__(key, value) def __delitem__(self, key): self._assert_mutable() super(QueryDict, self).__delitem__(key) def __copy__(self): result = self.__class__('', mutable=True, encoding=self.encoding) for key, value in six.iterlists(self): result.setlist(key, value) return result def __deepcopy__(self, memo): result = self.__class__('', mutable=True, encoding=self.encoding) memo[id(self)] = result for key, value in six.iterlists(self): result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def setlist(self, key, list_): self._assert_mutable() key = bytes_to_text(key, self.encoding) list_ = [bytes_to_text(elt, self.encoding) for elt in list_] super(QueryDict, self).setlist(key, list_) def setlistdefault(self, key, default_list=None): self._assert_mutable() return super(QueryDict, self).setlistdefault(key, default_list) def appendlist(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super(QueryDict, self).appendlist(key, value) def pop(self, key, *args): self._assert_mutable() return super(QueryDict, self).pop(key, *args) def popitem(self): self._assert_mutable() return super(QueryDict, self).popitem() def clear(self): self._assert_mutable() super(QueryDict, self).clear() def setdefault(self, key, default=None): self._assert_mutable() key = bytes_to_text(key, self.encoding) default = bytes_to_text(default, self.encoding) return super(QueryDict, self).setdefault(key, default) def copy(self): """Returns a mutable copy of this object.""" return self.__deepcopy__({}) def urlencode(self, safe=None): """ Returns an encoded string of all query string arguments. 
:arg safe: Used to specify characters which do not require quoting, for example:: >>> q = QueryDict('', mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: safe = force_bytes(safe, self.encoding) encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe))) else: encode = lambda k, v: urlencode({k: v}) for k, list_ in self.lists(): k = force_bytes(k, self.encoding) output.extend(encode(k, force_bytes(v, self.encoding)) for v in list_) return '&'.join(output) # It's neither necessary nor appropriate to use # django.utils.encoding.smart_text for parsing URLs and form inputs. Thus, # this slightly more restricted function, used by QueryDict. def bytes_to_text(s, encoding): """ Converts basestring objects to unicode, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). Returns any non-basestring objects without change. """ if isinstance(s, bytes): return six.text_type(s, encoding, 'replace') else: return s def split_domain_port(host): """ Return a (domain, port) tuple from a given host. Returned domain is lower-cased. If the host is invalid, the domain will be empty. """ host = host.lower() if not host_validation_re.match(host): return '', '' if host[-1] == ']': # It's an IPv6 address without a port. return host, '' bits = host.rsplit(':', 1) if len(bits) == 2: return tuple(bits) return bits[0], '' def validate_host(host, allowed_hosts): """ Validate the given host for this site. Check that the host looks valid and matches a host or host pattern in the given list of ``allowed_hosts``. Any pattern beginning with a period matches a domain and all its subdomains (e.g. ``.example.com`` matches ``example.com`` and any subdomain), ``*`` matches anything, and anything else must match exactly. Note: This function assumes that the given host is lower-cased and has already had the port, if any, stripped off. Return ``True`` for a valid host, ``False`` otherwise. """ host = host[:-1] if host.endswith('.') else host for pattern in allowed_hosts: pattern = pattern.lower() match = ( pattern == '*' or pattern.startswith('.') and ( host.endswith(pattern) or host == pattern[1:] ) or pattern == host ) if match: return True return False
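``QueryDict`` above layers immutability and repeated keys on top of ``MultiValueDict``. A short usage sketch of that public behaviour; the ``settings.configure()`` call is only there so the snippet runs outside a Django project:

from django.conf import settings
settings.configure()  # QueryDict reads DEFAULT_CHARSET from settings

from django.http import QueryDict

q = QueryDict('a=1&a=2&b=3')
print(q['a'])          # '2': plain indexing returns the last value
print(q.getlist('a'))  # ['1', '2']: repeated keys are kept as a list
q2 = q.copy()          # copy() always returns a mutable QueryDict
q2['c'] = '4'          # the same assignment on q would raise AttributeError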
pllim/astropy
refs/heads/placeholder
astropy/visualization/wcsaxes/tests/test_misc.py
4
# Licensed under a 3-clause BSD style license - see LICENSE.rst from packaging.version import Version import pytest import numpy as np import matplotlib import matplotlib.pyplot as plt from contextlib import nullcontext from matplotlib.contour import QuadContourSet from astropy import units as u from astropy.wcs import WCS from astropy.io import fits from astropy.coordinates import SkyCoord from astropy.utils.data import get_pkg_data_filename from astropy.visualization.wcsaxes.core import WCSAxes from astropy.visualization.wcsaxes.frame import ( EllipticalFrame, RectangularFrame, RectangularFrame1D) from astropy.visualization.wcsaxes.utils import get_coord_meta from astropy.visualization.wcsaxes.transforms import CurvedTransform ft_version = Version(matplotlib.ft2font.__freetype_version__) FREETYPE_261 = ft_version == Version("2.6.1") TEX_UNAVAILABLE = not matplotlib.checkdep_usetex(True) MATPLOTLIB_GT_3_4_2 = Version(matplotlib.__version__) > Version('3.4.2') def teardown_function(function): plt.close('all') def test_grid_regression(ignore_matplotlibrc): # Regression test for a bug that meant that if the rc parameter # axes.grid was set to True, WCSAxes would crash upon initalization. plt.rc('axes', grid=True) fig = plt.figure(figsize=(3, 3)) WCSAxes(fig, [0.1, 0.1, 0.8, 0.8]) def test_format_coord_regression(ignore_matplotlibrc, tmpdir): # Regression test for a bug that meant that if format_coord was called by # Matplotlib before the axes were drawn, an error occurred. fig = plt.figure(figsize=(3, 3)) ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8]) fig.add_axes(ax) assert ax.format_coord(10, 10) == "" assert ax.coords[0].format_coord(10) == "" assert ax.coords[1].format_coord(10) == "" fig.savefig(tmpdir.join('nothing').strpath) assert ax.format_coord(10, 10) == "10.0 10.0 (world)" assert ax.coords[0].format_coord(10) == "10.0" assert ax.coords[1].format_coord(10) == "10.0" TARGET_HEADER = fits.Header.fromstring(""" NAXIS = 2 NAXIS1 = 200 NAXIS2 = 100 CTYPE1 = 'RA---MOL' CRPIX1 = 500 CRVAL1 = 180.0 CDELT1 = -0.4 CUNIT1 = 'deg ' CTYPE2 = 'DEC--MOL' CRPIX2 = 400 CRVAL2 = 0.0 CDELT2 = 0.4 CUNIT2 = 'deg ' COORDSYS= 'icrs ' """, sep='\n') @pytest.mark.parametrize('grid_type', ['lines', 'contours']) def test_no_numpy_warnings(ignore_matplotlibrc, tmpdir, grid_type): ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER)) ax.imshow(np.zeros((100, 200))) ax.coords.grid(color='white', grid_type=grid_type) if MATPLOTLIB_GT_3_4_2 and grid_type == 'contours': ctx = pytest.raises(AttributeError, match='dpi') else: ctx = nullcontext() with pytest.warns(None) as warning_lines, ctx: plt.savefig(tmpdir.join('test.png').strpath) # There should be no warnings raised if some pixels are outside WCS # (since this is normal). # BUT catch_warning was ignoring some warnings before, so now we # have to catch it. Otherwise, the pytest filterwarnings=error # setting in setup.cfg will fail this test. # There are actually multiple warnings but they are all similar. 
for w in warning_lines: w_msg = str(w.message) assert ('converting a masked element to nan' in w_msg or 'No contour levels were found within the data range' in w_msg or 'np.asscalar(a) is deprecated since NumPy v1.16' in w_msg or 'PY_SSIZE_T_CLEAN will be required' in w_msg) def test_invalid_frame_overlay(ignore_matplotlibrc): # Make sure a nice error is returned if a frame doesn't exist ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER)) with pytest.raises(ValueError) as exc: ax.get_coords_overlay('banana') assert exc.value.args[0] == 'Frame banana not found' with pytest.raises(ValueError) as exc: get_coord_meta('banana') assert exc.value.args[0] == 'Unknown frame: banana' def test_plot_coord_transform(ignore_matplotlibrc): twoMASS_k_header = get_pkg_data_filename('data/2MASS_k_header') twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header) fig = plt.figure(figsize=(6, 6)) ax = fig.add_axes([0.15, 0.15, 0.8, 0.8], projection=WCS(twoMASS_k_header), aspect='equal') ax.set_xlim(-0.5, 720.5) ax.set_ylim(-0.5, 720.5) c = SkyCoord(359.76045223*u.deg, 0.26876217*u.deg) with pytest.raises(TypeError): ax.plot_coord(c, 'o', transform=ax.get_transform('galactic')) def test_set_label_properties(ignore_matplotlibrc): # Regression test to make sure that arguments passed to # set_xlabel/set_ylabel are passed to the underlying coordinate helpers ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER)) ax.set_xlabel('Test x label', labelpad=2, color='red') ax.set_ylabel('Test y label', labelpad=3, color='green') assert ax.coords[0].axislabels.get_text() == 'Test x label' assert ax.coords[0].axislabels.get_minpad('b') == 2 assert ax.coords[0].axislabels.get_color() == 'red' assert ax.coords[1].axislabels.get_text() == 'Test y label' assert ax.coords[1].axislabels.get_minpad('l') == 3 assert ax.coords[1].axislabels.get_color() == 'green' assert ax.get_xlabel() == 'Test x label' assert ax.get_ylabel() == 'Test y label' GAL_HEADER = fits.Header.fromstring(""" SIMPLE = T / conforms to FITS standard BITPIX = -32 / array data type NAXIS = 3 / number of array dimensions NAXIS1 = 31 NAXIS2 = 2881 NAXIS3 = 480 EXTEND = T CTYPE1 = 'DISTMOD ' CRVAL1 = 3.5 CDELT1 = 0.5 CRPIX1 = 1.0 CTYPE2 = 'GLON-CAR' CRVAL2 = 180.0 CDELT2 = -0.125 CRPIX2 = 1.0 CTYPE3 = 'GLAT-CAR' CRVAL3 = 0.0 CDELT3 = 0.125 CRPIX3 = 241.0 """, sep='\n') def test_slicing_warnings(ignore_matplotlibrc, tmpdir): # Regression test to make sure that no warnings are emitted by the tick # locator for the sliced axis when slicing a cube. # Scalar case wcs3d = WCS(naxis=3) wcs3d.wcs.ctype = ['x', 'y', 'z'] wcs3d.wcs.cunit = ['deg', 'deg', 'km/s'] wcs3d.wcs.crpix = [614.5, 856.5, 333] wcs3d.wcs.cdelt = [6.25, 6.25, 23] wcs3d.wcs.crval = [0., 0., 1.] 
with pytest.warns(None) as warning_lines: plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1)) plt.savefig(tmpdir.join('test.png').strpath) # For easy debugging if there are indeed warnings for warning in warning_lines: # https://github.com/astropy/astropy/issues/9690 if 'PY_SSIZE_T_CLEAN' not in str(warning.message): raise AssertionError(f'Unexpected warning: {warning}') # Angle case wcs3d = WCS(GAL_HEADER) with pytest.warns(None) as warning_lines: plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 2)) plt.savefig(tmpdir.join('test.png').strpath) # For easy debugging if there are indeed warnings for warning in warning_lines: # https://github.com/astropy/astropy/issues/9690 if 'PY_SSIZE_T_CLEAN' not in str(warning.message): raise AssertionError(f'Unexpected warning: {warning}') def test_plt_xlabel_ylabel(tmpdir): # Regression test for a bug that happened when using plt.xlabel # and plt.ylabel with Matplotlib 3.0 plt.subplot(projection=WCS()) plt.xlabel('Galactic Longitude') plt.ylabel('Galactic Latitude') plt.savefig(tmpdir.join('test.png').strpath) def test_grid_type_contours_transform(tmpdir): # Regression test for a bug that caused grid_type='contours' to not work # with custom transforms class CustomTransform(CurvedTransform): # We deliberately don't define the inverse, and has_inverse should # default to False. def transform(self, values): return values * 1.3 transform = CustomTransform() coord_meta = {'type': ('scalar', 'scalar'), 'unit': (u.m, u.s), 'wrap': (None, None), 'name': ('x', 'y')} fig = plt.figure() ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], transform=transform, coord_meta=coord_meta) fig.add_axes(ax) ax.grid(grid_type='contours') fig.savefig(tmpdir.join('test.png').strpath) def test_plt_imshow_origin(): # Regression test for a bug that caused origin to be set to upper when # plt.imshow was called. ax = plt.subplot(projection=WCS()) plt.imshow(np.ones((2, 2))) assert ax.get_xlim() == (-0.5, 1.5) assert ax.get_ylim() == (-0.5, 1.5) def test_ax_imshow_origin(): # Regression test for a bug that caused origin to be set to upper when # ax.imshow was called with no origin ax = plt.subplot(projection=WCS()) ax.imshow(np.ones((2, 2))) assert ax.get_xlim() == (-0.5, 1.5) assert ax.get_ylim() == (-0.5, 1.5) def test_grid_contour_large_spacing(tmpdir): # Regression test for a bug that caused a crash when grid was called and # didn't produce grid lines (due e.g. to too large spacing) and was then # called again. filename = tmpdir.join('test.png').strpath ax = plt.subplot(projection=WCS()) ax.set_xlim(-0.5, 1.5) ax.set_ylim(-0.5, 1.5) ax.coords[0].set_ticks(values=[] * u.one) ax.coords[0].grid(grid_type='contours') plt.savefig(filename) ax.coords[0].grid(grid_type='contours') plt.savefig(filename) def test_contour_return(): # Regression test for a bug that caused contour and contourf to return None # instead of the contour object. fig = plt.figure() ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8]) fig.add_axes(ax) cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform('world')) assert isinstance(cset, QuadContourSet) cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform('world')) assert isinstance(cset, QuadContourSet) def test_contour_empty(): # Regression test for a bug that caused contour to crash if no contours # were present. 
fig = plt.figure() ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8]) fig.add_axes(ax) with pytest.warns(UserWarning, match='No contour levels were found within the data range'): ax.contour(np.zeros((4, 4)), transform=ax.get_transform('world')) def test_iterate_coords(ignore_matplotlibrc, tmpdir): # Regression test for a bug that caused ax.coords to return too few axes wcs3d = WCS(naxis=3) wcs3d.wcs.ctype = ['x', 'y', 'z'] wcs3d.wcs.cunit = ['deg', 'deg', 'km/s'] wcs3d.wcs.crpix = [614.5, 856.5, 333] wcs3d.wcs.cdelt = [6.25, 6.25, 23] wcs3d.wcs.crval = [0., 0., 1.] ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1)) x, y, z = ax.coords def test_invalid_slices_errors(ignore_matplotlibrc): # Make sure that users get a clear message when specifying a WCS with # >2 dimensions without giving the 'slices' argument, or if the 'slices' # argument has too many/few elements. wcs3d = WCS(naxis=3) wcs3d.wcs.ctype = ['x', 'y', 'z'] plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1)) with pytest.raises(ValueError) as exc: plt.subplot(1, 1, 1, projection=wcs3d) assert exc.value.args[0] == ("WCS has more than 2 pixel dimensions, so " "'slices' should be set") with pytest.raises(ValueError) as exc: plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1, 2)) assert exc.value.args[0] == ("'slices' should have as many elements as " "WCS has pixel dimensions (should be 3)") wcs2d = WCS(naxis=2) wcs2d.wcs.ctype = ['x', 'y'] ax = plt.subplot(1, 1, 1, projection=wcs2d) assert ax.frame_class is RectangularFrame ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('x', 'y')) assert ax.frame_class is RectangularFrame ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('y', 'x')) assert ax.frame_class is RectangularFrame ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=['x', 'y']) assert ax.frame_class is RectangularFrame ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'x')) assert ax.frame_class is RectangularFrame1D wcs1d = WCS(naxis=1) wcs1d.wcs.ctype = ['x'] ax = plt.subplot(1, 1, 1, projection=wcs1d) assert ax.frame_class is RectangularFrame1D with pytest.raises(ValueError): plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'y')) EXPECTED_REPR_1 = """ <CoordinatesMap with 3 world coordinates: index aliases type unit wrap format_unit visible ----- ------------------------------ --------- ---- ---- ----------- ------- 0 distmod dist scalar None no 1 pos.galactic.lon glon-car glon longitude deg 360 deg yes 2 pos.galactic.lat glat-car glat latitude deg None deg yes > """.strip() EXPECTED_REPR_2 = """ <CoordinatesMap with 3 world coordinates: index aliases type unit wrap format_unit visible ----- ------------------------------ --------- ---- ---- ----------- ------- 0 distmod dist scalar None yes 1 pos.galactic.lon glon-car glon longitude deg 360 deg yes 2 pos.galactic.lat glat-car glat latitude deg None deg yes > """.strip() def test_repr(ignore_matplotlibrc): # Unit test to make sure __repr__ looks as expected wcs3d = WCS(GAL_HEADER) # Cube header has world coordinates as distance, lon, lat, so start off # by slicing in a way that we select just lon,lat: ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=(1, 'x', 'y')) assert repr(ax.coords) == EXPECTED_REPR_1 # Now slice in a way that all world coordinates are still present: ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1)) assert repr(ax.coords) == EXPECTED_REPR_2 @pytest.fixture def time_spectral_wcs_2d(): wcs = WCS(naxis=2) wcs.wcs.ctype = ['FREQ', 'TIME'] wcs.wcs.set() return wcs def test_time_wcs(time_spectral_wcs_2d): 
# Regression test for a bug that caused WCSAxes to error when using a WCS # with a time axis. plt.subplot(projection=time_spectral_wcs_2d) @pytest.mark.skipif('TEX_UNAVAILABLE') def test_simplify_labels_usetex(ignore_matplotlibrc, tmpdir): """Regression test for https://github.com/astropy/astropy/issues/8004.""" plt.rc('text', usetex=True) header = { 'NAXIS': 2, 'NAXIS1': 360, 'NAXIS2': 180, 'CRPIX1': 180.5, 'CRPIX2': 90.5, 'CRVAL1': 180.0, 'CRVAL2': 0.0, 'CDELT1': -2 * np.sqrt(2) / np.pi, 'CDELT2': 2 * np.sqrt(2) / np.pi, 'CTYPE1': 'RA---MOL', 'CTYPE2': 'DEC--MOL', 'RADESYS': 'ICRS'} wcs = WCS(header) fig, ax = plt.subplots( subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs)) ax.set_xlim(-0.5, header['NAXIS1'] - 0.5) ax.set_ylim(-0.5, header['NAXIS2'] - 0.5) ax.coords[0].set_ticklabel(exclude_overlapping=True) ax.coords[1].set_ticklabel(exclude_overlapping=True) ax.coords[0].set_ticks(spacing=45 * u.deg) ax.coords[1].set_ticks(spacing=30 * u.deg) ax.grid() fig.savefig(tmpdir / 'plot.png') @pytest.mark.parametrize('frame_class', [RectangularFrame, EllipticalFrame]) def test_set_labels_with_coords(ignore_matplotlibrc, frame_class): """Test if ``axis.set_xlabel()`` calls the correct ``coords[i]_set_axislabel()`` in a WCS plot. Regression test for https://github.com/astropy/astropy/issues/10435. """ labels = ['RA', 'Declination'] header = { 'NAXIS': 2, 'NAXIS1': 360, 'NAXIS2': 180, 'CRPIX1': 180.5, 'CRPIX2': 90.5, 'CRVAL1': 180.0, 'CRVAL2': 0.0, 'CDELT1': -2 * np.sqrt(2) / np.pi, 'CDELT2': 2 * np.sqrt(2) / np.pi, 'CTYPE1': 'RA---AIT', 'CTYPE2': 'DEC--AIT'} wcs = WCS(header) fig, ax = plt.subplots( subplot_kw=dict(frame_class=frame_class, projection=wcs)) ax.set_xlabel(labels[0]) ax.set_ylabel(labels[1]) assert ax.get_xlabel() == labels[0] assert ax.get_ylabel() == labels[1] for i in range(2): assert ax.coords[i].get_axislabel() == labels[i] @pytest.mark.parametrize('atol', [0.2, 1.0e-8]) def test_bbox_size(atol): # Test for the size of a WCSAxes bbox (only have Matplotlib >= 3.0 now) extents = [11.38888888888889, 3.5, 576.0, 432.0] fig = plt.figure() ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8]) fig.add_axes(ax) fig.canvas.draw() renderer = fig.canvas.renderer ax_bbox = ax.get_tightbbox(renderer) # Enforce strict test only with reference Freetype version if atol < 0.1 and not FREETYPE_261: pytest.xfail("Exact BoundingBox dimensions are only ensured with FreeType 2.6.1") assert np.allclose(ax_bbox.extents, extents, atol=atol)
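The tests above create ``WCSAxes`` instances almost entirely through Matplotlib's ``projection=`` keyword. A minimal sketch of that entry point; the two-axis TAN header is invented for illustration:

import matplotlib.pyplot as plt
from astropy.wcs import WCS

wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']

ax = plt.subplot(projection=wcs)  # WCS registers itself as an mpl projection
ax.coords.grid(color='gray')      # the coords helper the tests exercise
plt.savefig('wcsaxes_demo.png')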
bparafina/shadowsocks
refs/heads/master
shadowsocks/shell.py
652
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement import os import json import sys import getopt import logging from shadowsocks.common import to_bytes, to_str, IPNetwork from shadowsocks import encrypt VERBOSE_LEVEL = 5 verbose = 0 def check_python(): info = sys.version_info if info[0] == 2 and not info[1] >= 6: print('Python 2.6+ required') sys.exit(1) elif info[0] == 3 and not info[1] >= 3: print('Python 3.3+ required') sys.exit(1) elif info[0] not in [2, 3]: print('Python version not supported') sys.exit(1) def print_exception(e): global verbose logging.error(e) if verbose > 0: import traceback traceback.print_exc() def print_shadowsocks(): version = '' try: import pkg_resources version = pkg_resources.get_distribution('shadowsocks').version except Exception: pass print('Shadowsocks %s' % version) def find_config(): config_path = 'config.json' if os.path.exists(config_path): return config_path config_path = os.path.join(os.path.dirname(__file__), '../', 'config.json') if os.path.exists(config_path): return config_path return None def check_config(config, is_local): if config.get('daemon', None) == 'stop': # no need to specify configuration for daemon stop return if is_local and not config.get('password', None): logging.error('password not specified') print_help(is_local) sys.exit(2) if not is_local and not config.get('password', None) \ and not config.get('port_password', None) \ and not config.get('manager_address'): logging.error('password or port_password not specified') print_help(is_local) sys.exit(2) if 'local_port' in config: config['local_port'] = int(config['local_port']) if config.get('server_port', None) and type(config['server_port']) != list: config['server_port'] = int(config['server_port']) if config.get('local_address', '') in [b'0.0.0.0']: logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe') if config.get('server', '') in ['127.0.0.1', 'localhost']: logging.warn('warning: server set to listen on %s:%s, are you sure?' % (to_str(config['server']), config['server_port'])) if (config.get('method', '') or '').lower() == 'table': logging.warn('warning: table is not safe; please use a safer cipher, ' 'like AES-256-CFB') if (config.get('method', '') or '').lower() == 'rc4': logging.warn('warning: RC4 is not safe; please use a safer cipher, ' 'like AES-256-CFB') if config.get('timeout', 300) < 100: logging.warn('warning: your timeout %d seems too short' % int(config.get('timeout'))) if config.get('timeout', 300) > 600: logging.warn('warning: your timeout %d seems too long' % int(config.get('timeout'))) if config.get('password') in [b'mypassword']: logging.error('DON\'T USE DEFAULT PASSWORD! 
Please change it in your ' 'config.json!') sys.exit(1) if config.get('user', None) is not None: if os.name != 'posix': logging.error('user can be used only on Unix') sys.exit(1) encrypt.try_cipher(config['password'], config['method']) def get_config(is_local): global verbose logging.basicConfig(level=logging.INFO, format='%(levelname)-s: %(message)s') if is_local: shortopts = 'hd:s:b:p:k:l:m:c:t:vq' longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=', 'version'] else: shortopts = 'hd:s:p:k:m:c:t:vq' longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=', 'forbidden-ip=', 'user=', 'manager-address=', 'version'] try: config_path = find_config() optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts) for key, value in optlist: if key == '-c': config_path = value if config_path: logging.info('loading config from %s' % config_path) with open(config_path, 'rb') as f: try: config = parse_json_in_str(f.read().decode('utf8')) except ValueError as e: logging.error('found an error in config.json: %s', e.message) sys.exit(1) else: config = {} v_count = 0 for key, value in optlist: if key == '-p': config['server_port'] = int(value) elif key == '-k': config['password'] = to_bytes(value) elif key == '-l': config['local_port'] = int(value) elif key == '-s': config['server'] = to_str(value) elif key == '-m': config['method'] = to_str(value) elif key == '-b': config['local_address'] = to_str(value) elif key == '-v': v_count += 1 # '-vv' turns on more verbose mode config['verbose'] = v_count elif key == '-t': config['timeout'] = int(value) elif key == '--fast-open': config['fast_open'] = True elif key == '--workers': config['workers'] = int(value) elif key == '--manager-address': config['manager_address'] = value elif key == '--user': config['user'] = to_str(value) elif key == '--forbidden-ip': config['forbidden_ip'] = to_str(value).split(',') elif key in ('-h', '--help'): if is_local: print_local_help() else: print_server_help() sys.exit(0) elif key == '--version': print_shadowsocks() sys.exit(0) elif key == '-d': config['daemon'] = to_str(value) elif key == '--pid-file': config['pid-file'] = to_str(value) elif key == '--log-file': config['log-file'] = to_str(value) elif key == '-q': v_count -= 1 config['verbose'] = v_count except getopt.GetoptError as e: print(e, file=sys.stderr) print_help(is_local) sys.exit(2) if not config: logging.error('config not specified') print_help(is_local) sys.exit(2) config['password'] = to_bytes(config.get('password', b'')) config['method'] = to_str(config.get('method', 'aes-256-cfb')) config['port_password'] = config.get('port_password', None) config['timeout'] = int(config.get('timeout', 300)) config['fast_open'] = config.get('fast_open', False) config['workers'] = config.get('workers', 1) config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid') config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log') config['verbose'] = config.get('verbose', False) config['local_address'] = to_str(config.get('local_address', '127.0.0.1')) config['local_port'] = config.get('local_port', 1080) if is_local: if config.get('server', None) is None: logging.error('server addr not specified') print_local_help() sys.exit(2) else: config['server'] = to_str(config['server']) else: config['server'] = to_str(config.get('server', '0.0.0.0')) try: config['forbidden_ip'] = \ IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128')) except Exception as e: logging.error(e) sys.exit(2) config['server_port'] = config.get('server_port', 
None) logging.getLogger('').handlers = [] logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE') if config['verbose'] >= 2: level = VERBOSE_LEVEL elif config['verbose'] == 1: level = logging.DEBUG elif config['verbose'] == -1: level = logging.WARN elif config['verbose'] <= -2: level = logging.ERROR else: level = logging.INFO verbose = config['verbose'] logging.basicConfig(level=level, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') check_config(config, is_local) return config def print_help(is_local): if is_local: print_local_help() else: print_server_help() def print_local_help(): print('''usage: sslocal [OPTION]... A fast tunnel proxy that helps you bypass firewalls. You can supply configurations via either config file or command line arguments. Proxy options: -c CONFIG path to config file -s SERVER_ADDR server address -p SERVER_PORT server port, default: 8388 -b LOCAL_ADDR local binding address, default: 127.0.0.1 -l LOCAL_PORT local port, default: 1080 -k PASSWORD password -m METHOD encryption method, default: aes-256-cfb -t TIMEOUT timeout in seconds, default: 300 --fast-open use TCP_FASTOPEN, requires Linux 3.7+ General options: -h, --help show this help message and exit -d start/stop/restart daemon mode --pid-file PID_FILE pid file for daemon mode --log-file LOG_FILE log file for daemon mode --user USER username to run as -v, -vv verbose mode -q, -qq quiet mode, only show warnings/errors --version show version information Online help: <https://github.com/shadowsocks/shadowsocks> ''') def print_server_help(): print('''usage: ssserver [OPTION]... A fast tunnel proxy that helps you bypass firewalls. You can supply configurations via either config file or command line arguments. Proxy options: -c CONFIG path to config file -s SERVER_ADDR server address, default: 0.0.0.0 -p SERVER_PORT server port, default: 8388 -k PASSWORD password -m METHOD encryption method, default: aes-256-cfb -t TIMEOUT timeout in seconds, default: 300 --fast-open use TCP_FASTOPEN, requires Linux 3.7+ --workers WORKERS number of workers, available on Unix/Linux --forbidden-ip IPLIST comma seperated IP list forbidden to connect --manager-address ADDR optional server manager UDP address, see wiki General options: -h, --help show this help message and exit -d start/stop/restart daemon mode --pid-file PID_FILE pid file for daemon mode --log-file LOG_FILE log file for daemon mode --user USER username to run as -v, -vv verbose mode -q, -qq quiet mode, only show warnings/errors --version show version information Online help: <https://github.com/shadowsocks/shadowsocks> ''') def _decode_list(data): rv = [] for item in data: if hasattr(item, 'encode'): item = item.encode('utf-8') elif isinstance(item, list): item = _decode_list(item) elif isinstance(item, dict): item = _decode_dict(item) rv.append(item) return rv def _decode_dict(data): rv = {} for key, value in data.items(): if hasattr(value, 'encode'): value = value.encode('utf-8') elif isinstance(value, list): value = _decode_list(value) elif isinstance(value, dict): value = _decode_dict(value) rv[key] = value return rv def parse_json_in_str(data): # parse json and convert everything from unicode to str return json.loads(data, object_hook=_decode_dict)
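``parse_json_in_str`` at the bottom of the module forces every string in the parsed config to bytes through a JSON ``object_hook``. A compact stdlib-only sketch of that mechanism (nested lists, which ``_decode_list`` also handles, are omitted here):

import json

def to_bytes_hook(d):
    # mirrors _decode_dict: encode each str key/value as UTF-8 bytes
    return {k.encode('utf-8') if isinstance(k, str) else k:
            v.encode('utf-8') if isinstance(v, str) else v
            for k, v in d.items()}

config = json.loads('{"server": "0.0.0.0", "timeout": 300}',
                    object_hook=to_bytes_hook)
print(config)  # {b'server': b'0.0.0.0', b'timeout': 300}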
tumbl3w33d/ansible
refs/heads/devel
lib/ansible/plugins/action/set_fact.py
109
# Copyright 2013 Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.vars import isidentifier

import ansible.constants as C


class ActionModule(ActionBase):

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        facts = dict()

        cacheable = boolean(self._task.args.pop('cacheable', False))

        if self._task.args:
            for (k, v) in iteritems(self._task.args):
                k = self._templar.template(k)

                if not isidentifier(k):
                    result['failed'] = True
                    result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
                                     "and contain only letters, numbers and underscores." % k)
                    return result

                if not C.DEFAULT_JINJA2_NATIVE and isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
                    v = boolean(v, strict=False)
                facts[k] = v

        result['changed'] = False
        result['ansible_facts'] = facts
        result['_ansible_facts_cacheable'] = cacheable
        return result
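# Usage sketch (illustrative playbook task, not part of this module; the
# variable names are made up):
#
#   - set_fact:
#       app_port: 8080
#       app_enabled: yes   # coerced to a boolean by the string check above
#       cacheable: yes     # popped off and exposed as _ansible_facts_cacheable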
johnkingsley/aprinter
refs/heads/master
upload.py
1
from __future__ import print_function
import argparse
import sys
import signal


def main():
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--length', type=int, default=1000000)
    parser.add_argument('-c', '--chunked', action='store_true')
    parser.add_argument('-s', '--chunk-size', type=int, default=512)
    parser.add_argument('-p', '--request-path', default='/uploadTest')
    args = parser.parse_args()
    assert args.chunk_size > 0

    request = ''
    request += 'POST {} HTTP/1.1\r\n'.format(args.request_path)
    request += 'Connection: close\r\n'
    if args.chunked:
        request += 'Transfer-Encoding: Chunked\r\n'
    else:
        request += 'Content-Length: {}\r\n'.format(args.length)
    request += '\r\n'
    sys.stdout.write(request)

    rem_length = args.length
    while rem_length > 0:
        chunk_size = min(rem_length, args.chunk_size)
        rem_length -= chunk_size
        chunk_data = 'X' * chunk_size
        if args.chunked:
            chunk = '{:X}\r\n{}\r\n'.format(chunk_size, chunk_data)
        else:
            chunk = chunk_data
        sys.stdout.write(chunk)

    if args.chunked:
        sys.stdout.write('0\r\n\r\n')


if __name__ == '__main__':
    main()
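# Usage sketch (hypothetical target host): the script writes a raw HTTP request
# to stdout, so it can be piped straight into a TCP client such as netcat:
#
#   python upload.py --chunked --chunk-size 1024 --length 500000 | nc 192.168.1.50 80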
Teamxrtc/webrtc-streaming-node
refs/heads/master
third_party/webrtc/src/chromium/src/third_party/jinja2/filters.py
598
# -*- coding: utf-8 -*-
"""
    jinja2.filters
    ~~~~~~~~~~~~~~

    Bundled jinja filters.

    :copyright: (c) 2010 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
import re
import math

from random import choice
from operator import itemgetter
from itertools import groupby
from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
     unicode_urlencode
from jinja2.runtime import Undefined
from jinja2.exceptions import FilterArgumentError
from jinja2._compat import next, imap, string_types, text_type, iteritems


_word_re = re.compile(r'\w+(?u)')


def contextfilter(f):
    """Decorator for marking context dependent filters. The current
    :class:`Context` will be passed as first argument.
    """
    f.contextfilter = True
    return f


def evalcontextfilter(f):
    """Decorator for marking eval-context dependent filters.  An eval
    context object is passed as first argument.  For more information
    about the eval context, see :ref:`eval-context`.

    .. versionadded:: 2.4
    """
    f.evalcontextfilter = True
    return f


def environmentfilter(f):
    """Decorator for marking environment dependent filters.  The current
    :class:`Environment` is passed to the filter as first argument.
    """
    f.environmentfilter = True
    return f


def make_attrgetter(environment, attribute):
    """Returns a callable that looks up the given attribute from a
    passed object with the rules of the environment.  Dots are allowed
    to access attributes of attributes.  Integer parts in paths are
    looked up as integers.
    """
    if not isinstance(attribute, string_types) \
       or ('.' not in attribute and not attribute.isdigit()):
        return lambda x: environment.getitem(x, attribute)
    attribute = attribute.split('.')

    def attrgetter(item):
        for part in attribute:
            if part.isdigit():
                part = int(part)
            item = environment.getitem(item, part)
        return item
    return attrgetter


def do_forceescape(value):
    """Enforce HTML escaping.  This will probably double escape variables."""
    if hasattr(value, '__html__'):
        value = value.__html__()
    return escape(text_type(value))


def do_urlencode(value):
    """Escape strings for use in URLs (uses UTF-8 encoding).  It accepts both
    dictionaries and regular strings as well as pairwise iterables.

    .. versionadded:: 2.7
    """
    itemiter = None
    if isinstance(value, dict):
        itemiter = iteritems(value)
    elif not isinstance(value, string_types):
        try:
            itemiter = iter(value)
        except TypeError:
            pass
    if itemiter is None:
        return unicode_urlencode(value)
    return u'&'.join(unicode_urlencode(k) + '=' + unicode_urlencode(v)
                     for k, v in itemiter)


@evalcontextfilter
def do_replace(eval_ctx, s, old, new, count=None):
    """Return a copy of the value with all occurrences of a substring
    replaced with a new one. The first argument is the substring
    that should be replaced, the second is the replacement string.
    If the optional third argument ``count`` is given, only the first
    ``count`` occurrences are replaced:

    .. sourcecode:: jinja

        {{ "Hello World"|replace("Hello", "Goodbye") }}
            -> Goodbye World

        {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
            -> d'oh, d'oh, aaargh
    """
    if count is None:
        count = -1
    if not eval_ctx.autoescape:
        return text_type(s).replace(text_type(old), text_type(new), count)
    if hasattr(old, '__html__') or hasattr(new, '__html__') and \
       not hasattr(s, '__html__'):
        s = escape(s)
    else:
        s = soft_unicode(s)
    return s.replace(soft_unicode(old), soft_unicode(new), count)


def do_upper(s):
    """Convert a value to uppercase."""
    return soft_unicode(s).upper()


def do_lower(s):
    """Convert a value to lowercase."""
    return soft_unicode(s).lower()


@evalcontextfilter
def do_xmlattr(_eval_ctx, d, autospace=True):
    """Create an SGML/XML attribute string based on the items in a dict.
    All values that are neither `none` nor `undefined` are automatically
    escaped:

    .. sourcecode:: html+jinja

        <ul{{ {'class': 'my_list', 'missing': none,
                'id': 'list-%d'|format(variable)}|xmlattr }}>
        ...
        </ul>

    Results in something like this:

    .. sourcecode:: html

        <ul class="my_list" id="list-42">
        ...
        </ul>

    As you can see it automatically prepends a space in front of the item
    if the filter returned something unless the second parameter is false.
    """
    rv = u' '.join(
        u'%s="%s"' % (escape(key), escape(value))
        for key, value in iteritems(d)
        if value is not None and not isinstance(value, Undefined)
    )
    if autospace and rv:
        rv = u' ' + rv
    if _eval_ctx.autoescape:
        rv = Markup(rv)
    return rv


def do_capitalize(s):
    """Capitalize a value. The first character will be uppercase, all others
    lowercase.
    """
    return soft_unicode(s).capitalize()


def do_title(s):
    """Return a titlecased version of the value. I.e. words will start with
    uppercase letters, all remaining characters are lowercase.
    """
    rv = []
    for item in re.compile(r'([-\s]+)(?u)').split(s):
        if not item:
            continue
        rv.append(item[0].upper() + item[1:].lower())
    return ''.join(rv)


def do_dictsort(value, case_sensitive=False, by='key'):
    """Sort a dict and yield (key, value) pairs. Because python dicts are
    unsorted you may want to use this function to order them by either
    key or value:

    .. sourcecode:: jinja

        {% for item in mydict|dictsort %}
            sort the dict by key, case insensitive

        {% for item in mydict|dictsort(true) %}
            sort the dict by key, case sensitive

        {% for item in mydict|dictsort(false, 'value') %}
            sort the dict by key, case insensitive, sorted
            normally and ordered by value.
    """
    if by == 'key':
        pos = 0
    elif by == 'value':
        pos = 1
    else:
        raise FilterArgumentError('You can only sort by either '
                                  '"key" or "value"')

    def sort_func(item):
        value = item[pos]
        if isinstance(value, string_types) and not case_sensitive:
            value = value.lower()
        return value

    return sorted(value.items(), key=sort_func)


@environmentfilter
def do_sort(environment, value, reverse=False, case_sensitive=False,
            attribute=None):
    """Sort an iterable.  By default it sorts ascending, if you pass it
    true as first argument it will reverse the sorting.

    If the iterable is made of strings the third parameter can be used to
    control the case sensitivity of the comparison which is disabled by
    default.

    .. sourcecode:: jinja

        {% for item in iterable|sort %}
            ...
        {% endfor %}

    It is also possible to sort by an attribute (for example to sort
    by the date of an object) by specifying the `attribute` parameter:

    .. sourcecode:: jinja

        {% for item in iterable|sort(attribute='date') %}
            ...
        {% endfor %}

    .. versionchanged:: 2.6
       The `attribute` parameter was added.
    """
    if not case_sensitive:
        def sort_func(item):
            if isinstance(item, string_types):
                item = item.lower()
            return item
    else:
        sort_func = None
    if attribute is not None:
        getter = make_attrgetter(environment, attribute)

        def sort_func(item, processor=sort_func or (lambda x: x)):
            return processor(getter(item))
    return sorted(value, key=sort_func, reverse=reverse)


def do_default(value, default_value=u'', boolean=False):
    """If the value is undefined it will return the passed default value,
    otherwise the value of the variable:

    .. sourcecode:: jinja

        {{ my_variable|default('my_variable is not defined') }}

    This will output the value of ``my_variable`` if the variable was
    defined, otherwise ``'my_variable is not defined'``. If you want
    to use default with variables that evaluate to false you have to
    set the second parameter to `true`:

    .. sourcecode:: jinja

        {{ ''|default('the string was empty', true) }}
    """
    if isinstance(value, Undefined) or (boolean and not value):
        return default_value
    return value


@evalcontextfilter
def do_join(eval_ctx, value, d=u'', attribute=None):
    """Return a string which is the concatenation of the strings in the
    sequence. The separator between elements is an empty string per
    default, you can define it with the optional parameter:

    .. sourcecode:: jinja

        {{ [1, 2, 3]|join('|') }}
            -> 1|2|3

        {{ [1, 2, 3]|join }}
            -> 123

    It is also possible to join certain attributes of an object:

    .. sourcecode:: jinja

        {{ users|join(', ', attribute='username') }}

    .. versionadded:: 2.6
       The `attribute` parameter was added.
    """
    if attribute is not None:
        value = imap(make_attrgetter(eval_ctx.environment, attribute), value)

    # no automatic escaping?  joining is a lot easier then
    if not eval_ctx.autoescape:
        return text_type(d).join(imap(text_type, value))

    # if the delimiter doesn't have an html representation we check
    # if any of the items has.  If yes we do a coercion to Markup
    if not hasattr(d, '__html__'):
        value = list(value)
        do_escape = False
        for idx, item in enumerate(value):
            if hasattr(item, '__html__'):
                do_escape = True
            else:
                value[idx] = text_type(item)
        if do_escape:
            d = escape(d)
        else:
            d = text_type(d)
        return d.join(value)

    # no html involved, to normal joining
    return soft_unicode(d).join(imap(soft_unicode, value))


def do_center(value, width=80):
    """Centers the value in a field of a given width."""
    return text_type(value).center(width)


@environmentfilter
def do_first(environment, seq):
    """Return the first item of a sequence."""
    try:
        return next(iter(seq))
    except StopIteration:
        return environment.undefined('No first item, sequence was empty.')


@environmentfilter
def do_last(environment, seq):
    """Return the last item of a sequence."""
    try:
        return next(iter(reversed(seq)))
    except StopIteration:
        return environment.undefined('No last item, sequence was empty.')


@environmentfilter
def do_random(environment, seq):
    """Return a random item from the sequence."""
    try:
        return choice(seq)
    except IndexError:
        return environment.undefined('No random item, sequence was empty.')


def do_filesizeformat(value, binary=False):
    """Format the value like a 'human-readable' file size (i.e. 13 kB,
    4.1 MB, 102 Bytes, etc).  By default decimal prefixes are used (Mega,
    Giga, etc.), if the second parameter is set to `True` the binary
    prefixes are used (Mebi, Gibi).
    """
    bytes = float(value)
    base = binary and 1024 or 1000
    prefixes = [
        (binary and 'KiB' or 'kB'),
        (binary and 'MiB' or 'MB'),
        (binary and 'GiB' or 'GB'),
        (binary and 'TiB' or 'TB'),
        (binary and 'PiB' or 'PB'),
        (binary and 'EiB' or 'EB'),
        (binary and 'ZiB' or 'ZB'),
        (binary and 'YiB' or 'YB')
    ]
    if bytes == 1:
        return '1 Byte'
    elif bytes < base:
        return '%d Bytes' % bytes
    else:
        for i, prefix in enumerate(prefixes):
            unit = base ** (i + 2)
            if bytes < unit:
                return '%.1f %s' % ((base * bytes / unit), prefix)
        return '%.1f %s' % ((base * bytes / unit), prefix)


def do_pprint(value, verbose=False):
    """Pretty print a variable. Useful for debugging.

    With Jinja 1.2 onwards you can pass it a parameter.  If this parameter
    is truthy the output will be more verbose (this requires `pretty`)
    """
    return pformat(value, verbose=verbose)


@evalcontextfilter
def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False):
    """Converts URLs in plain text into clickable links.

    If you pass the filter an additional integer it will shorten the urls
    to that number. Also a third argument exists that makes the urls
    "nofollow":

    .. sourcecode:: jinja

        {{ mytext|urlize(40, true) }}
            links are shortened to 40 chars and defined with rel="nofollow"
    """
    rv = urlize(value, trim_url_limit, nofollow)
    if eval_ctx.autoescape:
        rv = Markup(rv)
    return rv


def do_indent(s, width=4, indentfirst=False):
    """Return a copy of the passed string, each line indented by
    4 spaces. The first line is not indented. If you want to
    change the number of spaces or indent the first line too
    you can pass additional parameters to the filter:

    .. sourcecode:: jinja

        {{ mytext|indent(2, true) }}
            indent by two spaces and indent the first line too.
    """
    indention = u' ' * width
    rv = (u'\n' + indention).join(s.splitlines())
    if indentfirst:
        rv = indention + rv
    return rv


def do_truncate(s, length=255, killwords=False, end='...'):
    """Return a truncated copy of the string. The length is specified
    with the first parameter which defaults to ``255``. If the second
    parameter is ``true`` the filter will cut the text at length. Otherwise
    it will discard the last word. If the text was in fact
    truncated it will append an ellipsis sign (``"..."``). If you want a
    different ellipsis sign than ``"..."`` you can specify it using the
    third parameter.

    .. sourcecode:: jinja

        {{ "foo bar"|truncate(5) }}
            -> "foo ..."
        {{ "foo bar"|truncate(5, True) }}
            -> "foo b..."
    """
    if len(s) <= length:
        return s
    elif killwords:
        return s[:length] + end
    words = s.split(' ')
    result = []
    m = 0
    for word in words:
        m += len(word) + 1
        if m > length:
            break
        result.append(word)
    result.append(end)
    return u' '.join(result)


@environmentfilter
def do_wordwrap(environment, s, width=79, break_long_words=True,
                wrapstring=None):
    """
    Return a copy of the string passed to the filter wrapped after
    ``79`` characters.  You can override this default using the first
    parameter.  If you set the second parameter to `false` Jinja will not
    split words apart if they are longer than `width`. By default, the newlines
    will be the default newlines for the environment, but this can be changed
    using the wrapstring keyword argument.

    .. versionadded:: 2.7
       Added support for the `wrapstring` parameter.
    """
    if not wrapstring:
        wrapstring = environment.newline_sequence
    import textwrap
    return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
                                         replace_whitespace=False,
                                         break_long_words=break_long_words))


def do_wordcount(s):
    """Count the words in that string."""
    return len(_word_re.findall(s))


def do_int(value, default=0):
    """Convert the value into an integer. If the
    conversion doesn't work it will return ``0``. You can
    override this default using the first parameter.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        # this quirk is necessary so that "42.23"|int gives 42.
        try:
            return int(float(value))
        except (TypeError, ValueError):
            return default


def do_float(value, default=0.0):
    """Convert the value into a floating point number. If the
    conversion doesn't work it will return ``0.0``. You can
    override this default using the first parameter.
    """
    try:
        return float(value)
    except (TypeError, ValueError):
        return default


def do_format(value, *args, **kwargs):
    """
    Apply python string formatting on an object:

    .. sourcecode:: jinja

        {{ "%s - %s"|format("Hello?", "Foo!") }}
            -> Hello? - Foo!
    """
    if args and kwargs:
        raise FilterArgumentError('can\'t handle positional and keyword '
                                  'arguments at the same time')
    return soft_unicode(value) % (kwargs or args)


def do_trim(value):
    """Strip leading and trailing whitespace."""
    return soft_unicode(value).strip()


def do_striptags(value):
    """Strip SGML/XML tags and replace adjacent whitespace by one space.
    """
    if hasattr(value, '__html__'):
        value = value.__html__()
    return Markup(text_type(value)).striptags()


def do_slice(value, slices, fill_with=None):
    """Slice an iterator and return a list of lists containing
    those items. Useful if you want to create a div containing
    three ul tags that represent columns:

    .. sourcecode:: html+jinja

        <div class="columnwrapper">
          {%- for column in items|slice(3) %}
            <ul class="column-{{ loop.index }}">
            {%- for item in column %}
              <li>{{ item }}</li>
            {%- endfor %}
            </ul>
          {%- endfor %}
        </div>

    If you pass it a second argument it's used to fill missing
    values on the last iteration.
    """
    seq = list(value)
    length = len(seq)
    items_per_slice = length // slices
    slices_with_extra = length % slices
    offset = 0
    for slice_number in range(slices):
        start = offset + slice_number * items_per_slice
        if slice_number < slices_with_extra:
            offset += 1
        end = offset + (slice_number + 1) * items_per_slice
        tmp = seq[start:end]
        if fill_with is not None and slice_number >= slices_with_extra:
            tmp.append(fill_with)
        yield tmp


def do_batch(value, linecount, fill_with=None):
    """
    A filter that batches items. It works pretty much like `slice`
    just the other way round. It returns a list of lists with the
    given number of items. If you provide a second parameter this
    is used to fill up missing items. See this example:

    .. sourcecode:: html+jinja

        <table>
        {%- for row in items|batch(3, '&nbsp;') %}
          <tr>
          {%- for column in row %}
            <td>{{ column }}</td>
          {%- endfor %}
          </tr>
        {%- endfor %}
        </table>
    """
    result = []
    tmp = []
    for item in value:
        if len(tmp) == linecount:
            yield tmp
            tmp = []
        tmp.append(item)
    if tmp:
        if fill_with is not None and len(tmp) < linecount:
            tmp += [fill_with] * (linecount - len(tmp))
        yield tmp


def do_round(value, precision=0, method='common'):
    """Round the number to a given precision. The first
    parameter specifies the precision (default is ``0``), the
    second the rounding method:

    - ``'common'`` rounds either up or down
    - ``'ceil'`` always rounds up
    - ``'floor'`` always rounds down

    If you don't specify a method ``'common'`` is used.

    .. sourcecode:: jinja

        {{ 42.55|round }}
            -> 43.0
        {{ 42.55|round(1, 'floor') }}
            -> 42.5

    Note that even if rounded to 0 precision, a float is returned.  If
    you need a real integer, pipe it through `int`:

    .. sourcecode:: jinja

        {{ 42.55|round|int }}
            -> 43
    """
    if not method in ('common', 'ceil', 'floor'):
        raise FilterArgumentError('method must be common, ceil or floor')
    if method == 'common':
        return round(value, precision)
    func = getattr(math, method)
    return func(value * (10 ** precision)) / (10 ** precision)


@environmentfilter
def do_groupby(environment, value, attribute):
    """Group a sequence of objects by a common attribute.

    If you for example have a list of dicts or objects that represent persons
    with `gender`, `first_name` and `last_name` attributes and you want to
    group all users by genders you can do something like the following
    snippet:

    .. sourcecode:: html+jinja

        <ul>
        {% for group in persons|groupby('gender') %}
            <li>{{ group.grouper }}<ul>
            {% for person in group.list %}
                <li>{{ person.first_name }} {{ person.last_name }}</li>
            {% endfor %}</ul></li>
        {% endfor %}
        </ul>

    Additionally it's possible to use tuple unpacking for the grouper and
    list:

    .. sourcecode:: html+jinja

        <ul>
        {% for grouper, list in persons|groupby('gender') %}
            ...
        {% endfor %}
        </ul>

    As you can see the item we're grouping by is stored in the `grouper`
    attribute and the `list` contains all the objects that have this grouper
    in common.

    .. versionchanged:: 2.6
       It's now possible to use dotted notation to group by the child
       attribute of another attribute.
    """
    expr = make_attrgetter(environment, attribute)
    return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))


class _GroupTuple(tuple):
    __slots__ = ()
    grouper = property(itemgetter(0))
    list = property(itemgetter(1))

    def __new__(cls, xxx_todo_changeme):
        (key, value) = xxx_todo_changeme
        return tuple.__new__(cls, (key, list(value)))


@environmentfilter
def do_sum(environment, iterable, attribute=None, start=0):
    """Returns the sum of a sequence of numbers plus the value of parameter
    'start' (which defaults to 0).  When the sequence is empty it returns
    start.

    It is also possible to sum up only certain attributes:

    .. sourcecode:: jinja

        Total: {{ items|sum(attribute='price') }}

    .. versionchanged:: 2.6
       The `attribute` parameter was added to allow summing up over
       attributes.  Also the `start` parameter was moved on to the right.
    """
    if attribute is not None:
        iterable = imap(make_attrgetter(environment, attribute), iterable)
    return sum(iterable, start)


def do_list(value):
    """Convert the value into a list.  If it was a string the returned list
    will be a list of characters.
    """
    return list(value)


def do_mark_safe(value):
    """Mark the value as safe which means that in an environment with
    automatic escaping enabled this variable will not be escaped.
    """
    return Markup(value)


def do_mark_unsafe(value):
    """Mark a value as unsafe.  This is the reverse operation for :func:`safe`."""
    return text_type(value)


def do_reverse(value):
    """Reverse the object or return an iterator the iterates over it the other
    way round.
    """
    if isinstance(value, string_types):
        return value[::-1]
    try:
        return reversed(value)
    except TypeError:
        try:
            rv = list(value)
            rv.reverse()
            return rv
        except TypeError:
            raise FilterArgumentError('argument must be iterable')


@environmentfilter
def do_attr(environment, obj, name):
    """Get an attribute of an object.  ``foo|attr("bar")`` works like
    ``foo["bar"]`` just that always an attribute is returned and items are not
    looked up.

    See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
    """
    try:
        name = str(name)
    except UnicodeError:
        pass
    else:
        try:
            value = getattr(obj, name)
        except AttributeError:
            pass
        else:
            if environment.sandboxed and not \
               environment.is_safe_attribute(obj, name, value):
                return environment.unsafe_undefined(obj, name)
            return value
    return environment.undefined(obj=obj, name=name)


@contextfilter
def do_map(*args, **kwargs):
    """Applies a filter on a sequence of objects or looks up an attribute.
    This is useful when dealing with lists of objects but you are really
    only interested in a certain value of it.

    The basic usage is mapping on an attribute.  Imagine you have a list
    of users but you are only interested in a list of usernames:

    .. sourcecode:: jinja

        Users on this page: {{ users|map(attribute='username')|join(', ') }}

    Alternatively you can let it invoke a filter by passing the name of the
    filter and the arguments afterwards.  A good example would be applying a
    text conversion filter on a sequence:

    .. sourcecode:: jinja

        Users on this page: {{ titles|map('lower')|join(', ') }}

    .. versionadded:: 2.7
    """
    context = args[0]
    seq = args[1]

    if len(args) == 2 and 'attribute' in kwargs:
        attribute = kwargs.pop('attribute')
        if kwargs:
            raise FilterArgumentError('Unexpected keyword argument %r' %
                                      next(iter(kwargs)))
        func = make_attrgetter(context.environment, attribute)
    else:
        try:
            name = args[2]
            args = args[3:]
        except LookupError:
            raise FilterArgumentError('map requires a filter argument')
        func = lambda item: context.environment.call_filter(
            name, item, args, kwargs, context=context)

    if seq:
        for item in seq:
            yield func(item)


@contextfilter
def do_select(*args, **kwargs):
    """Filters a sequence of objects by applying a test to either the object
    or the attribute and only selecting the ones with the test succeeding.

    Example usage:

    .. sourcecode:: jinja

        {{ numbers|select("odd") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: x, False)


@contextfilter
def do_reject(*args, **kwargs):
    """Filters a sequence of objects by applying a test to either the object
    or the attribute and rejecting the ones with the test succeeding.

    Example usage:

    .. sourcecode:: jinja

        {{ numbers|reject("odd") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: not x, False)


@contextfilter
def do_selectattr(*args, **kwargs):
    """Filters a sequence of objects by applying a test to either the object
    or the attribute and only selecting the ones with the test succeeding.

    Example usage:

    .. sourcecode:: jinja

        {{ users|selectattr("is_active") }}
        {{ users|selectattr("email", "none") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: x, True)


@contextfilter
def do_rejectattr(*args, **kwargs):
    """Filters a sequence of objects by applying a test to either the object
    or the attribute and rejecting the ones with the test succeeding.

    .. sourcecode:: jinja

        {{ users|rejectattr("is_active") }}
        {{ users|rejectattr("email", "none") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: not x, True)


def _select_or_reject(args, kwargs, modfunc, lookup_attr):
    context = args[0]
    seq = args[1]
    if lookup_attr:
        try:
            attr = args[2]
        except LookupError:
            raise FilterArgumentError('Missing parameter for attribute name')
        transfunc = make_attrgetter(context.environment, attr)
        off = 1
    else:
        off = 0
        transfunc = lambda x: x

    try:
        name = args[2 + off]
        args = args[3 + off:]
        func = lambda item: context.environment.call_test(
            name, item, args, kwargs)
    except LookupError:
        func = bool

    if seq:
        for item in seq:
            if modfunc(func(transfunc(item))):
                yield item


FILTERS = {
    'attr': do_attr,
    'replace': do_replace,
    'upper': do_upper,
    'lower': do_lower,
    'escape': escape,
    'e': escape,
    'forceescape': do_forceescape,
    'capitalize': do_capitalize,
    'title': do_title,
    'default': do_default,
    'd': do_default,
    'join': do_join,
    'count': len,
    'dictsort': do_dictsort,
    'sort': do_sort,
    'length': len,
    'reverse': do_reverse,
    'center': do_center,
    'indent': do_indent,
    'first': do_first,
    'last': do_last,
    'map': do_map,
    'random': do_random,
    'reject': do_reject,
    'rejectattr': do_rejectattr,
    'filesizeformat': do_filesizeformat,
    'pprint': do_pprint,
    'truncate': do_truncate,
    'wordwrap': do_wordwrap,
    'wordcount': do_wordcount,
    'int': do_int,
    'float': do_float,
    'string': soft_unicode,
    'list': do_list,
    'urlize': do_urlize,
    'format': do_format,
    'trim': do_trim,
    'striptags': do_striptags,
    'select': do_select,
    'selectattr': do_selectattr,
    'slice': do_slice,
    'batch': do_batch,
    'sum': do_sum,
    'abs': abs,
    'round': do_round,
    'groupby': do_groupby,
    'safe': do_mark_safe,
    'xmlattr': do_xmlattr,
    'urlencode': do_urlencode
}
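# Usage sketch (standard Jinja2 public API): the filters above are reached
# through an Environment, e.g.
#
#   from jinja2 import Environment
#   env = Environment()
#   print(env.from_string("{{ items|batch(3)|list }}").render(items=range(7)))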
fritsvanveen/QGIS
refs/heads/master
python/plugins/processing/algs/examplescripts/__init__.py
15
# -*- coding: utf-8 -*-

"""
***************************************************************************
    __init__.py
    ---------------------
    Date                 : July 2013
    Copyright            : (C) 2013 by Victor Olaya
    Email                : volayaf at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

from .ProcessingExampleScriptsPlugin import ProcessingExampleScriptsPlugin

__author__ = 'Victor Olaya'
__date__ = 'July 2013'
__copyright__ = '(C) 2013, Victor Olaya'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'


def classFactory(iface):
    return ProcessingExampleScriptsPlugin()
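# Usage note: nothing calls classFactory() directly in this codebase -- it is
# the entry point the QGIS plugin loader invokes (passing the QGIS interface
# object as iface) when the plugin is activated.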
LLNL/spack
refs/heads/develop
var/spack/repos/builtin/packages/py-transformers/package.py
5
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


class PyTransformers(PythonPackage):
    """State-of-the-art Natural Language Processing for TensorFlow 2.0 and
    PyTorch"""

    homepage = "https://github.com/huggingface/transformers"
    url = "https://pypi.io/packages/source/t/transformers/transformers-2.8.0.tar.gz"

    maintainers = ['adamjstewart']

    version('2.8.0', sha256='b9f29cdfd39c28f29e0806c321270dea337d6174a7aa60daf9625bf83dbb12ee')

    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('[email protected]', type=('build', 'run'))
    depends_on('py-dataclasses', when='^python@:3.6', type=('build', 'run'))
    depends_on('py-boto3', type=('build', 'run'))
    depends_on('py-filelock', type=('build', 'run'))
    depends_on('py-requests', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-regex@:2019.12.16,2019.12.18:', type=('build', 'run'))
    depends_on('py-sentencepiece', type=('build', 'run'))
    depends_on('py-sacremoses', type=('build', 'run'))
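# Usage sketch (assuming a working Spack installation with this package in the
# builtin repository):
#
#   spack install [email protected]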
mmclenna/engine
refs/heads/master
testing/chromoting/download_test_files.py
57
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A script to download files required for Remoting integration tests from GCS.

The script expects 2 parameters:
    input_files: a file containing the full path in GCS to each file that is
        to be downloaded.
    output_folder: the folder to which the specified files should be
        downloaded.

This script expects that its execution is done on a machine where the
credentials are correctly setup to obtain the required permissions for
downloading files from the specified GCS buckets.
"""

import argparse
import ntpath
import os
import subprocess
import sys


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('-f', '--files',
                      help='File specifying files to be downloaded .')
  parser.add_argument(
      '-o', '--output_folder',
      help='Folder where specified files should be downloaded .')
  if len(sys.argv) < 3:
    parser.print_help()
    sys.exit(1)
  args = parser.parse_args()
  if not args.files or not args.output_folder:
    parser.print_help()
    sys.exit(1)

  # Loop through lines in input file specifying source file locations.
  with open(args.files) as f:
    for line in f:
      # Trim the trailing newline so gsutil receives a clean GCS path.
      line = line.strip()
      # Copy the file to the output folder, with same name as source file.
      output_file = os.path.join(args.output_folder, ntpath.basename(line))
      # Download specified file from GCS.
      cp_cmd = ['gsutil.py', 'cp', line, output_file]
      try:
        subprocess.check_call(cp_cmd)
      except subprocess.CalledProcessError, e:
        print e.output
        sys.exit(1)


if __name__ == '__main__':
  main()
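# Usage sketch (hypothetical paths): files.txt lists one gs://... path per
# line, and the downloads land in the given output folder:
#
#   python download_test_files.py -f files.txt -o /tmp/remoting_test_files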
jmcarp/django
refs/heads/master
django/core/context_processors.py
309
import warnings

from django.template.context_processors import *  # NOQA
from django.utils.deprecation import RemovedInDjango110Warning

warnings.warn(
    "django.core.context_processors is deprecated in favor of "
    "django.template.context_processors.",
    RemovedInDjango110Warning, stacklevel=2)
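# Usage note: merely importing this shim emits the deprecation warning
# (subject to the active warning filters), e.g.
#
#   import django.core.context_processors  # triggers RemovedInDjango110Warning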