repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
TNT-Samuel/Coding-Projects
refs/heads/master
DNS Server/Source/Lib/site-packages/joblib/test/common.py
4
""" Small utilities for testing. """ import threading import signal import time import os import sys import gc from joblib._compat import PY3_OR_LATER from joblib._multiprocessing_helpers import mp from joblib.testing import SkipTest, skipif try: import lz4 except ImportError: lz4 = None # A decorator to run tests only when numpy is available try: import numpy as np def with_numpy(func): """A decorator to skip tests requiring numpy.""" return func except ImportError: def with_numpy(func): """A decorator to skip tests requiring numpy.""" def my_func(): raise SkipTest('Test requires numpy') return my_func np = None # TODO: Turn this back on after refactoring yield based tests in test_hashing # with_numpy = skipif(not np, reason='Test requires numpy.') # we use memory_profiler library for memory consumption checks try: from memory_profiler import memory_usage def with_memory_profiler(func): """A decorator to skip tests requiring memory_profiler.""" return func def memory_used(func, *args, **kwargs): """Compute memory usage when executing func.""" gc.collect() mem_use = memory_usage((func, args, kwargs), interval=.001) return max(mem_use) - min(mem_use) except ImportError: def with_memory_profiler(func): """A decorator to skip tests requiring memory_profiler.""" def dummy_func(): raise SkipTest('Test requires memory_profiler.') return dummy_func memory_usage = memory_used = None # A utility to kill the test runner in case a multiprocessing assumption # triggers an infinite wait on a pipe by the master process for one of its # failed workers _KILLER_THREADS = dict() def setup_autokill(module_name, timeout=30): """Timeout based suiciding thread to kill the test runner process If some subprocess dies in an unexpected way we don't want the parent process to block indefinitely. 
""" if "NO_AUTOKILL" in os.environ or "--pdb" in sys.argv: # Do not install the autokiller return # Renew any previous contract under that name by first cancelling the # previous version (that should normally not happen in practice) teardown_autokill(module_name) def autokill(): pid = os.getpid() print("Timeout exceeded: terminating stalled process: %d" % pid) os.kill(pid, signal.SIGTERM) # If were are still there ask the OS to kill ourself for real time.sleep(0.5) print("Timeout exceeded: killing stalled process: %d" % pid) os.kill(pid, signal.SIGKILL) _KILLER_THREADS[module_name] = t = threading.Timer(timeout, autokill) t.start() def teardown_autokill(module_name): """Cancel a previously started killer thread""" killer = _KILLER_THREADS.get(module_name) if killer is not None: killer.cancel() with_multiprocessing = skipif( mp is None, reason='Needs multiprocessing to run.') with_dev_shm = skipif( not os.path.exists('/dev/shm'), reason='This test requires a large /dev/shm shared memory fs.') with_lz4 = skipif( lz4 is None or not PY3_OR_LATER, reason='Needs lz4 compression to run') without_lz4 = skipif( lz4 is not None, reason='Needs lz4 not being installed to run')
Pakoach/Sick-Beard
refs/heads/master
cherrypy/__init__.py
39
"""CherryPy is a pythonic, object-oriented HTTP framework. CherryPy consists of not one, but four separate API layers. The APPLICATION LAYER is the simplest. CherryPy applications are written as a tree of classes and methods, where each branch in the tree corresponds to a branch in the URL path. Each method is a 'page handler', which receives GET and POST params as keyword arguments, and returns or yields the (HTML) body of the response. The special method name 'index' is used for paths that end in a slash, and the special method name 'default' is used to handle multiple paths via a single handler. This layer also includes: * the 'exposed' attribute (and cherrypy.expose) * cherrypy.quickstart() * _cp_config attributes * cherrypy.tools (including cherrypy.session) * cherrypy.url() The ENVIRONMENT LAYER is used by developers at all levels. It provides information about the current request and response, plus the application and server environment, via a (default) set of top-level objects: * cherrypy.request * cherrypy.response * cherrypy.engine * cherrypy.server * cherrypy.tree * cherrypy.config * cherrypy.thread_data * cherrypy.log * cherrypy.HTTPError, NotFound, and HTTPRedirect * cherrypy.lib The EXTENSION LAYER allows advanced users to construct and share their own plugins. It consists of: * Hook API * Tool API * Toolbox API * Dispatch API * Config Namespace API Finally, there is the CORE LAYER, which uses the core API's to construct the default components which are available at higher layers. You can think of the default components as the 'reference implementation' for CherryPy. Megaframeworks (and advanced users) may replace the default components with customized or extended components. 
The core API's are: * Application API * Engine API * Request API * Server API * WSGI API These API's are described in the CherryPy specification: http://www.cherrypy.org/wiki/CherryPySpec """ __version__ = "3.2.0rc1" from urlparse import urljoin as _urljoin from urllib import urlencode as _urlencode class _AttributeDocstrings(type): """Metaclass for declaring docstrings for class attributes.""" # The full docstring for this type is down in the __init__ method so # that it doesn't show up in help() for every consumer class. def __init__(cls, name, bases, dct): '''Metaclass for declaring docstrings for class attributes. Base Python doesn't provide any syntax for setting docstrings on 'data attributes' (non-callables). This metaclass allows class definitions to follow the declaration of a data attribute with a docstring for that attribute; the attribute docstring will be popped from the class dict and folded into the class docstring. The naming convention for attribute docstrings is: <attrname> + "__doc". For example: class Thing(object): """A thing and its properties.""" __metaclass__ = cherrypy._AttributeDocstrings height = 50 height__doc = """The height of the Thing in inches.""" In which case, help(Thing) starts like this: >>> help(mod.Thing) Help on class Thing in module pkg.mod: class Thing(__builtin__.object) | A thing and its properties. | | height [= 50]: | The height of the Thing in inches. | The benefits of this approach over hand-edited class docstrings: 1. Places the docstring nearer to the attribute declaration. 2. Makes attribute docs more uniform ("name (default): doc"). 3. Reduces mismatches of attribute _names_ between the declaration and the documentation. 4. Reduces mismatches of attribute default _values_ between the declaration and the documentation. The benefits of a metaclass approach over other approaches: 1. Simpler ("less magic") than interface-based solutions. 2. __metaclass__ can be specified at the module global level for classic classes. 
For various formatting reasons, you should write multiline docs with a leading newline and not a trailing one: response__doc = """ The response object for the current thread. In the main thread, and any threads which are not HTTP requests, this is None.""" The type of the attribute is intentionally not included, because that's not How Python Works. Quack. ''' newdoc = [cls.__doc__ or ""] dctkeys = dct.keys() dctkeys.sort() for name in dctkeys: if name.endswith("__doc"): # Remove the magic doc attribute. if hasattr(cls, name): delattr(cls, name) # Make a uniformly-indented docstring from it. val = '\n'.join([' ' + line.strip() for line in dct[name].split('\n')]) # Get the default value. attrname = name[:-5] try: attrval = getattr(cls, attrname) except AttributeError: attrval = "missing" # Add the complete attribute docstring to our list. newdoc.append("%s [= %r]:\n%s" % (attrname, attrval, val)) # Add our list of new docstrings to the class docstring. cls.__doc__ = "\n\n".join(newdoc) from cherrypy._cperror import HTTPError, HTTPRedirect, InternalRedirect from cherrypy._cperror import NotFound, CherryPyException, TimeoutError from cherrypy import _cpdispatch as dispatch from cherrypy import _cptools tools = _cptools.default_toolbox Tool = _cptools.Tool from cherrypy import _cprequest from cherrypy.lib import httputil as _httputil from cherrypy import _cptree tree = _cptree.Tree() from cherrypy._cptree import Application from cherrypy import _cpwsgi as wsgi from cherrypy import process try: from cherrypy.process import win32 engine = win32.Win32Bus() engine.console_control_handler = win32.ConsoleCtrlHandler(engine) del win32 except ImportError: engine = process.bus # Timeout monitor class _TimeoutMonitor(process.plugins.Monitor): def __init__(self, bus): self.servings = [] process.plugins.Monitor.__init__(self, bus, self.run) def acquire(self): self.servings.append((serving.request, serving.response)) def release(self): try: self.servings.remove((serving.request, 
serving.response)) except ValueError: pass def run(self): """Check timeout on all responses. (Internal)""" for req, resp in self.servings: resp.check_timeout() engine.timeout_monitor = _TimeoutMonitor(engine) engine.timeout_monitor.subscribe() engine.autoreload = process.plugins.Autoreloader(engine) engine.autoreload.subscribe() engine.thread_manager = process.plugins.ThreadManager(engine) engine.thread_manager.subscribe() engine.signal_handler = process.plugins.SignalHandler(engine) from cherrypy import _cpserver server = _cpserver.Server() server.subscribe() def quickstart(root=None, script_name="", config=None): """Mount the given root, start the builtin server (and engine), then block. root: an instance of a "controller class" (a collection of page handler methods) which represents the root of the application. script_name: a string containing the "mount point" of the application. This should start with a slash, and be the path portion of the URL at which to mount the given root. For example, if root.index() will handle requests to "http://www.example.com:8080/dept/app1/", then the script_name argument would be "/dept/app1". It MUST NOT end in a slash. If the script_name refers to the root of the URI, it MUST be an empty string (not "/"). config: a file or dict containing application config. If this contains a [global] section, those entries will be used in the global (site-wide) config. """ if config: _global_conf_alias.update(config) tree.mount(root, script_name, config) if hasattr(engine, "signal_handler"): engine.signal_handler.subscribe() if hasattr(engine, "console_control_handler"): engine.console_control_handler.subscribe() engine.start() engine.block() try: from threading import local as _local except ImportError: from cherrypy._cpthreadinglocal import local as _local class _Serving(_local): """An interface for registering request and response objects. 
Rather than have a separate "thread local" object for the request and the response, this class works as a single threadlocal container for both objects (and any others which developers wish to define). In this way, we can easily dump those objects when we stop/start a new HTTP conversation, yet still refer to them as module-level globals in a thread-safe way. """ __metaclass__ = _AttributeDocstrings request = _cprequest.Request(_httputil.Host("127.0.0.1", 80), _httputil.Host("127.0.0.1", 1111)) request__doc = """ The request object for the current thread. In the main thread, and any threads which are not receiving HTTP requests, this is None.""" response = _cprequest.Response() response__doc = """ The response object for the current thread. In the main thread, and any threads which are not receiving HTTP requests, this is None.""" def load(self, request, response): self.request = request self.response = response def clear(self): """Remove all attributes of self.""" self.__dict__.clear() serving = _Serving() class _ThreadLocalProxy(object): __slots__ = ['__attrname__', '__dict__'] def __init__(self, attrname): self.__attrname__ = attrname def __getattr__(self, name): child = getattr(serving, self.__attrname__) return getattr(child, name) def __setattr__(self, name, value): if name in ("__attrname__",): object.__setattr__(self, name, value) else: child = getattr(serving, self.__attrname__) setattr(child, name, value) def __delattr__(self, name): child = getattr(serving, self.__attrname__) delattr(child, name) def _get_dict(self): child = getattr(serving, self.__attrname__) d = child.__class__.__dict__.copy() d.update(child.__dict__) return d __dict__ = property(_get_dict) def __getitem__(self, key): child = getattr(serving, self.__attrname__) return child[key] def __setitem__(self, key, value): child = getattr(serving, self.__attrname__) child[key] = value def __delitem__(self, key): child = getattr(serving, self.__attrname__) del child[key] def __contains__(self, 
key): child = getattr(serving, self.__attrname__) return key in child def __len__(self): child = getattr(serving, self.__attrname__) return len(child) def __nonzero__(self): child = getattr(serving, self.__attrname__) return bool(child) # Create request and response object (the same objects will be used # throughout the entire life of the webserver, but will redirect # to the "serving" object) request = _ThreadLocalProxy('request') response = _ThreadLocalProxy('response') # Create thread_data object as a thread-specific all-purpose storage class _ThreadData(_local): """A container for thread-specific data.""" thread_data = _ThreadData() # Monkeypatch pydoc to allow help() to go through the threadlocal proxy. # Jan 2007: no Googleable examples of anyone else replacing pydoc.resolve. # The only other way would be to change what is returned from type(request) # and that's not possible in pure Python (you'd have to fake ob_type). def _cherrypy_pydoc_resolve(thing, forceload=0): """Given an object or a path to an object, get the object and its name.""" if isinstance(thing, _ThreadLocalProxy): thing = getattr(serving, thing.__attrname__) return _pydoc._builtin_resolve(thing, forceload) try: import pydoc as _pydoc _pydoc._builtin_resolve = _pydoc.resolve _pydoc.resolve = _cherrypy_pydoc_resolve except ImportError: pass from cherrypy import _cplogging class _GlobalLogManager(_cplogging.LogManager): def __call__(self, *args, **kwargs): # Do NOT use try/except here. See http://www.cherrypy.org/ticket/945 if hasattr(request, 'app') and hasattr(request.app, 'log'): log = request.app.log else: log = self return log.error(*args, **kwargs) def access(self): try: return request.app.log.access() except AttributeError: return _cplogging.LogManager.access(self) log = _GlobalLogManager() # Set a default screen handler on the global log. log.screen = True log.error_file = '' # Using an access file makes CP about 10% slower. Leave off by default. 
log.access_file = '' def _buslog(msg, level): log.error(msg, 'ENGINE', severity=level) engine.subscribe('log', _buslog) # Helper functions for CP apps # def expose(func=None, alias=None): """Expose the function, optionally providing an alias or set of aliases.""" def expose_(func): func.exposed = True if alias is not None: if isinstance(alias, basestring): parents[alias.replace(".", "_")] = func else: for a in alias: parents[a.replace(".", "_")] = func return func import sys, types if isinstance(func, (types.FunctionType, types.MethodType)): if alias is None: # @expose func.exposed = True return func else: # func = expose(func, alias) parents = sys._getframe(1).f_locals return expose_(func) elif func is None: if alias is None: # @expose() parents = sys._getframe(1).f_locals return expose_ else: # @expose(alias="alias") or # @expose(alias=["alias1", "alias2"]) parents = sys._getframe(1).f_locals return expose_ else: # @expose("alias") or # @expose(["alias1", "alias2"]) parents = sys._getframe(1).f_locals alias = func return expose_ def url(path="", qs="", script_name=None, base=None, relative=None): """Create an absolute URL for the given path. If 'path' starts with a slash ('/'), this will return (base + script_name + path + qs). If it does not start with a slash, this returns (base + script_name [+ request.path_info] + path + qs). If script_name is None, cherrypy.request will be used to find a script_name, if available. If base is None, cherrypy.request.base will be used (if available). Note that you can use cherrypy.tools.proxy to change this. Finally, note that this function can be used to obtain an absolute URL for the current request path (minus the querystring) by passing no args. If you call url(qs=cherrypy.request.query_string), you should get the original browser URL (assuming no internal redirections). If relative is None or not provided, request.app.relative_urls will be used (if available, else False). 
If False, the output will be an absolute URL (including the scheme, host, vhost, and script_name). If True, the output will instead be a URL that is relative to the current request path, perhaps including '..' atoms. If relative is the string 'server', the output will instead be a URL that is relative to the server root; i.e., it will start with a slash. """ if isinstance(qs, (tuple, list, dict)): qs = _urlencode(qs) if qs: qs = '?' + qs if request.app: if not path.startswith("/"): # Append/remove trailing slash from path_info as needed # (this is to support mistyped URL's without redirecting; # if you want to redirect, use tools.trailing_slash). pi = request.path_info if request.is_index is True: if not pi.endswith('/'): pi = pi + '/' elif request.is_index is False: if pi.endswith('/') and pi != '/': pi = pi[:-1] if path == "": path = pi else: path = _urljoin(pi, path) if script_name is None: script_name = request.script_name if base is None: base = request.base newurl = base + script_name + path + qs else: # No request.app (we're being called outside a request). # We'll have to guess the base from server.* attributes. # This will produce very different results from the above # if you're using vhosts or tools.proxy. if base is None: base = server.base() path = (script_name or "") + path newurl = base + path + qs if './' in newurl: # Normalize the URL by removing ./ and ../ atoms = [] for atom in newurl.split('/'): if atom == '.': pass elif atom == '..': atoms.pop() else: atoms.append(atom) newurl = '/'.join(atoms) # At this point, we should have a fully-qualified absolute URL. if relative is None: relative = getattr(request.app, "relative_urls", False) # See http://www.ietf.org/rfc/rfc2396.txt if relative == 'server': # "A relative reference beginning with a single slash character is # termed an absolute-path reference, as defined by <abs_path>..." # This is also sometimes called "server-relative". 
newurl = '/' + '/'.join(newurl.split('/', 3)[3:]) elif relative: # "A relative reference that does not begin with a scheme name # or a slash character is termed a relative-path reference." old = url().split('/')[:-1] new = newurl.split('/') while old and new: a, b = old[0], new[0] if a != b: break old.pop(0) new.pop(0) new = (['..'] * len(old)) + new newurl = '/'.join(new) return newurl # import _cpconfig last so it can reference other top-level objects from cherrypy import _cpconfig # Use _global_conf_alias so quickstart can use 'config' as an arg # without shadowing cherrypy.config. config = _global_conf_alias = _cpconfig.Config() config.defaults = { 'tools.log_tracebacks.on': True, 'tools.log_headers.on': True, 'tools.trailing_slash.on': True, 'tools.encode.on': True } config.namespaces["log"] = lambda k, v: setattr(log, k, v) config.namespaces["checker"] = lambda k, v: setattr(checker, k, v) # Must reset to get our defaults applied. config.reset() from cherrypy import _cpchecker checker = _cpchecker.Checker() engine.subscribe('start', checker)
baslr/ArangoDB
refs/heads/3.1-silent
3rdParty/boost/1.62.0/tools/quickbook/test/snippets/pass_thru.py
60
# Copyright (c) 2011 Daniel James # # Use, modification and distribution is subject to the Boost Software # License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) #[foo_py def foo: #=print('foo') #<- print('bar') #-> #]
ksmaheshkumar/mycli
refs/heads/master
release.py
10
#!/usr/bin/env python from __future__ import print_function import re import ast import subprocess import sys DEBUG = False def version(version_file): _version_re = re.compile(r'__version__\s+=\s+(.*)') with open(version_file, 'rb') as f: ver = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) return ver def commit_for_release(version_file, ver): cmd = ['git', 'reset'] print(' '.join(cmd)) subprocess.check_output(cmd) cmd = ['git', 'add', version_file] print(' '.join(cmd)) subprocess.check_output(cmd) cmd = ['git', 'commit', '--message', 'Releasing version %s' % ver] print(' '.join(cmd)) subprocess.check_output(cmd) def create_git_tag(tag_name): cmd = ['git', 'tag', tag_name] print(' '.join(cmd)) subprocess.check_output(cmd) def register_with_pypi(): cmd = ['python', 'setup.py', 'register'] print(' '.join(cmd)) subprocess.check_output(cmd) def create_source_tarball(): cmd = ['python', 'setup.py', 'sdist'] print(' '.join(cmd)) subprocess.check_output(cmd) def push_to_github(): cmd = ['git', 'push', 'origin', 'master'] print(' '.join(cmd)) subprocess.check_output(cmd) def push_tags_to_github(): cmd = ['git', 'push', '--tags', 'origin'] print(' '.join(cmd)) subprocess.check_output(cmd) if __name__ == '__main__': if DEBUG: subprocess.check_output = lambda x: x choice = raw_input('Have you created the debian package? (y/N)') if choice.lower() != 'y': sys.exit(1) ver = version('mycli/__init__.py') print('Releasing Version:', ver) choice = raw_input('Are you sure? (y/N)') if choice.lower() != 'y': sys.exit(1) commit_for_release('mycli/__init__.py', ver) create_git_tag('v%s' % ver) register_with_pypi() create_source_tarball() push_to_github() push_tags_to_github()
OpnSrcConstruction/OSCbashRCs
refs/heads/master
.ipython/profile_default/ipython_kernel_config.py
2
# Configuration file for ipython-kernel. #------------------------------------------------------------------------------ # ConnectionFileMixin(LoggingConfigurable) configuration #------------------------------------------------------------------------------ ## Mixin for configurable classes that work with connection files ## JSON file in which to store connection info [default: kernel-<pid>.json] # # This file will contain the IP, ports, and authentication key needed to connect # clients to this kernel. By default, this file will be created in the security # dir of the current profile, but can be specified by absolute path. #c.ConnectionFileMixin.connection_file = '' ## set the control (ROUTER) port [default: random] #c.ConnectionFileMixin.control_port = 0 ## set the heartbeat port [default: random] #c.ConnectionFileMixin.hb_port = 0 ## set the iopub (PUB) port [default: random] #c.ConnectionFileMixin.iopub_port = 0 ## Set the kernel's IP address [default localhost]. If the IP address is # something other than localhost, then Consoles on other machines will be able # to connect to the Kernel, so be careful! #c.ConnectionFileMixin.ip = '' ## set the shell (ROUTER) port [default: random] #c.ConnectionFileMixin.shell_port = 0 ## set the stdin (ROUTER) port [default: random] #c.ConnectionFileMixin.stdin_port = 0 ## #c.ConnectionFileMixin.transport = 'tcp' #------------------------------------------------------------------------------ # InteractiveShellApp(Configurable) configuration #------------------------------------------------------------------------------ ## A Mixin for applications that start InteractiveShell instances. # # Provides configurables for loading extensions and executing files as part of # configuring a Shell environment. 
# # The following methods should be called by the :meth:`initialize` method of the # subclass: # # - :meth:`init_path` # - :meth:`init_shell` (to be implemented by the subclass) # - :meth:`init_gui_pylab` # - :meth:`init_extensions` # - :meth:`init_code` ## Execute the given command string. #c.InteractiveShellApp.code_to_run = '' ## Run the file referenced by the PYTHONSTARTUP environment variable at IPython # startup. #c.InteractiveShellApp.exec_PYTHONSTARTUP = True ## List of files to run at IPython startup. #c.InteractiveShellApp.exec_files = [] ## lines of code to run at IPython startup. #c.InteractiveShellApp.exec_lines = [] ## A list of dotted module names of IPython extensions to load. #c.InteractiveShellApp.extensions = [] ## dotted module name of an IPython extension to load. #c.InteractiveShellApp.extra_extension = '' ## A file to be run #c.InteractiveShellApp.file_to_run = '' ## Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3', # 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4'). #c.InteractiveShellApp.gui = None ## Should variables loaded at startup (by startup files, exec_lines, etc.) be # hidden from tools like %who? #c.InteractiveShellApp.hide_initial_ns = True ## Configure matplotlib for interactive use with the default matplotlib backend. #c.InteractiveShellApp.matplotlib = None ## Run the module as a script. #c.InteractiveShellApp.module_to_run = '' ## Pre-load matplotlib and numpy for interactive use, selecting a particular # matplotlib backend and loop integration. #c.InteractiveShellApp.pylab = None ## If true, IPython will populate the user namespace with numpy, pylab, etc. and # an ``import *`` is done from numpy and pylab, when using pylab mode. # # When False, pylab mode should not import any names into the user namespace. #c.InteractiveShellApp.pylab_import_all = True ## Reraise exceptions encountered loading IPython extensions? 
#c.InteractiveShellApp.reraise_ipython_extension_failures = False #------------------------------------------------------------------------------ # Application(SingletonConfigurable) configuration #------------------------------------------------------------------------------ ## This is an application. ## The date format used by logging formatters for %(asctime)s #c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S' ## The Logging format template #c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s' ## Set the log level by value or name. #c.Application.log_level = 30 #------------------------------------------------------------------------------ # BaseIPythonApplication(Application) configuration #------------------------------------------------------------------------------ ## IPython: an enhanced interactive Python shell. ## Whether to create profile dir if it doesn't exist #c.BaseIPythonApplication.auto_create = False ## Whether to install the default config files into the profile dir. If a new # profile is being created, and IPython contains config files for that profile, # then they will be staged into the new directory. Otherwise, default config # files will be automatically generated. #c.BaseIPythonApplication.copy_config_files = False ## Path to an extra config file to load. # # If specified, load this config file in addition to any other IPython config. #c.BaseIPythonApplication.extra_config_file = '' ## The name of the IPython directory. This directory is used for logging # configuration (through profiles), history storage, etc. The default is usually # $HOME/.ipython. This option can also be specified through the environment # variable IPYTHONDIR. #c.BaseIPythonApplication.ipython_dir = '' ## Whether to overwrite existing config files when copying #c.BaseIPythonApplication.overwrite = False ## The IPython profile to use. 
#c.BaseIPythonApplication.profile = 'default' ## Create a massive crash report when IPython encounters what may be an internal # error. The default is to append a short message to the usual traceback #c.BaseIPythonApplication.verbose_crash = False #------------------------------------------------------------------------------ # IPKernelApp(BaseIPythonApplication,InteractiveShellApp,ConnectionFileMixin) configuration #------------------------------------------------------------------------------ ## IPython: an enhanced interactive Python shell. ## The importstring for the DisplayHook factory #c.IPKernelApp.displayhook_class = 'ipykernel.displayhook.ZMQDisplayHook' ## ONLY USED ON WINDOWS Interrupt this process when the parent is signaled. #c.IPKernelApp.interrupt = 0 ## The Kernel subclass to be used. # # This should allow easy re-use of the IPKernelApp entry point to configure and # launch kernels other than IPython's own. #c.IPKernelApp.kernel_class = 'ipykernel.ipkernel.IPythonKernel' ## redirect stderr to the null device #c.IPKernelApp.no_stderr = False ## redirect stdout to the null device #c.IPKernelApp.no_stdout = False ## The importstring for the OutStream factory #c.IPKernelApp.outstream_class = 'ipykernel.iostream.OutStream' ## kill this process if its parent dies. On Windows, the argument specifies the # HANDLE of the parent process, otherwise it is simply boolean. #c.IPKernelApp.parent_handle = 0 #------------------------------------------------------------------------------ # Kernel(SingletonConfigurable) configuration #------------------------------------------------------------------------------ ## Whether to use appnope for compatiblity with OS X App Nap. # # Only affects OS X >= 10.9. 
#c.Kernel._darwin_app_nap = True ## #c.Kernel._execute_sleep = 0.0005 ## #c.Kernel._poll_interval = 0.05 #------------------------------------------------------------------------------ # IPythonKernel(Kernel) configuration #------------------------------------------------------------------------------ ## #c.IPythonKernel.help_links = [{'url': 'https://docs.python.org/3.5', 'text': 'Python Reference'}, {'url': 'https://ipython.org/documentation.html', 'text': 'IPython Reference'}, {'url': 'https://docs.scipy.org/doc/numpy/reference/', 'text': 'NumPy Reference'}, {'url': 'https://docs.scipy.org/doc/scipy/reference/', 'text': 'SciPy Reference'}, {'url': 'https://matplotlib.org/contents.html', 'text': 'Matplotlib Reference'}, {'url': 'http://docs.sympy.org/latest/index.html', 'text': 'SymPy Reference'}, {'url': 'https://pandas.pydata.org/pandas-docs/stable/', 'text': 'pandas Reference'}] ## Set this flag to False to deactivate the use of experimental IPython # completion APIs. #c.IPythonKernel.use_experimental_completions = True #------------------------------------------------------------------------------ # InteractiveShell(SingletonConfigurable) configuration #------------------------------------------------------------------------------ ## An enhanced, interactive shell for Python. ## 'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying which # nodes should be run interactively (displaying output from expressions). #c.InteractiveShell.ast_node_interactivity = 'last_expr' ## A list of ast.NodeTransformer subclass instances, which will be applied to # user input before code is run. #c.InteractiveShell.ast_transformers = [] ## Make IPython automatically call any callable object even if you didn't type # explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically. 
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where # it is not applied if there are no more arguments on the line, and '2' for # 'full' autocall, where all callable objects are automatically called (even if # no arguments are present). #c.InteractiveShell.autocall = 0 ## Autoindent IPython code entered interactively. #c.InteractiveShell.autoindent = True ## Enable magic commands to be called without the leading %. #c.InteractiveShell.automagic = True ## The part of the banner to be printed before the profile #c.InteractiveShell.banner1 = "Python 3.5.2 (default, Nov 23 2017, 16:37:01) \nType 'copyright', 'credits' or 'license' for more information\nIPython 6.2.1 -- An enhanced Interactive Python. Type '?' for help.\n" ## The part of the banner to be printed after the profile #c.InteractiveShell.banner2 = '' ## Set the size of the output cache. The default is 1000, you can change it # permanently in your config file. Setting it to 0 completely disables the # caching system, and the minimum value accepted is 3 (if you provide a value # less than 3, it is reset to 0 and a warning is issued). This limit is defined # because otherwise you'll spend more time re-flushing a too small cache than # working #c.InteractiveShell.cache_size = 1000 ## Use colors for displaying information about objects. Because this information # is passed through a pager (like 'less'), and some pagers get confused with # color codes, this capability can be turned off. #c.InteractiveShell.color_info = True ## Set the color scheme (NoColor, Neutral, Linux, or LightBG). #c.InteractiveShell.colors = 'Neutral' ## #c.InteractiveShell.debug = False ## Don't call post-execute functions that have failed in the past. #c.InteractiveShell.disable_failing_post_execute = False ## If True, anything that would be passed to the pager will be displayed as # regular output instead. 
#c.InteractiveShell.display_page = False ## (Provisional API) enables html representation in mime bundles sent to pagers. #c.InteractiveShell.enable_html_pager = False ## Total length of command history #c.InteractiveShell.history_length = 10000 ## The number of saved history entries to be loaded into the history buffer at # startup. #c.InteractiveShell.history_load_length = 1000 ## #c.InteractiveShell.ipython_dir = '' ## Start logging to the given file in append mode. Use `logfile` to specify a log # file to **overwrite** logs to. #c.InteractiveShell.logappend = '' ## The name of the logfile to use. #c.InteractiveShell.logfile = '' ## Start logging to the default log file in overwrite mode. Use `logappend` to # specify a log file to **append** logs to. #c.InteractiveShell.logstart = False ## #c.InteractiveShell.object_info_string_level = 0 ## Automatically call the pdb debugger after every exception. #c.InteractiveShell.pdb = False ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. #c.InteractiveShell.prompt_in1 = 'In [\\#]: ' ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. #c.InteractiveShell.prompt_in2 = ' .\\D.: ' ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. #c.InteractiveShell.prompt_out = 'Out[\\#]: ' ## Deprecated since IPython 4.0 and ignored since 5.0, set # TerminalInteractiveShell.prompts object directly. #c.InteractiveShell.prompts_pad_left = True ## #c.InteractiveShell.quiet = False ## #c.InteractiveShell.separate_in = '\n' ## #c.InteractiveShell.separate_out = '' ## #c.InteractiveShell.separate_out2 = '' ## Show rewritten input, e.g. for autocall. #c.InteractiveShell.show_rewritten_input = True ## Enables rich html representation of docstrings. (This requires the docrepr # module). 
#c.InteractiveShell.sphinxify_docstring = False ## #c.InteractiveShell.wildcards_case_sensitive = True ## Switch modes for the IPython exception handlers. #c.InteractiveShell.xmode = 'Context' #------------------------------------------------------------------------------ # ZMQInteractiveShell(InteractiveShell) configuration #------------------------------------------------------------------------------ ## A subclass of InteractiveShell for ZMQ. #------------------------------------------------------------------------------ # ProfileDir(LoggingConfigurable) configuration #------------------------------------------------------------------------------ ## An object to manage the profile directory and its resources. # # The profile directory is used by all IPython applications, to manage # configuration, logging and security. # # This object knows how to find, create and manage these directories. This # should be used by any code that wants to handle profiles. ## Set the profile location directly. This overrides the logic used by the # `profile` option. #c.ProfileDir.location = '' #------------------------------------------------------------------------------ # Session(Configurable) configuration #------------------------------------------------------------------------------ ## Object for handling serialization and sending of messages. # # The Session object handles building messages and sending them with ZMQ sockets # or ZMQStream objects. Objects can communicate with each other over the # network via Session objects, and only need to work with the dict-based IPython # message spec. The Session will handle serialization/deserialization, security, # and metadata. # # Sessions support configurable serialization via packer/unpacker traits, and # signing with HMAC digests via the key/keyfile traits. 
# # Parameters ---------- # # debug : bool # whether to trigger extra debugging statements # packer/unpacker : str : 'json', 'pickle' or import_string # importstrings for methods to serialize message parts. If just # 'json' or 'pickle', predefined JSON and pickle packers will be used. # Otherwise, the entire importstring must be used. # # The functions must accept at least valid JSON input, and output *bytes*. # # For example, to use msgpack: # packer = 'msgpack.packb', unpacker='msgpack.unpackb' # pack/unpack : callables # You can also set the pack/unpack callables for serialization directly. # session : bytes # the ID of this Session object. The default is to generate a new UUID. # username : unicode # username added to message headers. The default is to ask the OS. # key : bytes # The key used to initialize an HMAC signature. If unset, messages # will not be signed or checked. # keyfile : filepath # The file containing a key. If this is set, `key` will be initialized # to the contents of the file. ## Threshold (in bytes) beyond which an object's buffer should be extracted to # avoid pickling. #c.Session.buffer_threshold = 1024 ## Whether to check PID to protect against calls after fork. # # This check can be disabled if fork-safety is handled elsewhere. #c.Session.check_pid = True ## Threshold (in bytes) beyond which a buffer should be sent without copying. #c.Session.copy_threshold = 65536 ## Debug output in the Session #c.Session.debug = False ## The maximum number of digests to remember. # # The digest history will be culled when it exceeds this value. #c.Session.digest_history_size = 65536 ## The maximum number of items for a container to be introspected for custom # serialization. Containers larger than this are pickled outright. #c.Session.item_threshold = 64 ## execution key, for signing messages. #c.Session.key = b'' ## path to file containing execution key. 
#c.Session.keyfile = '' ## Metadata dictionary, which serves as the default top-level metadata dict for # each message. #c.Session.metadata = {} ## The name of the packer for serializing messages. Should be one of 'json', # 'pickle', or an import name for a custom callable serializer. #c.Session.packer = 'json' ## The UUID identifying this session. #c.Session.session = '' ## The digest scheme used to construct the message signatures. Must have the form # 'hmac-HASH'. #c.Session.signature_scheme = 'hmac-sha256' ## The name of the unpacker for unserializing messages. Only used with custom # functions for `packer`. #c.Session.unpacker = 'json' ## Username for the Session. Default is your system username. #c.Session.username = 'zim'
theheros/kbengine
refs/heads/master
kbe/res/scripts/common/Lib/encodings/mac_farsi.py
37
""" Python Character Mapping Codec mac_farsi generated from 'MAPPINGS/VENDORS/APPLE/FARSI.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='mac-farsi', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> CONTROL CHARACTER '\x01' # 0x01 -> CONTROL CHARACTER '\x02' # 0x02 -> CONTROL CHARACTER '\x03' # 0x03 -> CONTROL CHARACTER '\x04' # 0x04 -> CONTROL CHARACTER '\x05' # 0x05 -> CONTROL CHARACTER '\x06' # 0x06 -> CONTROL CHARACTER '\x07' # 0x07 -> CONTROL CHARACTER '\x08' # 0x08 -> CONTROL CHARACTER '\t' # 0x09 -> CONTROL CHARACTER '\n' # 0x0A -> CONTROL CHARACTER '\x0b' # 0x0B -> CONTROL CHARACTER '\x0c' # 0x0C -> CONTROL CHARACTER '\r' # 0x0D -> CONTROL CHARACTER '\x0e' # 0x0E -> CONTROL CHARACTER '\x0f' # 0x0F -> CONTROL CHARACTER '\x10' # 0x10 -> CONTROL CHARACTER '\x11' # 0x11 -> CONTROL CHARACTER '\x12' # 0x12 -> CONTROL CHARACTER '\x13' # 0x13 -> CONTROL CHARACTER '\x14' # 0x14 -> CONTROL CHARACTER '\x15' # 0x15 -> CONTROL CHARACTER '\x16' # 0x16 -> CONTROL CHARACTER '\x17' # 0x17 -> CONTROL CHARACTER '\x18' # 0x18 -> CONTROL CHARACTER '\x19' # 0x19 -> CONTROL CHARACTER 
'\x1a' # 0x1A -> CONTROL CHARACTER '\x1b' # 0x1B -> CONTROL CHARACTER '\x1c' # 0x1C -> CONTROL CHARACTER '\x1d' # 0x1D -> CONTROL CHARACTER '\x1e' # 0x1E -> CONTROL CHARACTER '\x1f' # 0x1F -> CONTROL CHARACTER ' ' # 0x20 -> SPACE, left-right '!' # 0x21 -> EXCLAMATION MARK, left-right '"' # 0x22 -> QUOTATION MARK, left-right '#' # 0x23 -> NUMBER SIGN, left-right '$' # 0x24 -> DOLLAR SIGN, left-right '%' # 0x25 -> PERCENT SIGN, left-right '&' # 0x26 -> AMPERSAND, left-right "'" # 0x27 -> APOSTROPHE, left-right '(' # 0x28 -> LEFT PARENTHESIS, left-right ')' # 0x29 -> RIGHT PARENTHESIS, left-right '*' # 0x2A -> ASTERISK, left-right '+' # 0x2B -> PLUS SIGN, left-right ',' # 0x2C -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR '-' # 0x2D -> HYPHEN-MINUS, left-right '.' # 0x2E -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR '/' # 0x2F -> SOLIDUS, left-right '0' # 0x30 -> DIGIT ZERO; in Arabic-script context, displayed as 0x06F0 EXTENDED ARABIC-INDIC DIGIT ZERO '1' # 0x31 -> DIGIT ONE; in Arabic-script context, displayed as 0x06F1 EXTENDED ARABIC-INDIC DIGIT ONE '2' # 0x32 -> DIGIT TWO; in Arabic-script context, displayed as 0x06F2 EXTENDED ARABIC-INDIC DIGIT TWO '3' # 0x33 -> DIGIT THREE; in Arabic-script context, displayed as 0x06F3 EXTENDED ARABIC-INDIC DIGIT THREE '4' # 0x34 -> DIGIT FOUR; in Arabic-script context, displayed as 0x06F4 EXTENDED ARABIC-INDIC DIGIT FOUR '5' # 0x35 -> DIGIT FIVE; in Arabic-script context, displayed as 0x06F5 EXTENDED ARABIC-INDIC DIGIT FIVE '6' # 0x36 -> DIGIT SIX; in Arabic-script context, displayed as 0x06F6 EXTENDED ARABIC-INDIC DIGIT SIX '7' # 0x37 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT '9' # 0x39 -> DIGIT NINE; in Arabic-script context, displayed as 0x06F9 EXTENDED ARABIC-INDIC 
DIGIT NINE ':' # 0x3A -> COLON, left-right ';' # 0x3B -> SEMICOLON, left-right '<' # 0x3C -> LESS-THAN SIGN, left-right '=' # 0x3D -> EQUALS SIGN, left-right '>' # 0x3E -> GREATER-THAN SIGN, left-right '?' # 0x3F -> QUESTION MARK, left-right '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET, left-right '\\' # 0x5C -> REVERSE SOLIDUS, left-right ']' # 0x5D -> RIGHT SQUARE BRACKET, left-right '^' # 0x5E -> CIRCUMFLEX ACCENT, left-right '_' # 0x5F -> LOW LINE, left-right '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 
'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET, left-right '|' # 0x7C -> VERTICAL LINE, left-right '}' # 0x7D -> RIGHT CURLY BRACKET, left-right '~' # 0x7E -> TILDE '\x7f' # 0x7F -> CONTROL CHARACTER '\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xa0' # 0x81 -> NO-BREAK SPACE, right-left '\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE '\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE '\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE '\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE '\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS '\u06ba' # 0x8B -> ARABIC LETTER NOON GHUNNA '\xab' # 0x8C -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left '\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA '\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE '\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE '\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS '\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE '\u2026' # 0x93 -> HORIZONTAL ELLIPSIS, right-left '\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS '\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE '\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE '\xbb' # 0x98 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left '\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS 
'\xf7' # 0x9B -> DIVISION SIGN, right-left '\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE '\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE '\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS ' ' # 0xA0 -> SPACE, right-left '!' # 0xA1 -> EXCLAMATION MARK, right-left '"' # 0xA2 -> QUOTATION MARK, right-left '#' # 0xA3 -> NUMBER SIGN, right-left '$' # 0xA4 -> DOLLAR SIGN, right-left '\u066a' # 0xA5 -> ARABIC PERCENT SIGN '&' # 0xA6 -> AMPERSAND, right-left "'" # 0xA7 -> APOSTROPHE, right-left '(' # 0xA8 -> LEFT PARENTHESIS, right-left ')' # 0xA9 -> RIGHT PARENTHESIS, right-left '*' # 0xAA -> ASTERISK, right-left '+' # 0xAB -> PLUS SIGN, right-left '\u060c' # 0xAC -> ARABIC COMMA '-' # 0xAD -> HYPHEN-MINUS, right-left '.' # 0xAE -> FULL STOP, right-left '/' # 0xAF -> SOLIDUS, right-left '\u06f0' # 0xB0 -> EXTENDED ARABIC-INDIC DIGIT ZERO, right-left (need override) '\u06f1' # 0xB1 -> EXTENDED ARABIC-INDIC DIGIT ONE, right-left (need override) '\u06f2' # 0xB2 -> EXTENDED ARABIC-INDIC DIGIT TWO, right-left (need override) '\u06f3' # 0xB3 -> EXTENDED ARABIC-INDIC DIGIT THREE, right-left (need override) '\u06f4' # 0xB4 -> EXTENDED ARABIC-INDIC DIGIT FOUR, right-left (need override) '\u06f5' # 0xB5 -> EXTENDED ARABIC-INDIC DIGIT FIVE, right-left (need override) '\u06f6' # 0xB6 -> EXTENDED ARABIC-INDIC DIGIT SIX, right-left (need override) '\u06f7' # 0xB7 -> EXTENDED ARABIC-INDIC DIGIT SEVEN, right-left (need override) '\u06f8' # 0xB8 -> EXTENDED ARABIC-INDIC DIGIT EIGHT, right-left (need override) '\u06f9' # 0xB9 -> EXTENDED ARABIC-INDIC DIGIT NINE, right-left (need override) ':' # 0xBA -> COLON, right-left '\u061b' # 0xBB -> ARABIC SEMICOLON '<' # 0xBC -> LESS-THAN SIGN, right-left '=' # 0xBD -> EQUALS SIGN, right-left '>' # 0xBE -> GREATER-THAN SIGN, right-left '\u061f' # 0xBF -> ARABIC QUESTION MARK '\u274a' # 0xC0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left '\u0621' # 0xC1 -> ARABIC LETTER HAMZA 
'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE '\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE '\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE '\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW '\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE '\u0627' # 0xC7 -> ARABIC LETTER ALEF '\u0628' # 0xC8 -> ARABIC LETTER BEH '\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA '\u062a' # 0xCA -> ARABIC LETTER TEH '\u062b' # 0xCB -> ARABIC LETTER THEH '\u062c' # 0xCC -> ARABIC LETTER JEEM '\u062d' # 0xCD -> ARABIC LETTER HAH '\u062e' # 0xCE -> ARABIC LETTER KHAH '\u062f' # 0xCF -> ARABIC LETTER DAL '\u0630' # 0xD0 -> ARABIC LETTER THAL '\u0631' # 0xD1 -> ARABIC LETTER REH '\u0632' # 0xD2 -> ARABIC LETTER ZAIN '\u0633' # 0xD3 -> ARABIC LETTER SEEN '\u0634' # 0xD4 -> ARABIC LETTER SHEEN '\u0635' # 0xD5 -> ARABIC LETTER SAD '\u0636' # 0xD6 -> ARABIC LETTER DAD '\u0637' # 0xD7 -> ARABIC LETTER TAH '\u0638' # 0xD8 -> ARABIC LETTER ZAH '\u0639' # 0xD9 -> ARABIC LETTER AIN '\u063a' # 0xDA -> ARABIC LETTER GHAIN '[' # 0xDB -> LEFT SQUARE BRACKET, right-left '\\' # 0xDC -> REVERSE SOLIDUS, right-left ']' # 0xDD -> RIGHT SQUARE BRACKET, right-left '^' # 0xDE -> CIRCUMFLEX ACCENT, right-left '_' # 0xDF -> LOW LINE, right-left '\u0640' # 0xE0 -> ARABIC TATWEEL '\u0641' # 0xE1 -> ARABIC LETTER FEH '\u0642' # 0xE2 -> ARABIC LETTER QAF '\u0643' # 0xE3 -> ARABIC LETTER KAF '\u0644' # 0xE4 -> ARABIC LETTER LAM '\u0645' # 0xE5 -> ARABIC LETTER MEEM '\u0646' # 0xE6 -> ARABIC LETTER NOON '\u0647' # 0xE7 -> ARABIC LETTER HEH '\u0648' # 0xE8 -> ARABIC LETTER WAW '\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA '\u064a' # 0xEA -> ARABIC LETTER YEH '\u064b' # 0xEB -> ARABIC FATHATAN '\u064c' # 0xEC -> ARABIC DAMMATAN '\u064d' # 0xED -> ARABIC KASRATAN '\u064e' # 0xEE -> ARABIC FATHA '\u064f' # 0xEF -> ARABIC DAMMA '\u0650' # 0xF0 -> ARABIC KASRA '\u0651' # 0xF1 -> ARABIC SHADDA '\u0652' # 0xF2 -> ARABIC SUKUN '\u067e' # 0xF3 -> ARABIC LETTER PEH '\u0679' # 0xF4 -> ARABIC LETTER 
TTEH '\u0686' # 0xF5 -> ARABIC LETTER TCHEH '\u06d5' # 0xF6 -> ARABIC LETTER AE '\u06a4' # 0xF7 -> ARABIC LETTER VEH '\u06af' # 0xF8 -> ARABIC LETTER GAF '\u0688' # 0xF9 -> ARABIC LETTER DDAL '\u0691' # 0xFA -> ARABIC LETTER RREH '{' # 0xFB -> LEFT CURLY BRACKET, right-left '|' # 0xFC -> VERTICAL LINE, right-left '}' # 0xFD -> RIGHT CURLY BRACKET, right-left '\u0698' # 0xFE -> ARABIC LETTER JEH '\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
wfxiang08/django197
refs/heads/master
django/contrib/gis/gdal/raster/source.py
297
import json import os from ctypes import addressof, byref, c_double, c_void_p from django.contrib.gis.gdal.base import GDALBase from django.contrib.gis.gdal.driver import Driver from django.contrib.gis.gdal.error import GDALException from django.contrib.gis.gdal.prototypes import raster as capi from django.contrib.gis.gdal.raster.band import BandList from django.contrib.gis.gdal.raster.const import GDAL_RESAMPLE_ALGORITHMS from django.contrib.gis.gdal.srs import SpatialReference, SRSException from django.contrib.gis.geometry.regex import json_regex from django.utils import six from django.utils.encoding import ( force_bytes, force_text, python_2_unicode_compatible, ) from django.utils.functional import cached_property class TransformPoint(list): indices = { 'origin': (0, 3), 'scale': (1, 5), 'skew': (2, 4), } def __init__(self, raster, prop): x = raster.geotransform[self.indices[prop][0]] y = raster.geotransform[self.indices[prop][1]] list.__init__(self, [x, y]) self._raster = raster self._prop = prop @property def x(self): return self[0] @x.setter def x(self, value): gtf = self._raster.geotransform gtf[self.indices[self._prop][0]] = value self._raster.geotransform = gtf @property def y(self): return self[1] @y.setter def y(self, value): gtf = self._raster.geotransform gtf[self.indices[self._prop][1]] = value self._raster.geotransform = gtf @python_2_unicode_compatible class GDALRaster(GDALBase): """ Wraps a raster GDAL Data Source object. """ def __init__(self, ds_input, write=False): self._write = 1 if write else 0 Driver.ensure_registered() # Preprocess json inputs. This converts json strings to dictionaries, # which are parsed below the same way as direct dictionary inputs. if isinstance(ds_input, six.string_types) and json_regex.match(ds_input): ds_input = json.loads(ds_input) # If input is a valid file path, try setting file as source. 
if isinstance(ds_input, six.string_types): if not os.path.exists(ds_input): raise GDALException('Unable to read raster source input "{}"'.format(ds_input)) try: # GDALOpen will auto-detect the data source type. self._ptr = capi.open_ds(force_bytes(ds_input), self._write) except GDALException as err: raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err)) elif isinstance(ds_input, dict): # A new raster needs to be created in write mode self._write = 1 # Create driver (in memory by default) driver = Driver(ds_input.get('driver', 'MEM')) # For out of memory drivers, check filename argument if driver.name != 'MEM' and 'name' not in ds_input: raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name)) # Check if width and height where specified if 'width' not in ds_input or 'height' not in ds_input: raise GDALException('Specify width and height attributes for JSON or dict input.') # Check if srid was specified if 'srid' not in ds_input: raise GDALException('Specify srid for JSON or dict input.') # Create GDAL Raster self._ptr = capi.create_ds( driver._ptr, force_bytes(ds_input.get('name', '')), ds_input['width'], ds_input['height'], ds_input.get('nr_of_bands', len(ds_input.get('bands', []))), ds_input.get('datatype', 6), None ) # Set band data if provided for i, band_input in enumerate(ds_input.get('bands', [])): band = self.bands[i] band.data(band_input['data']) if 'nodata_value' in band_input: band.nodata_value = band_input['nodata_value'] # Set SRID self.srs = ds_input.get('srid') # Set additional properties if provided if 'origin' in ds_input: self.origin.x, self.origin.y = ds_input['origin'] if 'scale' in ds_input: self.scale.x, self.scale.y = ds_input['scale'] if 'skew' in ds_input: self.skew.x, self.skew.y = ds_input['skew'] elif isinstance(ds_input, c_void_p): # Instantiate the object using an existing pointer to a gdal raster. 
self._ptr = ds_input else: raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input))) def __del__(self): if self._ptr and capi: capi.close_ds(self._ptr) def __str__(self): return self.name def __repr__(self): """ Short-hand representation because WKB may be very large. """ return '<Raster object at %s>' % hex(addressof(self._ptr)) def _flush(self): """ Flush all data from memory into the source file if it exists. The data that needs flushing are geotransforms, coordinate systems, nodata_values and pixel values. This function will be called automatically wherever it is needed. """ # Raise an Exception if the value is being changed in read mode. if not self._write: raise GDALException('Raster needs to be opened in write mode to change values.') capi.flush_ds(self._ptr) @property def name(self): """ Returns the name of this raster. Corresponds to filename for file-based rasters. """ return force_text(capi.get_ds_description(self._ptr)) @cached_property def driver(self): """ Returns the GDAL Driver used for this raster. """ ds_driver = capi.get_ds_driver(self._ptr) return Driver(ds_driver) @property def width(self): """ Width (X axis) in pixels. """ return capi.get_ds_xsize(self._ptr) @property def height(self): """ Height (Y axis) in pixels. """ return capi.get_ds_ysize(self._ptr) @property def srs(self): """ Returns the SpatialReference used in this GDALRaster. """ try: wkt = capi.get_ds_projection_ref(self._ptr) if not wkt: return None return SpatialReference(wkt, srs_type='wkt') except SRSException: return None @srs.setter def srs(self, value): """ Sets the spatial reference used in this GDALRaster. The input can be a SpatialReference or any parameter accepted by the SpatialReference constructor. 
""" if isinstance(value, SpatialReference): srs = value elif isinstance(value, six.integer_types + six.string_types): srs = SpatialReference(value) else: raise ValueError('Could not create a SpatialReference from input.') capi.set_ds_projection_ref(self._ptr, srs.wkt.encode()) self._flush() @property def geotransform(self): """ Returns the geotransform of the data source. Returns the default geotransform if it does not exist or has not been set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0]. """ # Create empty ctypes double array for data gtf = (c_double * 6)() capi.get_ds_geotransform(self._ptr, byref(gtf)) return list(gtf) @geotransform.setter def geotransform(self, values): "Sets the geotransform for the data source." if sum([isinstance(x, (int, float)) for x in values]) != 6: raise ValueError('Geotransform must consist of 6 numeric values.') # Create ctypes double array with input and write data values = (c_double * 6)(*values) capi.set_ds_geotransform(self._ptr, byref(values)) self._flush() @property def origin(self): """ Coordinates of the raster origin. """ return TransformPoint(self, 'origin') @property def scale(self): """ Pixel scale in units of the raster projection. """ return TransformPoint(self, 'scale') @property def skew(self): """ Skew of pixels (rotation parameters). """ return TransformPoint(self, 'skew') @property def extent(self): """ Returns the extent as a 4-tuple (xmin, ymin, xmax, ymax). """ # Calculate boundary values based on scale and size xval = self.origin.x + self.scale.x * self.width yval = self.origin.y + self.scale.y * self.height # Calculate min and max values xmin = min(xval, self.origin.x) xmax = max(xval, self.origin.x) ymin = min(yval, self.origin.y) ymax = max(yval, self.origin.y) return xmin, ymin, xmax, ymax @property def bands(self): return BandList(self) def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0): """ Returns a warped GDALRaster with the given input characteristics. 
The input is expected to be a dictionary containing the parameters of the target raster. Allowed values are width, height, SRID, origin, scale, skew, datatype, driver, and name (filename). By default, the warp functions keeps all parameters equal to the values of the original source raster. For the name of the target raster, the name of the source raster will be used and appended with _copy. + source_driver_name. In addition, the resampling algorithm can be specified with the "resampling" input parameter. The default is NearestNeighbor. For a list of all options consult the GDAL_RESAMPLE_ALGORITHMS constant. """ # Get the parameters defining the geotransform, srid, and size of the raster if 'width' not in ds_input: ds_input['width'] = self.width if 'height' not in ds_input: ds_input['height'] = self.height if 'srid' not in ds_input: ds_input['srid'] = self.srs.srid if 'origin' not in ds_input: ds_input['origin'] = self.origin if 'scale' not in ds_input: ds_input['scale'] = self.scale if 'skew' not in ds_input: ds_input['skew'] = self.skew # Get the driver, name, and datatype of the target raster if 'driver' not in ds_input: ds_input['driver'] = self.driver.name if 'name' not in ds_input: ds_input['name'] = self.name + '_copy.' 
+ self.driver.name if 'datatype' not in ds_input: ds_input['datatype'] = self.bands[0].datatype() # Set the number of bands ds_input['nr_of_bands'] = len(self.bands) # Create target raster target = GDALRaster(ds_input, write=True) # Copy nodata values to warped raster for index, band in enumerate(self.bands): target.bands[index].nodata_value = band.nodata_value # Select resampling algorithm algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling] # Reproject image capi.reproject_image( self._ptr, self.srs.wkt.encode(), target._ptr, target.srs.wkt.encode(), algorithm, 0.0, max_error, c_void_p(), c_void_p(), c_void_p() ) # Make sure all data is written to file target._flush() return target def transform(self, srid, driver=None, name=None, resampling='NearestNeighbour', max_error=0.0): """ Returns a copy of this raster reprojected into the given SRID. """ # Convert the resampling algorithm name into an algorithm id algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling] # Instantiate target spatial reference system target_srs = SpatialReference(srid) # Create warped virtual dataset in the target reference system target = capi.auto_create_warped_vrt( self._ptr, self.srs.wkt.encode(), target_srs.wkt.encode(), algorithm, max_error, c_void_p() ) target = GDALRaster(target) # Construct the target warp dictionary from the virtual raster data = { 'srid': srid, 'width': target.width, 'height': target.height, 'origin': [target.origin.x, target.origin.y], 'scale': [target.scale.x, target.scale.y], 'skew': [target.skew.x, target.skew.y], } # Set the driver and filepath if provided if driver: data['driver'] = driver if name: data['name'] = name # Warp the raster into new srid return self.warp(data, resampling=resampling, max_error=max_error)
ShineFan/odoo
refs/heads/8.0
openerp/addons/base/ir/ir_qweb.py
127
# -*- coding: utf-8 -*-
import collections
import cStringIO
import datetime
import hashlib
import json
import itertools
import logging
import math
import os
import re
import sys
import textwrap
import uuid
from subprocess import Popen, PIPE
from urlparse import urlparse

import babel
import babel.dates
import werkzeug
from lxml import etree, html
from PIL import Image
import psycopg2

import openerp.http
import openerp.tools
from openerp.tools.func import lazy_property
import openerp.tools.lru
from openerp.http import request
# NOTE: deliberately shadows the builtin `eval` with the sandboxed evaluator;
# everything below that calls eval() goes through safe_eval.
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import osv, orm, fields
from openerp.tools import html_escape as escape
from openerp.tools.translate import _

_logger = logging.getLogger(__name__)

# IE9 silently drops CSS rules past this per-stylesheet limit, hence the cap
# is presumably used when splitting generated CSS — TODO confirm against the
# asset-bundling code further down the file.
MAX_CSS_RULES = 4095

#--------------------------------------------------------------------
# QWeb template engine
#--------------------------------------------------------------------
class QWebException(Exception):
    """ Rendering error carrying arbitrary debugging metadata (template name,
    faulty node, original cause, ...) in the ``qweb`` dict.
    """
    def __init__(self, message, **kw):
        Exception.__init__(self, message)
        self.qweb = dict(kw)

    def pretty_xml(self):
        """ Return the offending template node pretty-printed as XML, or an
        empty string when no node was recorded.
        """
        if 'node' not in self.qweb:
            return ''
        return etree.tostring(self.qweb['node'], pretty_print=True)

class QWebTemplateNotFound(QWebException):
    pass

def raise_qweb_exception(etype=None, **kw):
    """ Re-raise the exception currently being handled as ``etype``
    (default :class:`QWebException`), keeping the original traceback and
    stashing ``kw`` plus the original exception into ``e.qweb`` for
    debugging. Must be called from inside an ``except`` block.
    """
    if etype is None:
        etype = QWebException
    orig_type, original, tb = sys.exc_info()
    try:
        # Python-2-only three-argument raise: preserves the original traceback.
        raise etype, original, tb
    except etype, e:
        for k, v in kw.items():
            e.qweb[k] = v
        # Will use `raise foo from bar` in python 3 and rename cause to __cause__
        e.qweb['cause'] = original
        raise

def _build_attribute(name, value):
    """ Render a single HTML attribute as ' name="value"' (leading space
    included), escaping the value and encoding both parts to UTF-8 bytes.
    """
    value = escape(value)
    if isinstance(name, unicode):
        name = name.encode('utf-8')
    if isinstance(value, unicode):
        value = value.encode('utf-8')
    return ' %s="%s"' % (name, value)

class QWebContext(dict):
    """ Rendering context: a dict of template variables augmented with the
    database cursor/uid, a template loader and a per-render template cache.
    """
    def __init__(self, cr, uid, data, loader=None, templates=None, context=None):
        self.cr = cr
        self.uid = uid
        self.loader = loader
        self.templates = templates or {}
        self.context = context
        dic = dict(data)
        super(QWebContext, self).__init__(dic)
        # `defined` is exposed to templates to test variable existence.
        self['defined'] = lambda key: key in self

    def safe_eval(self, expr):
        """ Evaluate ``expr`` in the sandbox with the context's variables as
        locals; unknown names resolve to None rather than raising. ``cr`` and
        ``loader`` are withheld so templates cannot reach the database layer.
        """
        locals_dict = collections.defaultdict(lambda: None)
        locals_dict.update(self)
        locals_dict.pop('cr', None)
        locals_dict.pop('loader', None)
        return eval(expr, None, locals_dict, nocopy=True, locals_builtins=True)

    def copy(self):
        """ Clones the current context, conserving all data and metadata
        (loader, template cache, ...)
        """
        return QWebContext(self.cr, self.uid,
                           dict.copy(self),
                           loader=self.loader,
                           templates=self.templates,
                           context=self.context)

    def __copy__(self):
        return self.copy()

class QWeb(orm.AbstractModel):
    """ Base QWeb rendering engine

    * to customize ``t-field`` rendering, subclass ``ir.qweb.field`` and
      create new models called :samp:`ir.qweb.field.{widget}`
    * alternatively, override :meth:`~.get_converter_for` and return an
      arbitrary model to use as field converter

    Beware that if you need extensions or alterations which could be
    incompatible with other subsystems, you should create a local object
    inheriting from ``ir.qweb`` and customize that.
    """
    _name = 'ir.qweb'

    # HTML void elements: rendered as <tag/> when they have no content.
    _void_elements = frozenset([
        'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
        'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'])
    # Interpolation patterns recognised by eval_format (t-attf-*, t-valuef).
    _format_regex = re.compile(
        '(?:'
            # ruby-style pattern
            '#\{(.+?)\}'
        ')|(?:'
            # jinja-style pattern
            '\{\{(.+?)\}\}'
        ')')

    def __init__(self, pool, cr):
        super(QWeb, self).__init__(pool, cr)
        # Dispatch tables built from render_tag_* / render_att_* methods.
        self._render_tag = self.prefixed_methods('render_tag_')
        self._render_att = self.prefixed_methods('render_att_')

    def prefixed_methods(self, prefix):
        """ Extracts all methods prefixed by ``prefix``, and returns a mapping
        of (t-name, method) where the t-name is the method name with prefix
        removed and underscore converted to dashes

        :param str prefix:
        :return: dict
        """
        n_prefix = len(prefix)
        return dict(
            (name[n_prefix:].replace('_', '-'), getattr(type(self), name))
            for name in dir(self)
            if name.startswith(prefix)
        )

    def register_tag(self, tag, func):
        """ Register an extra ``t-<tag>`` handler at runtime. """
        self._render_tag[tag] = func

    def add_template(self, qwebcontext, name, node):
        """Add a parsed template in the context. Used to preprocess templates."""
        qwebcontext.templates[name] = node

    def load_document(self, document, res_id, qwebcontext):
        """
        Loads an XML document and installs any contained template in the engine

        :type document: a parsed lxml.etree element, an unparsed XML document
                        (as a string) or the path of an XML file to load
        """
        if not isinstance(document, basestring):
            # assume lxml.etree.Element
            dom = document
        elif document.startswith("<?xml"):
            dom = etree.fromstring(document)
        else:
            dom = etree.parse(document).getroot()

        for node in dom:
            if node.get('t-name'):
                name = str(node.get("t-name"))
                self.add_template(qwebcontext, name, node)
                # Also register the first <t> template under the numeric id it
                # was loaded for, so it can be fetched by database id.
                if res_id and node.tag == "t":
                    self.add_template(qwebcontext, res_id, node)
                    res_id = None

    def get_template(self, name, qwebcontext):
        """ Tries to fetch the template ``name``, either gets it from the
        context's template cache or loads one with the context's loader (if
        any).

        :raises QWebTemplateNotFound: if the template can not be found or loaded
        """
        origin_template = qwebcontext.get('__caller__') or qwebcontext['__stack__'][0]
        if qwebcontext.loader and name not in qwebcontext.templates:
            try:
                xml_doc = qwebcontext.loader(name)
            except ValueError:
                raise_qweb_exception(QWebTemplateNotFound, message="Loader could not find template %r" % name, template=origin_template)
            self.load_document(xml_doc, isinstance(name, (int, long)) and name or None, qwebcontext=qwebcontext)

        if name in qwebcontext.templates:
            return qwebcontext.templates[name]

        raise QWebTemplateNotFound("Template %r not found" % name, template=origin_template)

    def eval(self, expr, qwebcontext):
        """ Sandbox-evaluate ``expr``; wraps any failure in a QWebException
        annotated with the expression and current template.
        """
        try:
            return qwebcontext.safe_eval(expr)
        except Exception:
            template = qwebcontext.get('__template__')
            raise_qweb_exception(message="Could not evaluate expression %r" % expr, expression=expr, template=template)

    def eval_object(self, expr, qwebcontext):
        return self.eval(expr, qwebcontext)

    def eval_str(self, expr, qwebcontext):
        """ Evaluate ``expr`` and coerce the result to a UTF-8 byte string.
        The special expression "0" returns the caller-provided body
        (t-call content); False/None render as the empty string.
        """
        if expr == "0":
            return qwebcontext.get(0, '')
        val = self.eval(expr, qwebcontext)
        if isinstance(val, unicode):
            return val.encode("utf8")
        if val is False or val is None:
            return ''
        return str(val)

    def eval_format(self, expr, qwebcontext):
        """ Interpolate ``#{...}`` / ``{{...}}`` placeholders in ``expr``.
        When no placeholder matched, falls back to legacy %-formatting
        against the whole context.
        """
        expr, replacements = self._format_regex.subn(
            lambda m: self.eval_str(m.group(1) or m.group(2), qwebcontext),
            expr
        )

        if replacements:
            return expr

        try:
            return str(expr % qwebcontext)
        except Exception:
            template = qwebcontext.get('__template__')
            raise_qweb_exception(message="Format error for expression %r" % expr, expression=expr, template=template)

    def eval_bool(self, expr, qwebcontext):
        """ Evaluate ``expr`` and collapse the result to 0 or 1. """
        return int(bool(self.eval(expr, qwebcontext)))

    def render(self, cr, uid, id_or_xml_id, qwebcontext=None, loader=None, context=None):
        """ render(cr, uid, id_or_xml_id, qwebcontext=None, loader=None, context=None)

        Renders the template specified by the provided template name

        :param qwebcontext: context for rendering the template
        :type qwebcontext: dict or :class:`QWebContext` instance
        :param loader: if ``qwebcontext`` is a dict, loader set into the
                       context instantiated for rendering
        """
        if qwebcontext is None:
            qwebcontext = {}

        if not isinstance(qwebcontext, QWebContext):
            qwebcontext = QWebContext(cr, uid, qwebcontext, loader=loader, context=context)

        qwebcontext['__template__'] = id_or_xml_id
        # Maintain a call stack so nested t-call errors can name their caller.
        stack = qwebcontext.get('__stack__', [])
        if stack:
            qwebcontext['__caller__'] = stack[-1]
        stack.append(id_or_xml_id)
        qwebcontext['__stack__'] = stack
        qwebcontext['xmlid'] = str(stack[0]) # Temporary fix
        return self.render_node(self.get_template(id_or_xml_id, qwebcontext), qwebcontext)

    def render_node(self, element, qwebcontext):
        """ Render a single template node: process its attributes (groups
        access check, t-att-* attribute generation, t-* directive detection)
        then hand off to the matched tag renderer or to render_element.
        """
        generated_attributes = ""
        t_render = None
        template_attributes = {}
        for (attribute_name, attribute_value) in element.attrib.iteritems():
            attribute_name = str(attribute_name)
            if attribute_name == "groups":
                # Hide the node entirely unless the user belongs to the groups;
                # without a request (no cr/uid) access is denied.
                cr = qwebcontext.get('request') and qwebcontext['request'].cr or None
                uid = qwebcontext.get('request') and qwebcontext['request'].uid or None
                can_see = self.user_has_groups(cr, uid, groups=attribute_value) if cr and uid else False
                if not can_see:
                    return ''

            attribute_value = attribute_value.encode("utf8")

            if attribute_name.startswith("t-"):
                for attribute in self._render_att:
                    if attribute_name[2:].startswith(attribute):
                        attrs = self._render_att[attribute](
                            self, element, attribute_name, attribute_value, qwebcontext)
                        for att, val in attrs:
                            if not val: continue
                            generated_attributes += self.render_attribute(element, att, val, qwebcontext)
                        break
                else:
                    # Not an attribute directive: record it, and remember the
                    # first one that matches a tag renderer.
                    if attribute_name[2:] in self._render_tag:
                        t_render = attribute_name[2:]
                    template_attributes[attribute_name[2:]] = attribute_value
            else:
                # Plain HTML attribute, emitted as-is (escaped).
                generated_attributes += self.render_attribute(element, attribute_name, attribute_value, qwebcontext)

        if 'debug' in template_attributes:
            debugger = template_attributes.get('debug', 'pdb')
            __import__(debugger).set_trace()  # pdb, ipdb, pudb, ...
        if t_render:
            result = self._render_tag[t_render](self, element, template_attributes, generated_attributes, qwebcontext)
        else:
            result = self.render_element(element, template_attributes, generated_attributes, qwebcontext)

        if element.tail:
            result += element.tail.encode('utf-8')

        if isinstance(result, unicode):
            return result.encode('utf-8')
        return result

    def render_element(self, element, template_attributes, generated_attributes, qwebcontext, inner=None):
        # element: element
        # template_attributes: t-* attributes
        # generated_attributes: generated attributes
        # qwebcontext: values
        # inner: optional innerXml
        if inner:
            g_inner = inner.encode('utf-8') if isinstance(inner, unicode) else inner
        else:
            g_inner = [] if element.text is None else [element.text.encode('utf-8')]
            for current_node in element.iterchildren(tag=etree.Element):
                try:
                    g_inner.append(self.render_node(current_node, qwebcontext))
                except QWebException:
                    raise
                except Exception:
                    template = qwebcontext.get('__template__')
                    raise_qweb_exception(message="Could not render element %r" % element.tag, node=element, template=template)
        name = str(element.tag)
        inner = "".join(g_inner)
        # t-trim: strip whitespace from the rendered content on demand.
        trim = template_attributes.get("trim", 0)
        if trim == 0:
            pass
        elif trim == 'left':
            inner = inner.lstrip()
        elif trim == 'right':
            inner = inner.rstrip()
        elif trim == 'both':
            inner = inner.strip()
        if name == "t":
            # <t> is a pure directive holder: emit only its content.
            return inner
        elif len(inner) or name not in self._void_elements:
            # NOTE(review): the generator variable shadows the qwebcontext
            # parameter; it only encodes each output fragment to UTF-8 bytes.
            return "<%s%s>%s</%s>" % tuple(
                qwebcontext if isinstance(qwebcontext, str) else qwebcontext.encode('utf-8')
                for qwebcontext in (name, generated_attributes, inner, name)
            )
        else:
            # Empty void element (br, img, ...): self-closing form.
            return "<%s%s/>" % (name, generated_attributes)

    def render_attribute(self, element, name, value, qwebcontext):
        """ Hook rendering one output attribute; default is plain escaping. """
        return _build_attribute(name, value)

    # Attributes
    def render_att_att(self, element, attribute_name, attribute_value, qwebcontext):
        """ Handle t-att-*, t-attf-* and t-att directives; returns an
        iterable of (name, value) pairs to emit.
        """
        if attribute_name.startswith("t-attf-"):
            return [(attribute_name[7:], self.eval_format(attribute_value, qwebcontext))]

        if attribute_name.startswith("t-att-"):
            return [(attribute_name[6:], self.eval(attribute_value, qwebcontext))]

        result = self.eval_object(attribute_value, qwebcontext)
        if isinstance(result, collections.Mapping):
            return result.iteritems()
        # assume tuple
        return [result]

    # Tags
    def render_tag_raw(self, element, template_attributes, generated_attributes, qwebcontext):
        """ t-raw: inject the evaluated expression without escaping. """
        inner = self.eval_str(template_attributes["raw"], qwebcontext)
        return self.render_element(element, template_attributes, generated_attributes, qwebcontext, inner)

    def render_tag_esc(self, element, template_attributes, generated_attributes, qwebcontext):
        """ t-esc: inject the evaluated expression, escaped, optionally
        through a widget selected by t-esc-options.
        """
        options = json.loads(template_attributes.get('esc-options') or '{}')
        widget = self.get_widget_for(options.get('widget'))
        inner = widget.format(template_attributes['esc'], options, qwebcontext)
        return self.render_element(element, template_attributes, generated_attributes, qwebcontext, inner)

    def _iterate(self, iterable):
        """ Normalize a t-foreach enumerator to (item, value) pairs:
        mappings yield their items, sequences pair each element with itself.
        """
        if isinstance (iterable, collections.Mapping):
            return iterable.iteritems()

        return itertools.izip(*itertools.tee(iterable))

    def render_tag_foreach(self, element, template_attributes, generated_attributes, qwebcontext):
        """ t-foreach: render the element once per item, exposing the loop
        helpers <as>, <as>_value/_index/_first/_last/_parity/_even/_odd,
        plus <as>_size and <as>_all when the enumerator is sized.
        """
        expr = template_attributes["foreach"]
        enum = self.eval_object(expr, qwebcontext)
        if enum is None:
            template = qwebcontext.get('__template__')
            raise QWebException("foreach enumerator %r is not defined while rendering template %r" % (expr, template), template=template)
        if isinstance(enum, int):
            enum = range(enum)

        varname = template_attributes['as'].replace('.', '_')
        copy_qwebcontext = qwebcontext.copy()

        size = None
        if isinstance(enum, collections.Sized):
            size = len(enum)
            copy_qwebcontext["%s_size" % varname] = size

        copy_qwebcontext["%s_all" % varname] = enum
        ru = []
        for index, (item, value) in enumerate(self._iterate(enum)):
            copy_qwebcontext.update({
                varname: item,
                '%s_value' % varname: value,
                '%s_index' % varname: index,
                '%s_first' % varname: index == 0,
            })
            if size is not None:
                copy_qwebcontext['%s_last' % varname] = index + 1 == size
            if index % 2:
                copy_qwebcontext.update({
                    '%s_parity' % varname: 'odd',
                    '%s_even' % varname: False,
                    '%s_odd' % varname: True,
                })
            else:
                copy_qwebcontext.update({
                    '%s_parity' % varname: 'even',
                    '%s_even' % varname: True,
                    '%s_odd' % varname: False,
                })
            ru.append(self.render_element(element, template_attributes, generated_attributes, copy_qwebcontext))

        # Copy the final loop state back into the caller's keys so t-set
        # assignments made inside the loop body survive it.
        for k in qwebcontext.keys():
            qwebcontext[k] = copy_qwebcontext[k]

        return "".join(ru)

    def render_tag_if(self, element, template_attributes, generated_attributes, qwebcontext):
        """ t-if: render the element only when the condition is truthy. """
        if self.eval_bool(template_attributes["if"], qwebcontext):
            return self.render_element(element, template_attributes, generated_attributes, qwebcontext)
        return ""

    def render_tag_call(self, element, template_attributes, generated_attributes, qwebcontext):
        """ t-call: render another template, passing the current element's
        rendered body under the magic key 0.
        """
        d = qwebcontext.copy()
        d[0] = self.render_element(element, template_attributes, generated_attributes, d)
        cr = d.get('request') and d['request'].cr or None
        uid = d.get('request') and d['request'].uid or None

        template = self.eval_format(template_attributes["call"], d)
        try:
            # Numeric template references are database ids.
            template = int(template)
        except ValueError:
            pass
        return self.render(cr, uid, template, d)

    def render_tag_call_assets(self, element, template_attributes, generated_attributes, qwebcontext):
        """ This special 't-call' tag can be used in order to aggregate/minify javascript and css assets"""
        if len(element):
            # An asset bundle is rendered in two different contexts (when generating html and
            # when generating the bundle itself) so they must be qwebcontext free
            # even '0' variable is forbidden
            template = qwebcontext.get('__template__')
            raise QWebException("t-call-assets cannot contain children nodes", template=template)
        xmlid = template_attributes['call-assets']
        cr, uid, context = [getattr(qwebcontext, attr) for attr in ('cr', 'uid', 'context')]
        bundle = AssetsBundle(xmlid, cr=cr, uid=uid, context=context, registry=self.pool)
        css = self.get_attr_bool(template_attributes.get('css'), default=True)
        js = self.get_attr_bool(template_attributes.get('js'), default=True)
        return bundle.to_html(css=css, js=js, debug=bool(qwebcontext.get('debug')))

    def render_tag_set(self, element, template_attributes, generated_attributes, qwebcontext):
        """ t-set: bind a context variable from t-value (expression),
        t-valuef (format string) or the rendered element body; emits nothing.
        """
        if "value" in template_attributes:
            qwebcontext[template_attributes["set"]] = self.eval_object(template_attributes["value"], qwebcontext)
        elif "valuef" in template_attributes:
            qwebcontext[template_attributes["set"]] = self.eval_format(template_attributes["valuef"], qwebcontext)
        else:
            qwebcontext[template_attributes["set"]] = self.render_element(element, template_attributes, generated_attributes, qwebcontext)
        return ""

    def render_tag_field(self, element, template_attributes, generated_attributes, qwebcontext):
        """ eg: <span t-record="browse_record(res.partner, 1)" t-field="phone">+1 555 555 8069</span>"""
        node_name = element.tag
        # Structural/list elements break the website RTE, and <t> would leave
        # no node to attach field metadata to.
        assert node_name not in ("table", "tbody", "thead", "tfoot", "tr", "td",
                                 "li", "ul", "ol", "dl", "dt", "dd"),\
            "RTE widgets do not work correctly on %r elements" % node_name
        assert node_name != 't',\
            "t-field can not be used on a t element, provide an actual HTML node"

        record, field_name = template_attributes["field"].rsplit('.', 1)
        record = self.eval_object(record, qwebcontext)

        field = record._fields[field_name]
        options = json.loads(template_attributes.get('field-options') or '{}')
        field_type = get_field_type(field, options)

        converter = self.get_converter_for(field_type)

        return converter.to_html(qwebcontext.cr, qwebcontext.uid, field_name, record, options,
                                 element, template_attributes, generated_attributes, qwebcontext, context=qwebcontext.context)

    def get_converter_for(self, field_type):
        """ returns a :class:`~openerp.models.Model` used to render a
        ``t-field``.

        By default, tries to get the model named
        :samp:`ir.qweb.field.{field_type}`, falling back on ``ir.qweb.field``.

        :param str field_type: type or widget of field to render
        """
        return self.pool.get('ir.qweb.field.' + field_type, self.pool['ir.qweb.field'])

    def get_widget_for(self, widget):
        """ returns a :class:`~openerp.models.Model` used to render a
        ``t-esc``

        :param str widget: name of the widget to use, or ``None``
        """
        widget_model = ('ir.qweb.widget.' + widget) if widget else 'ir.qweb.widget'
        return self.pool.get(widget_model) or self.pool['ir.qweb.widget']

    def get_attr_bool(self, attr, default=False):
        """ Parse a template attribute as a boolean; unknown values fall
        back to ``default``.
        """
        if attr:
            attr = attr.lower()
            if attr in ('false', '0'):
                return False
            elif attr in ('true', '1'):
                return True
        return default

#--------------------------------------------------------------------
# QWeb Fields converters
#--------------------------------------------------------------------
class FieldConverter(osv.AbstractModel):
    """ Used to convert a t-field specification into an output HTML field.

    :meth:`~.to_html` is the entry point of this conversion from QWeb, it:

    * converts the record value to html using :meth:`~.record_to_html`
    * generates the metadata attributes (``data-oe-``) to set on the root
      result node
    * generates the root result node itself through :meth:`~.render_element`
    """
    _name = 'ir.qweb.field'

    def attributes(self, cr, uid, field_name, record, options,
                   source_element, g_att, t_att, qweb_context, context=None):
        """ attributes(cr, uid, field_name, record, options, source_element, g_att, t_att, qweb_context, context=None)

        Generates the metadata attributes (prefixed by ``data-oe-`` for the
        root node of the field conversion. Attribute values are escaped by the
        parent.

        The default attributes are:

        * ``model``, the name of the record's model
        * ``id`` the id of the record to which the field belongs
        * ``field`` the name of the converted field
        * ``type`` the logical field type (widget, may not match the field's
          ``type``, may not be any Field subclass name)
        * ``translate``, a boolean flag (``0`` or ``1``) denoting whether the
          field is translatable
        * ``expression``, the original expression

        :returns: iterable of (attribute name, attribute value) pairs.
        """
        # NOTE(review): the docstring lists ``translate`` but no
        # data-oe-translate pair is emitted below — confirm whether that is
        # intentional or handled by an override.
        field = record._fields[field_name]
        field_type = get_field_type(field, options)
        return [
            ('data-oe-model', record._name),
            ('data-oe-id', record.id),
            ('data-oe-field', field_name),
            ('data-oe-type', field_type),
            ('data-oe-expression', t_att['field']),
        ]

    def value_to_html(self, cr, uid, value, field, options=None, context=None):
        """ value_to_html(cr, uid, value, field, options=None, context=None)

        Converts a single value to its HTML version/output
        """
        if not value: return ''
        return value

    def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
        """ record_to_html(cr, uid, field_name, record, options=None, context=None)

        Converts the specified field of the browse_record ``record`` to HTML
        """
        field = record._fields[field_name]
        return self.value_to_html(
            cr, uid, record[field_name], field, options=options, context=context)

    def to_html(self, cr, uid, field_name, record, options,
                source_element, t_att, g_att, qweb_context, context=None):
        """ to_html(cr, uid, field_name, record, options, source_element, t_att, g_att, qweb_context, context=None)

        Converts a ``t-field`` to its HTML output. A ``t-field`` may be
        extended by a ``t-field-options``, which is a JSON-serialized mapping
        of configuration values.

        A default configuration key is ``widget`` which can override the
        field's own ``_type``.
        """
        try:
            content = self.record_to_html(cr, uid, field_name, record, options, context=context)
            if options.get('html-escape', True):
                content = escape(content)
            elif hasattr(content, '__html__'):
                content = content.__html__()
        except Exception:
            # Rendering must not crash the page: log and render an empty field.
            _logger.warning("Could not get field %s for model %s",
                            field_name, record._name, exc_info=True)
            content = None

        # Branding attributes let the website editor map DOM nodes back to
        # database fields; only added when the user may write the record.
        inherit_branding = context and context.get('inherit_branding')
        if not inherit_branding and context and context.get('inherit_branding_auto'):
            inherit_branding = self.pool['ir.model.access'].check(cr, uid, record._name, 'write', False, context=context)

        if inherit_branding:
            # add branding attributes
            g_att += ''.join(
                _build_attribute(name, value)
                for name, value in self.attributes(
                    cr, uid, field_name, record, options,
                    source_element, g_att, t_att, qweb_context, context=context)
            )

        return self.render_element(cr, uid, source_element, t_att, g_att,
                                   qweb_context, content)

    def qweb_object(self):
        return self.pool['ir.qweb']

    def render_element(self, cr, uid, source_element, t_att, g_att,
                       qweb_context, content):
        """ render_element(cr, uid, source_element, t_att, g_att, qweb_context, content)

        Final rendering hook, by default just calls ir.qweb's ``render_element``
        """
        return self.qweb_object().render_element(
            source_element, t_att, g_att, qweb_context, content or '')

    def user_lang(self, cr, uid, context):
        """ user_lang(cr, uid, context)

        Fetches the res.lang object corresponding to the language code stored
        in the user's context. Fallbacks to en_US if no lang is present in the
        context *or the language code is not valid*.

        :returns: res.lang browse_record
        """
        if context is None: context = {}

        lang_code = context.get('lang') or 'en_US'
        Lang = self.pool['res.lang']

        lang_ids = Lang.search(cr, uid, [('code', '=', lang_code)], context=context) \
               or  Lang.search(cr, uid, [('code', '=', 'en_US')], context=context)

        return Lang.browse(cr, uid, lang_ids[0], context=context)

class FloatConverter(osv.AbstractModel):
    _name = 'ir.qweb.field.float'
    _inherit = 'ir.qweb.field'

    def precision(self, cr, uid, field, options=None, context=None):
        # field.digits is (total, decimal places) when set; we only need the
        # decimal-place count.
        _, precision = field.digits or (None, None)
        return precision

    def value_to_html(self, cr, uid, value, field, options=None, context=None):
        if context is None:
            context = {}
        precision = self.precision(cr, uid, field, options=options, context=context)
        fmt = '%f' if precision is None else '%.{precision}f'

        lang_code = context.get('lang') or 'en_US'
        lang = self.pool['res.lang']
        formatted = lang.format(cr, uid, [lang_code], fmt.format(precision=precision), value, grouping=True)

        # %f does not strip trailing zeroes. %g does but its precision causes
        # it to switch to scientific notation starting at a million *and* to
        # strip decimals. So use %f and if no precision was specified manually
        # strip trailing 0.
        if precision is None:
            formatted = re.sub(r'(?:(0|\d+?)0+)$', r'\1', formatted)

        return formatted

class DateConverter(osv.AbstractModel):
    _name = 'ir.qweb.field.date'
    _inherit = 'ir.qweb.field'

    def value_to_html(self, cr, uid, value, field, options=None, context=None):
        # NOTE(review): len() is applied before the basestring check, so this
        # assumes server date values arrive as strings — confirm callers never
        # pass date objects here.
        if not value or len(value)<10: return ''
        lang = self.user_lang(cr, uid, context=context)
        locale = babel.Locale.parse(lang.code)

        if isinstance(value, basestring):
            value = datetime.datetime.strptime(
                value[:10], openerp.tools.DEFAULT_SERVER_DATE_FORMAT)

        if options and 'format' in options:
            pattern = options['format']
        else:
            # Convert the language's strftime pattern to Babel's LDML syntax.
            strftime_pattern = lang.date_format
            pattern = openerp.tools.posix_to_ldml(strftime_pattern, locale=locale)

        return babel.dates.format_date(
            value, format=pattern,
            locale=locale)

class DateTimeConverter(osv.AbstractModel):
    _name = 'ir.qweb.field.datetime'
    _inherit = 'ir.qweb.field'

    def value_to_html(self, cr, uid, value, field, options=None, context=None):
        if not value: return ''
        lang = self.user_lang(cr, uid, context=context)
        locale = babel.Locale.parse(lang.code)

        if isinstance(value, basestring):
            value = datetime.datetime.strptime(
                value, openerp.tools.DEFAULT_SERVER_DATETIME_FORMAT)
        # Shift the (UTC) server value into the user's timezone.
        value = fields.datetime.context_timestamp(
            cr, uid, timestamp=value, context=context)

        if options and 'format' in options:
            pattern = options['format']
        else:
            strftime_pattern = (u"%s %s" % (lang.date_format, lang.time_format))
            pattern = openerp.tools.posix_to_ldml(strftime_pattern, locale=locale)

        if options and options.get('hide_seconds'):
            pattern = pattern.replace(":ss", "").replace(":s", "")

        return babel.dates.format_datetime(value, format=pattern, locale=locale)

class TextConverter(osv.AbstractModel):
    _name = 'ir.qweb.field.text'
    _inherit = 'ir.qweb.field'

    def value_to_html(self, cr, uid, value, field, options=None, context=None):
        """
        Escapes the value and converts newlines to br tags.
        """
        if not value: return ''

        return nl2br(value, options=options)

class SelectionConverter(osv.AbstractModel):
    _name = 'ir.qweb.field.selection'
    _inherit = 'ir.qweb.field'

    def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
        # Render the selection label, not the stored key.
        value = record[field_name]
        if not value: return ''
        field = record._fields[field_name]
        selection = dict(field.get_description(record.env)['selection'])
        return self.value_to_html(
            cr, uid, selection[value], field, options=options)

class ManyToOneConverter(osv.AbstractModel):
    _name = 'ir.qweb.field.many2one'
    _inherit = 'ir.qweb.field'

    def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
        # read() returns a (id, display_name) pair for many2one fields.
        [read] = record.read([field_name])
        if not read[field_name]: return ''
        _, value = read[field_name]
        return nl2br(value, options=options)

class HTMLConverter(osv.AbstractModel):
    _name = 'ir.qweb.field.html'
    _inherit = 'ir.qweb.field'

    def value_to_html(self, cr, uid, value, field, options=None, context=None):
        # The stored value is already HTML: wrap to bypass escaping.
        return HTMLSafe(value or '')

class ImageConverter(osv.AbstractModel):
    """ ``image`` widget rendering, inserts a data:uri-using image tag in the
    document. May be overridden by e.g. the website module to generate links
    instead.

    .. todo:: what happens if different output need different converters? e.g.
              reports may need embedded images or FS links whereas website
              needs website-aware
    """
    _name = 'ir.qweb.field.image'
    _inherit = 'ir.qweb.field'

    def value_to_html(self, cr, uid, value, field, options=None, context=None):
        try:
            # Validate the payload really is an image before embedding it.
            image = Image.open(cStringIO.StringIO(value.decode('base64')))
            image.verify()
        except IOError:
            raise ValueError("Non-image binary fields can not be converted to HTML")
        except: # image.verify() throws "suitable exceptions", I have no idea what they are
            raise ValueError("Invalid image content")

        return HTMLSafe('<img src="data:%s;base64,%s">' % (Image.MIME[image.format], value))

class MonetaryConverter(osv.AbstractModel):
    """ ``monetary`` converter, has a mandatory option
    ``display_currency``.

    The currency is used for formatting *and rounding* of the float value. It
    is assumed that the linked res_currency has a non-empty rounding value and
    res.currency's ``round`` method is used to perform rounding.

    .. note:: the monetary converter internally adds the qweb context to its
              options mapping, so that the context is available to callees.
              It's set under the ``_qweb_context`` key.
    """
    _name = 'ir.qweb.field.monetary'
    _inherit = 'ir.qweb.field'

    def to_html(self, cr, uid, field_name, record, options,
                source_element, t_att, g_att, qweb_context, context=None):
        options['_qweb_context'] = qweb_context
        return super(MonetaryConverter, self).to_html(
            cr, uid, field_name, record, options,
            source_element, t_att, g_att, qweb_context, context=context)

    def record_to_html(self, cr, uid, field_name, record, options, context=None):
        if context is None:
            context = {}
        Currency = self.pool['res.currency']
        display_currency = self.display_currency(cr, uid, options['display_currency'], options)

        # lang.format mandates a sprintf-style format. These formats are non-
        # minimal (they have a default fixed precision instead), and
        # lang.format will not set one by default. currency.round will not
        # provide one either.
        #
        # So we need to generate a precision value (integer > 0) from the
        # currency's rounding (a float generally < 1.0).
        #
        # The log10 of the rounding should be the number of digits involved if
        # negative, if positive clamp to 0 digits and call it a day.
        # nb: int() ~ floor(), we want nearest rounding instead
        precision = int(math.floor(math.log10(display_currency.rounding)))
        fmt = "%.{0}f".format(-precision if precision < 0 else 0)

        from_amount = record[field_name]

        if options.get('from_currency'):
            # Convert the amount before formatting when a source currency
            # differs from the display currency.
            from_currency = self.display_currency(cr, uid, options['from_currency'], options)
            from_amount = Currency.compute(cr, uid, from_currency.id, display_currency.id, from_amount)

        lang_code = context.get('lang') or 'en_US'
        lang = self.pool['res.lang']
        formatted_amount = lang.format(cr, uid, [lang_code],
            fmt, Currency.round(cr, uid, display_currency, from_amount),
            grouping=True, monetary=True)

        pre = post = u''
        if display_currency.position == 'before':
            pre = u'{symbol}\N{NO-BREAK SPACE}'
        else:
            post = u'\N{NO-BREAK SPACE}{symbol}'

        # Two-stage formatting: the amount first, then the symbol, so the
        # symbol placeholder survives the first .format() call.
        return HTMLSafe(u'{pre}<span class="oe_currency_value">{0}</span>{post}'.format(
            formatted_amount,
            pre=pre, post=post,
        ).format(
            symbol=display_currency.symbol,
        ))

    def display_currency(self, cr, uid, currency, options):
        """ Evaluate the currency expression in the qweb context stashed by
        to_html and return the res.currency record.
        """
        return self.qweb_object().eval_object(
            currency, options['_qweb_context'])

# (label, length in seconds) pairs, largest unit first — iteration order
# matters for DurationConverter's greedy decomposition below.
TIMEDELTA_UNITS = (
    ('year', 3600 * 24 * 365),
    ('month', 3600 * 24 * 30),
    ('week', 3600 * 24 * 7),
    ('day', 3600 * 24),
    ('hour', 3600),
    ('minute', 60),
    ('second', 1)
)
class DurationConverter(osv.AbstractModel):
    """ ``duration`` converter, to display integral or fractional values as
    human-readable time spans (e.g. 1.5 as "1 hour 30 minutes").

    Can be used on any numerical field.

    Has a mandatory option ``unit`` which can be one of ``second``,
    ``minute``, ``hour``, ``day``, ``week`` or ``year``, used to interpret the
    numerical field value before converting it.

    Sub-second values will be ignored.
    """
    _name = 'ir.qweb.field.duration'
    _inherit = 'ir.qweb.field'

    def value_to_html(self, cr, uid, value, field, options=None, context=None):
        units = dict(TIMEDELTA_UNITS)
        if value < 0:
            raise ValueError(_("Durations can't be negative"))
        if not options or options.get('unit') not in units:
            raise ValueError(_("A unit must be provided to duration widgets"))

        locale = babel.Locale.parse(
            self.user_lang(cr, uid, context=context).code)
        factor = units[options['unit']]

        # Greedy decomposition into the largest units first.
        sections = []
        r = value * factor
        for unit, secs_per_unit in TIMEDELTA_UNITS:
            v, r = divmod(r, secs_per_unit)
            if not v: continue
            section = babel.dates.format_timedelta(
                v*secs_per_unit, threshold=1, locale=locale)
            if section:
                sections.append(section)
        return ' '.join(sections)

class RelativeDatetimeConverter(osv.AbstractModel):
    _name = 'ir.qweb.field.relative'
    _inherit = 'ir.qweb.field'

    def value_to_html(self, cr, uid, value, field, options=None, context=None):
        parse_format = openerp.tools.DEFAULT_SERVER_DATETIME_FORMAT
        locale = babel.Locale.parse(
            self.user_lang(cr, uid, context=context).code)

        if isinstance(value, basestring):
            value = datetime.datetime.strptime(value, parse_format)

        # value should be a naive datetime in UTC. So is fields.Datetime.now()
        reference = datetime.datetime.strptime(field.now(), parse_format)

        return babel.dates.format_timedelta(value - reference, add_direction=True, locale=locale)

class Contact(orm.AbstractModel):
    _name = 'ir.qweb.field.contact'
    _inherit = 'ir.qweb.field.many2one'

    def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
        if context is None:
            context = {}

        if options is None:
            options = {}
        opf = options.get('fields') or ["name", "address", "phone", "mobile", "fax", "email"]

        value_rec = record[field_name]
        if not value_rec:
            return None
        # sudo: the partner card may be rendered for users without read
        # access; show_address makes name_get include the full address.
        value_rec = value_rec.sudo().with_context(show_address=True)
        value = value_rec.name_get()[0][1]

        val = {
            'name': value.split("\n")[0],
            'address': escape("\n".join(value.split("\n")[1:])),
            'phone': value_rec.phone,
            'mobile': value_rec.mobile,
            'fax': value_rec.fax,
            'city': value_rec.city,
            'country_id': value_rec.country_id.display_name,
            'website': value_rec.website,
            'email': value_rec.email,
            'fields': opf,
            'object': value_rec,
            'options': options
        }

        html = self.pool["ir.ui.view"].render(cr, uid, "base.contact", val, engine='ir.qweb', context=context).decode('utf8')

        return HTMLSafe(html)

class QwebView(orm.AbstractModel):
    _name = 'ir.qweb.field.qweb'
    _inherit = 'ir.qweb.field.many2one'

    def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
        if not getattr(record, field_name):
            return None

        view = getattr(record, field_name)

        if view._model._name != "ir.ui.view":
            # NOTE(review): eager %-formatting in a logging call; lazy
            # `_logger.warning("...", record, field_name)` is preferred.
            _logger.warning("%s.%s must be a 'ir.ui.view' model." % (record, field_name))
            return None

        ctx = (context or {}).copy()
        ctx['object'] = record
        html = view.render(ctx, engine='ir.qweb', context=ctx).decode('utf8')

        return HTMLSafe(html)

class QwebWidget(osv.AbstractModel):
    """ Base t-esc widget: formats the evaluated expression and escapes it. """
    _name = 'ir.qweb.widget'

    def _format(self, inner, options, qwebcontext):
        return self.pool['ir.qweb'].eval_str(inner, qwebcontext)

    def format(self, inner, options, qwebcontext):
        return escape(self._format(inner, options, qwebcontext))

class QwebWidgetMonetary(osv.AbstractModel):
    _name = 'ir.qweb.widget.monetary'
    _inherit = 'ir.qweb.widget'

    def _format(self, inner, options, qwebcontext):
        inner = self.pool['ir.qweb'].eval(inner, qwebcontext)
        display = self.pool['ir.qweb'].eval_object(options['display_currency'], qwebcontext)
        # NOTE(review): MonetaryConverter derives precision with
        # math.floor(); round() here gives a different digit count for
        # roundings such as 0.05 — confirm whether this divergence is
        # intentional.
        precision = int(round(math.log10(display.rounding)))
        fmt = "%.{0}f".format(-precision if precision < 0 else 0)
        lang_code = qwebcontext.context.get('lang') or 'en_US'
        formatted_amount = self.pool['res.lang'].format(
            qwebcontext.cr, qwebcontext.uid, [lang_code], fmt, inner, grouping=True, monetary=True
        )
        pre = post = u''
        if display.position == 'before':
            pre = u'{symbol}\N{NO-BREAK SPACE}'
        else:
            post = u'\N{NO-BREAK SPACE}{symbol}'

        return u'{pre}{0}{post}'.format(
            formatted_amount, pre=pre, post=post
        ).format(symbol=display.symbol,)

class HTMLSafe(object):
    """ HTMLSafe string wrapper, Werkzeug's escape() has special handling for
    objects with a ``__html__`` methods but AFAIK does not provide any such
    object.

    Wrapping a string in HTML will prevent its escaping
    """
    __slots__ = ['string']
    def __init__(self, string):
        self.string = string
    def __html__(self):
        return self.string
    def __str__(self):
        s = self.string
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s
    def __unicode__(self):
        s = self.string
        if isinstance(s, str):
            return s.decode('utf-8')
        return s

def nl2br(string, options=None):
    """ Converts newlines to HTML linebreaks in ``string``. Automatically
    escapes content unless options['html-escape'] is set to False, and returns
    the result wrapped in an HTMLSafe object.

    :param str string:
    :param dict options:
    :rtype: HTMLSafe
    """
    if options is None: options = {}

    if options.get('html-escape', True):
        string = escape(string)
    return HTMLSafe(string.replace('\n', '<br>\n'))

def get_field_type(field, options):
    """ Gets a t-field's effective type from the field definition and its options """
    return options.get('widget', field.type)

class AssetError(Exception):
    pass
class AssetNotFound(AssetError):
    pass

class AssetsBundle(object):
    # Sass installation:
    #
    #       sudo gem install sass compass bootstrap-sass
    #
    # If the following error is encountered:
    #       'ERROR: Cannot load compass.'
    # Use this:
    #       sudo gem install compass --pre
    cmd_sass = ['sass', '--stdin', '-t', 'compressed', '--unix-newlines', '--compass', '-r', 'bootstrap-sass']
    rx_css_import = re.compile("(@import[^;{]+;?)", re.M)
    rx_sass_import = re.compile("""(@import\s?['"]([^'"]+)['"])""")
    rx_css_split = re.compile("\/\*\! ([a-f0-9-]+) \*\/")

    def __init__(self, xmlid, debug=False, cr=None, uid=None, context=None, registry=None):
        self.xmlid = xmlid
        # Fall back on the current HTTP request's environment when the
        # caller does not provide an explicit cursor/uid/context/registry.
        self.cr = request.cr if cr is None else cr
        self.uid = request.uid if uid is None else uid
        self.context = request.context if context is None else context
        self.registry = request.registry if registry is None else registry
        self.javascripts = []
        self.stylesheets = []
        self.css_errors = []
        self.remains = []
        self._checksum = None

        context = self.context.copy()
        context['inherit_branding'] = False
        context['rendering_bundle'] = True
        self.html = self.registry['ir.ui.view'].render(self.cr, self.uid, xmlid, context=context)
        self.parse()

    def parse(self):
        """ Split the rendered bundle template into stylesheet, script and
        passthrough ("remains") fragments.
        """
        fragments = html.fragments_fromstring(self.html)
        for el in fragments:
            if isinstance(el, basestring):
                self.remains.append(el)
            elif isinstance(el, html.HtmlElement):
                src = el.get('src', '')
                href = el.get('href', '')
                atype = el.get('type')
                media = el.get('media')
                if el.tag == 'style':
                    if atype == 'text/sass' or src.endswith('.sass'):
                        self.stylesheets.append(SassAsset(self, inline=el.text, media=media))
                    else:
                        self.stylesheets.append(StylesheetAsset(self, inline=el.text, media=media))
                elif el.tag == 'link' and el.get('rel') == 'stylesheet' and self.can_aggregate(href):
                    if href.endswith('.sass') or atype == 'text/sass':
                        self.stylesheets.append(SassAsset(self, url=href, media=media))
                    else:
                        self.stylesheets.append(StylesheetAsset(self, url=href, media=media))
                elif el.tag == 'script' and not src:
                    self.javascripts.append(JavascriptAsset(self, inline=el.text))
                elif el.tag == 'script' and self.can_aggregate(src):
                    self.javascripts.append(JavascriptAsset(self, url=src))
                else:
                    self.remains.append(html.tostring(el))
            else:
                try:
                    self.remains.append(html.tostring(el))
                except Exception:
                    # notYETimplementederror
                    raise NotImplementedError

    def can_aggregate(self, url):
        # Only local, non-pregenerated resources may be bundled.
        return not urlparse(url).netloc and not url.startswith(('/web/css', '/web/js'))

    def to_html(self, sep=None, css=True, js=True, debug=False):
        if sep is None:
            sep  # NOTE(review): source chunk truncated here — remainder of to_html not visible
= '\n ' response = [] if debug: if css and self.stylesheets: self.compile_sass() for style in self.stylesheets: response.append(style.to_html()) if js: for jscript in self.javascripts: response.append(jscript.to_html()) else: url_for = self.context.get('url_for', lambda url: url) if css and self.stylesheets: suffix = '' if request: ua = request.httprequest.user_agent if ua.browser == "msie" and int((ua.version or '0').split('.')[0]) < 10: suffix = '.0' href = '/web/css%s/%s/%s' % (suffix, self.xmlid, self.version) response.append('<link href="%s" rel="stylesheet"/>' % url_for(href)) if js: src = '/web/js/%s/%s' % (self.xmlid, self.version) response.append('<script type="text/javascript" src="%s"></script>' % url_for(src)) response.extend(self.remains) return sep + sep.join(response) @lazy_property def last_modified(self): """Returns last modified date of linked files""" return max(itertools.chain( (asset.last_modified for asset in self.javascripts), (asset.last_modified for asset in self.stylesheets), )) @lazy_property def version(self): return self.checksum[0:7] @lazy_property def checksum(self): """ Not really a full checksum. 
We compute a SHA1 on the rendered bundle + max linked files last_modified date """ check = self.html + str(self.last_modified) return hashlib.sha1(check).hexdigest() def js(self): content = self.get_cache('js') if content is None: content = ';\n'.join(asset.minify() for asset in self.javascripts) self.set_cache('js', content) return content def css(self, page_number=None): if page_number is not None: return self.css_page(page_number) content = self.get_cache('css') if content is None: self.compile_sass() content = '\n'.join(asset.minify() for asset in self.stylesheets) if self.css_errors: msg = '\n'.join(self.css_errors) content += self.css_message(msg.replace('\n', '\\A ')) # move up all @import rules to the top matches = [] def push(matchobj): matches.append(matchobj.group(0)) return '' content = re.sub(self.rx_css_import, push, content) matches.append(content) content = u'\n'.join(matches) if not self.css_errors: self.set_cache('css', content) content = content.encode('utf-8') return content def css_page(self, page_number): content = self.get_cache('css.%d' % (page_number,)) if page_number: return content if content is None: css = self.css().decode('utf-8') re_rules = '([^{]+\{(?:[^{}]|\{[^{}]*\})*\})' re_selectors = '()(?:\s*@media\s*[^{]*\{)?(?:\s*(?:[^,{]*(?:,|\{(?:[^}]*\}))))' css_url = '@import url(\'/web/css.%%d/%s/%s\');' % (self.xmlid, self.version) pages = [[]] page = pages[0] page_selectors = 0 for rule in re.findall(re_rules, css): selectors = len(re.findall(re_selectors, rule)) if page_selectors + selectors < MAX_CSS_RULES: page_selectors += selectors page.append(rule) else: pages.append([rule]) page = pages[-1] page_selectors = selectors if len(pages) == 1: pages = [] for idx, page in enumerate(pages): self.set_cache("css.%d" % (idx+1), ''.join(page)) content = '\n'.join(css_url % i for i in range(1,len(pages)+1)) self.set_cache("css.0", content) if not content: return self.css() return content def get_cache(self, type): content = None domain = 
[('url', '=', '/web/%s/%s/%s' % (type, self.xmlid, self.version))] bundle = self.registry['ir.attachment'].search_read(self.cr, openerp.SUPERUSER_ID, domain, ['datas'], context=self.context) if bundle and bundle[0]['datas']: content = bundle[0]['datas'].decode('base64') return content def set_cache(self, type, content): ira = self.registry['ir.attachment'] url_prefix = '/web/%s/%s/' % (type, self.xmlid) # Invalidate previous caches try: with self.cr.savepoint(): domain = [('url', '=like', url_prefix + '%')] oids = ira.search(self.cr, openerp.SUPERUSER_ID, domain, context=self.context) if oids: ira.unlink(self.cr, openerp.SUPERUSER_ID, oids, context=self.context) url = url_prefix + self.version ira.create(self.cr, openerp.SUPERUSER_ID, dict( datas=content.encode('utf8').encode('base64'), type='binary', name=url, url=url, ), context=self.context) except psycopg2.Error: pass def css_message(self, message): return """ body:before { background: #ffc; width: 100%%; font-size: 14px; font-family: monospace; white-space: pre; content: "%s"; } """ % message.replace('"', '\\"') def compile_sass(self): """ Checks if the bundle contains any sass content, then compiles it to css. Css compilation is done at the bundle level and not in the assets because they are potentially interdependant. """ sass = [asset for asset in self.stylesheets if isinstance(asset, SassAsset)] if not sass: return source = '\n'.join([asset.get_source() for asset in sass]) # move up all @import rules to the top and exclude file imports imports = [] def push(matchobj): ref = matchobj.group(2) line = '@import "%s"' % ref if '.' 
not in ref and line not in imports and not ref.startswith(('.', '/', '~')): imports.append(line) return '' source = re.sub(self.rx_sass_import, push, source) imports.append(source) source = u'\n'.join(imports) try: compiler = Popen(self.cmd_sass, stdin=PIPE, stdout=PIPE, stderr=PIPE) except Exception: msg = "Could not find 'sass' program needed to compile sass/scss files" _logger.error(msg) self.css_errors.append(msg) return result = compiler.communicate(input=source.encode('utf-8')) if compiler.returncode: error = self.get_sass_error(''.join(result), source=source) _logger.warning(error) self.css_errors.append(error) return compiled = result[0].strip().decode('utf8') fragments = self.rx_css_split.split(compiled)[1:] while fragments: asset_id = fragments.pop(0) asset = next(asset for asset in sass if asset.id == asset_id) asset._content = fragments.pop(0) def get_sass_error(self, stderr, source=None): # TODO: try to find out which asset the error belongs to error = stderr.split('Load paths')[0].replace(' Use --trace for backtrace.', '') error += "This error occured while compiling the bundle '%s' containing:" % self.xmlid for asset in self.stylesheets: if isinstance(asset, SassAsset): error += '\n - %s' % (asset.url if asset.url else '<inline sass>') return error class WebAsset(object): html_url = '%s' def __init__(self, bundle, inline=None, url=None): self.id = str(uuid.uuid4()) self.bundle = bundle self.inline = inline self.url = url self.cr = bundle.cr self.uid = bundle.uid self.registry = bundle.registry self.context = bundle.context self._content = None self._filename = None self._ir_attach = None name = '<inline asset>' if inline else url self.name = "%s defined in bundle '%s'" % (name, bundle.xmlid) if not inline and not url: raise Exception("An asset should either be inlined or url linked") def stat(self): if not (self.inline or self._filename or self._ir_attach): addon = filter(None, self.url.split('/'))[0] try: # Test url against modules static assets 
mpath = openerp.http.addons_manifest[addon]['addons_path'] self._filename = mpath + self.url.replace('/', os.path.sep) except Exception: try: # Test url against ir.attachments fields = ['__last_update', 'datas', 'mimetype'] domain = [('type', '=', 'binary'), ('url', '=', self.url)] ira = self.registry['ir.attachment'] attach = ira.search_read(self.cr, openerp.SUPERUSER_ID, domain, fields, context=self.context) self._ir_attach = attach[0] except Exception: raise AssetNotFound("Could not find %s" % self.name) def to_html(self): raise NotImplementedError() @lazy_property def last_modified(self): try: self.stat() if self._filename: return datetime.datetime.fromtimestamp(os.path.getmtime(self._filename)) elif self._ir_attach: server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT last_update = self._ir_attach['__last_update'] try: return datetime.datetime.strptime(last_update, server_format + '.%f') except ValueError: return datetime.datetime.strptime(last_update, server_format) except Exception: pass return datetime.datetime(1970, 1, 1) @property def content(self): if not self._content: self._content = self.inline or self._fetch_content() return self._content def _fetch_content(self): """ Fetch content from file or database""" try: self.stat() if self._filename: with open(self._filename, 'rb') as fp: return fp.read().decode('utf-8') else: return self._ir_attach['datas'].decode('base64') except UnicodeDecodeError: raise AssetError('%s is not utf-8 encoded.' % self.name) except IOError: raise AssetNotFound('File %s does not exist.' % self.name) except: raise AssetError('Could not get content for %s.' 
% self.name) def minify(self): return self.content def with_header(self, content=None): if content is None: content = self.content return '\n/* %s */\n%s' % (self.name, content) class JavascriptAsset(WebAsset): def minify(self): return self.with_header(rjsmin(self.content)) def _fetch_content(self): try: return super(JavascriptAsset, self)._fetch_content() except AssetError, e: return "console.error(%s);" % json.dumps(e.message) def to_html(self): if self.url: return '<script type="text/javascript" src="%s"></script>' % (self.html_url % self.url) else: return '<script type="text/javascript" charset="utf-8">%s</script>' % self.with_header() class StylesheetAsset(WebAsset): rx_import = re.compile(r"""@import\s+('|")(?!'|"|/|https?://)""", re.U) rx_url = re.compile(r"""url\s*\(\s*('|"|)(?!'|"|/|https?://|data:)""", re.U) rx_sourceMap = re.compile(r'(/\*# sourceMappingURL=.*)', re.U) rx_charset = re.compile(r'(@charset "[^"]+";)', re.U) def __init__(self, *args, **kw): self.media = kw.pop('media', None) super(StylesheetAsset, self).__init__(*args, **kw) @property def content(self): content = super(StylesheetAsset, self).content if self.media: content = '@media %s { %s }' % (self.media, content) return content def _fetch_content(self): try: content = super(StylesheetAsset, self)._fetch_content() web_dir = os.path.dirname(self.url) content = self.rx_import.sub( r"""@import \1%s/""" % (web_dir,), content, ) content = self.rx_url.sub( r"url(\1%s/" % (web_dir,), content, ) # remove charset declarations, we only support utf-8 content = self.rx_charset.sub('', content) except AssetError, e: self.bundle.css_errors.append(e.message) return '' return content def minify(self): # remove existing sourcemaps, make no sense after re-mini content = self.rx_sourceMap.sub('', self.content) # comments content = re.sub(r'/\*.*?\*/', '', content, flags=re.S) # space content = re.sub(r'\s+', ' ', content) content = re.sub(r' *([{}]) *', r'\1', content) return self.with_header(content) def 
to_html(self): media = (' media="%s"' % werkzeug.utils.escape(self.media)) if self.media else '' if self.url: href = self.html_url % self.url return '<link rel="stylesheet" href="%s" type="text/css"%s/>' % (href, media) else: return '<style type="text/css"%s>%s</style>' % (media, self.with_header()) class SassAsset(StylesheetAsset): html_url = '%s.css' rx_indent = re.compile(r'^( +|\t+)', re.M) indent = None reindent = ' ' def minify(self): return self.with_header() def to_html(self): if self.url: try: ira = self.registry['ir.attachment'] url = self.html_url % self.url domain = [('type', '=', 'binary'), ('url', '=', self.url)] with self.cr.savepoint(): ira_id = ira.search(self.cr, openerp.SUPERUSER_ID, domain, context=self.context) if ira_id: # TODO: update only if needed ira.write(self.cr, openerp.SUPERUSER_ID, [ira_id], {'datas': self.content}, context=self.context) else: ira.create(self.cr, openerp.SUPERUSER_ID, dict( datas=self.content.encode('utf8').encode('base64'), mimetype='text/css', type='binary', name=url, url=url, ), context=self.context) except psycopg2.Error: pass return super(SassAsset, self).to_html() def get_source(self): content = textwrap.dedent(self.inline or self._fetch_content()) def fix_indent(m): ind = m.group() if self.indent is None: self.indent = ind if self.indent == self.reindent: # Don't reindent the file if identation is the final one (reindent) raise StopIteration() return ind.replace(self.indent, self.reindent) try: content = self.rx_indent.sub(fix_indent, content) except StopIteration: pass return "/*! %s */\n%s" % (self.id, content) def rjsmin(script): """ Minify js with a clever regex. 
Taken from http://opensource.perlig.de/rjsmin Apache License, Version 2.0 """ def subber(match): """ Substitution callback """ groups = match.groups() return ( groups[0] or groups[1] or groups[2] or groups[3] or (groups[4] and '\n') or (groups[5] and ' ') or (groups[6] and ' ') or (groups[7] and ' ') or '' ) result = re.sub( r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?' r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|' r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]' r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/' r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*' r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*' r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01' r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/' r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]' r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./' r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/' r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01' r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-\040"#' r'%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-' r'\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^' r'\000-#%-,./:-@\[-^`{-~-])|(?<=\+)((?:[\000-\011\013\014\016-\040]|' r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=\+)|(?<=-)((?:[\000-\011\0' r'13\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=-)|(?:[\0' r'00-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:' r'(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*' r']*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script ).strip() return result # vim:et:
thedrow/pgcli
refs/heads/master
pgcli/packages/pgspecial/main.py
8
import logging
from collections import namedtuple

from . import export

log = logging.getLogger(__name__)

# Argument-handling modes for special commands: how execute() invokes the
# registered handler.
NO_QUERY = 0      # handler takes no arguments
PARSED_QUERY = 1  # handler receives cur, the parsed pattern and verbose flag
RAW_QUERY = 2     # handler receives cur and the raw sql string

SpecialCommand = namedtuple('SpecialCommand',
        ['handler', 'syntax', 'description', 'arg_type', 'hidden',
            'case_sensitive'])


@export
class CommandNotFound(Exception):
    pass


@export
class PGSpecial(object):

    # Default static commands that don't rely on PGSpecial state are registered
    # via the special_command decorator and stored in default_commands
    default_commands = {}

    def __init__(self):
        self.commands = self.default_commands.copy()

        # Both display toggles start out disabled; \x and \timing flip them.
        # NOTE: the original code first set timing_enabled to True and then
        # immediately overwrote it with False — the dead store is removed;
        # the effective initial value (False) is unchanged.
        self.timing_enabled = False
        self.expanded_output = False

        self.register(self.show_help, '\\?', '\\?', 'Show Help.',
                      arg_type=NO_QUERY)

        self.register(self.toggle_expanded_output, '\\x', '\\x',
                      'Toggle expanded output.', arg_type=NO_QUERY)

        self.register(self.toggle_timing, '\\timing', '\\timing',
                      'Toggle timing of commands.', arg_type=NO_QUERY)

    def register(self, *args, **kwargs):
        """Register a special command on this instance's command table."""
        register_special_command(*args, command_dict=self.commands, **kwargs)

    def execute(self, cur, sql):
        """Look up the special command contained in *sql* and run its handler.

        Lookup is tried case-sensitively first, then case-insensitively for
        commands registered as case-insensitive.

        :raises CommandNotFound: when no matching command is registered (or
            only a case-sensitive command matches with different case).
        """
        commands = self.commands
        command, verbose, pattern = parse_special_command(sql)

        if (command not in commands) and (command.lower() not in commands):
            # Consistency fix: carry the same message as the raise below
            # (the original raised a bare CommandNotFound here).
            raise CommandNotFound('Command not found: %s' % command)

        try:
            special_cmd = commands[command]
        except KeyError:
            special_cmd = commands[command.lower()]
            if special_cmd.case_sensitive:
                raise CommandNotFound('Command not found: %s' % command)

        if special_cmd.arg_type == NO_QUERY:
            return special_cmd.handler()
        elif special_cmd.arg_type == PARSED_QUERY:
            return special_cmd.handler(cur=cur, pattern=pattern,
                                       verbose=verbose)
        elif special_cmd.arg_type == RAW_QUERY:
            return special_cmd.handler(cur=cur, query=sql)

    def show_help(self):
        """Return a single result set listing every non-hidden command."""
        headers = ['Command', 'Description']
        result = []

        for _, value in sorted(self.commands.items()):
            if not value.hidden:
                result.append((value.syntax, value.description))
        return [(None, result, headers, None)]

    def toggle_expanded_output(self):
        """Flip expanded (\\x) display mode and report the new state."""
        self.expanded_output = not self.expanded_output
        message = u"Expanded display is "
        message += u"on." if self.expanded_output else u"off."
        return [(None, None, None, message)]

    def toggle_timing(self):
        """Flip \\timing mode and report the new state."""
        self.timing_enabled = not self.timing_enabled
        message = "Timing is "
        message += "on." if self.timing_enabled else "off."
        return [(None, None, None, message)]


@export
def parse_special_command(sql):
    """Split *sql* into ``(command, verbose, argument)``.

    ``verbose`` is True when the command carries a ``+`` suffix
    (e.g. ``\\d+ tbl`` -> ``('\\d', True, 'tbl')``).
    """
    command, _, arg = sql.partition(' ')
    verbose = '+' in command
    command = command.strip().replace('+', '')
    return (command, verbose, arg.strip())


def special_command(command, syntax, description, arg_type=PARSED_QUERY,
                    hidden=False, case_sensitive=True, aliases=()):
    """A decorator used internally for static special commands"""

    def wrapper(wrapped):
        register_special_command(wrapped, command, syntax, description,
                                 arg_type, hidden, case_sensitive, aliases,
                                 command_dict=PGSpecial.default_commands)
        return wrapped
    return wrapper


def register_special_command(handler, command, syntax, description,
                             arg_type=PARSED_QUERY, hidden=False,
                             case_sensitive=True, aliases=(),
                             command_dict=None):
    """Insert *handler* into *command_dict*, plus a hidden entry per alias."""
    cmd = command.lower() if not case_sensitive else command
    command_dict[cmd] = SpecialCommand(handler, syntax, description, arg_type,
                                       hidden, case_sensitive)
    for alias in aliases:
        cmd = alias.lower() if not case_sensitive else alias
        command_dict[cmd] = SpecialCommand(handler, syntax, description,
                                           arg_type,
                                           case_sensitive=case_sensitive,
                                           hidden=True)


@special_command('\\e', '\\e [file]', 'Edit the query with external editor.',
                 arg_type=NO_QUERY)
def doc_only():
    # Exists only so \e shows up in \? output; the client intercepts it.
    raise RuntimeError


@special_command('\\ef', '\\ef [funcname [line]]',
                 'Edit the contents of the query buffer.', arg_type=NO_QUERY,
                 hidden=True)
@special_command('\\sf', '\\sf[+] FUNCNAME', 'Show a function\'s definition.',
                 arg_type=NO_QUERY, hidden=True)
@special_command('\\do', '\\do[S] [pattern]', 'List operators.',
                 arg_type=NO_QUERY, hidden=True)
@special_command('\\dp', '\\dp [pattern]',
                 'List table, view, and sequence access privileges.',
                 arg_type=NO_QUERY, hidden=True)
@special_command('\\z', '\\z [pattern]', 'Same as \\dp.', arg_type=NO_QUERY,
                 hidden=True)
def place_holder():
    # Registered for completion/help purposes only; never executed.
    raise NotImplementedError
cernops/nova
refs/heads/master
doc/source/conf.py
9
# -*- coding: utf-8 -*-
#
# nova documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import subprocess
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The 'ext.*' entries are project-local extensions resolved through the
# sys.path inserts above.
extensions = ['sphinx.ext.autodoc',
              'ext.nova_todo',
              'sphinx.ext.coverage',
              'sphinx.ext.graphviz',
              'oslosphinx',
              "ext.support_matrix",
              'oslo_config.sphinxconfiggen',
              'ext.versioned_notifications'
              ]

# Input/output for the oslo.config sample-config generator extension.
config_generator_config_file = '../../etc/nova/nova-config-generator.conf'
sample_config_basename = '_static/nova'

todo_include_todos = True

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'nova'
copyright = u'2010-present, OpenStack Foundation'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from nova.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# A list of glob-style patterns that should be excluded when looking for
# source files. They are matched against the source file names relative to the
# source directory, using slashes as directory separators on all platforms.
exclude_patterns = [
    'api/nova.wsgi.nova-*',
    'api/nova.tests.*',
]

# The reST default role (used for this markup: `text`) to use
# for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['nova.']

# -- Options for man page output ----------------------------------------------

# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'

man_pages = [
    ('man/nova-all', 'nova-all', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-api-metadata', 'nova-api-metadata', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-api-os-compute', 'nova-api-os-compute',
     u'Cloud controller fabric', [u'OpenStack'], 1),
    ('man/nova-api', 'nova-api', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-cells', 'nova-cells', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-cert', 'nova-cert', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-compute', 'nova-compute', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-console', 'nova-console', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-consoleauth', 'nova-consoleauth', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-dhcpbridge', 'nova-dhcpbridge', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-idmapshift', 'nova-idmapshift', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-manage', 'nova-manage', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-network', 'nova-network', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-novncproxy', 'nova-novncproxy', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-spicehtml5proxy', 'nova-spicehtml5proxy',
     u'Cloud controller fabric', [u'OpenStack'], 1),
    ('man/nova-serialproxy', 'nova-serialproxy', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-rootwrap', 'nova-rootwrap', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-conductor', 'nova-conductor', u'Cloud controller fabric',
     [u'OpenStack'], 1),
]

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Stamp pages with the date of the latest git commit instead of the build
# date.
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
           "-n1"]
# NOTE(review): communicate()[0] is bytes on Python 3 — presumably this
# config targets Python 2 (cf. the u'' literals); confirm before porting.
html_last_updated_fmt = subprocess.Popen(
    git_cmd, stdout=subprocess.PIPE).communicate()[0]

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'novadoc'

# -- Options for LaTeX output -------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'Nova.tex', u'Nova Documentation',
     u'OpenStack Foundation', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
mbedmicro/pyOCD
refs/heads/master
pyocd/trace/__init__.py
9
# pyOCD debugger # Copyright (c) 2017 Arm Limited # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
MiLk/ansible
refs/heads/devel
lib/ansible/modules/cloud/google/gcspanner.py
4
#!/usr/bin/python
# Copyright 2017 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: gcspanner
version_added: "2.3"
short_description: Create and Delete Instances/Databases on Spanner.
description:
    - Create and Delete Instances/Databases on Spanner.
      See U(https://cloud.google.com/spanner/docs) for an overview.
requirements:
  - "python >= 2.6"
  - "google-auth >= 0.5.0"
  - "google-cloud-spanner >= 0.23.0"
notes:
  - Changing the configuration on an existing instance is not supported.
author:
  - "Tom Melendez (@supertom) <[email protected]>"
options:
  configuration:
    description:
       - Configuration the instance should use.
       - Examples are us-central1, asia-east1 and europe-west1.
    required: True
  instance_id:
    description:
       - GCP spanner instance name.
    required: True
  database_name:
    description:
       - Name of database contained on the instance.
    required: False
  force_instance_delete:
    description:
       - To delete an instance, this argument must exist and be true
         (along with state being equal to absent).
    required: False
    default: False
  instance_display_name:
    description:
       - Name of Instance to display.
       - If not specified, instance_id will be used instead.
    required: False
  node_count:
    description:
       - Number of nodes in the instance.
       - If not specified while creating an instance, node_count will be
         set to 1.
    required: False
  state:
    description:
       - State of the instance or database (absent, present). Applies to the
         most granular resource. If a database_name is specified we remove
         it. If only instance_id is specified, that is what is removed.
    required: False
    default: "present"
'''

EXAMPLES = '''
# Create instance.
gcspanner:
  instance_id: "{{ instance_id }}"
  configuration: "{{ configuration }}"
  state: present
  node_count: 1

# Create database.
gcspanner:
  instance_id: "{{ instance_id }}"
  configuration: "{{ configuration }}"
  database_name: "{{ database_name }}"
  state: present

# Delete instance (and all databases)
gcspanner:
  instance_id: "{{ instance_id }}"
  configuration: "{{ configuration }}"
  state: absent
  force_instance_delete: yes
'''

RETURN = '''
state:
    description: The state of the instance or database. Value will be either 'absent' or 'present'.
    returned: Always
    type: str
    sample: "present"

database_name:
    description: Name of database.
    returned: When database name is specified
    type: str
    sample: "mydatabase"

instance_id:
    description: Name of instance.
    returned: Always
    type: str
    sample: "myinstance"

previous_values:
   description: List of dictionaries containing previous values prior to update.
   returned: When an instance update has occurred and a field has been modified.
   type: dict
   sample: "'previous_values': { 'instance': { 'instance_display_name': 'my-instance', 'node_count': 1 } }"

updated:
   description: Boolean field to denote an update has occurred.
   returned: When an update has occurred.
   type: bool
   sample: True
'''

try:
    from ast import literal_eval
    HAS_PYTHON26 = True
except ImportError:
    HAS_PYTHON26 = False

try:
    from google.cloud import spanner
    from google.gax.errors import GaxError
    HAS_GOOGLE_CLOUD_SPANNER = True
except ImportError:
    HAS_GOOGLE_CLOUD_SPANNER = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
from ansible.module_utils.six import string_types

CLOUD_CLIENT = 'google-cloud-spanner'
CLOUD_CLIENT_MINIMUM_VERSION = '0.23.0'
CLOUD_CLIENT_USER_AGENT = 'ansible-spanner-0.1'


def get_spanner_configuration_name(config_name, project_name):
    """Return the fully-qualified Spanner instance-config resource name.

    :param config_name: short region name, e.g. 'us-central1'.
    :type config_name: ``str``

    :param project_name: GCP project id.
    :type project_name: ``str``

    :returns: 'projects/<project>/instanceConfigs/regional-<config>'
    :rtype: ``str``
    """
    return 'projects/%s/instanceConfigs/regional-%s' % (project_name,
                                                        config_name)


def instance_update(instance):
    """Call update method on spanner client.

    Note: A ValueError exception is thrown despite the client succeeding.
    So, we validate the node_count and instance_display_name parameters and
    then ignore the ValueError exception.

    :param instance: a Spanner instance object
    :type instance: class `google.cloud.spanner.Instance`

    :returns: True on success, raises ValueError on type error.
    :rtype: ``bool``
    """
    errmsg = ''
    if not isinstance(instance.node_count, int):
        errmsg = 'node_count must be an integer %s (%s)' % (
            instance.node_count, type(instance.node_count))
    if instance.display_name and not isinstance(instance.display_name,
                                                string_types):
        # fixed message grammar: 'an string' -> 'a string'
        errmsg = 'instance_display_name must be a string %s (%s)' % (
            instance.display_name, type(instance.display_name))
    if errmsg:
        raise ValueError(errmsg)

    try:
        instance.update()
    except ValueError:
        # The ValueError here is the one we 'expect': the client raises it
        # even when the update succeeds (see docstring), so we deliberately
        # swallow it.  Any ValueError from bad input was raised above.
        pass

    return True


def main():
    """Entry point: create/update/delete a Spanner instance or database.

    Reads module parameters, builds a Spanner client and applies the
    requested state to the most granular resource specified (database if
    database_name is given, otherwise the instance).
    """
    module = AnsibleModule(
        argument_spec=dict(
            instance_id=dict(type='str', required=True),
            state=dict(choices=['absent', 'present'], default='present'),
            database_name=dict(type='str', default=None),
            configuration=dict(type='str', required=True),
            node_count=dict(type='int'),
            instance_display_name=dict(type='str', default=None),
            force_instance_delete=dict(type='bool', default=False),
            service_account_email=dict(),
            credentials_file=dict(),
            project_id=dict(),
        ),
    )

    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_GOOGLE_CLOUD_SPANNER:
        module.fail_json(msg="Please install google-cloud-spanner.")
    if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
        module.fail_json(msg="Please install %s client version %s" %
                         (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))

    mod_params = {}
    mod_params['state'] = module.params.get('state')
    mod_params['instance_id'] = module.params.get('instance_id')
    mod_params['database_name'] = module.params.get('database_name')
    mod_params['configuration'] = module.params.get('configuration')
    mod_params['node_count'] = module.params.get('node_count', None)
    mod_params['instance_display_name'] = module.params.get('instance_display_name')
    mod_params['force_instance_delete'] = module.params.get('force_instance_delete')

    creds, params = get_google_cloud_credentials(module)
    spanner_client = spanner.Client(project=params['project_id'],
                                    credentials=creds,
                                    user_agent=CLOUD_CLIENT_USER_AGENT)
    changed = False
    json_output = {}

    i = None
    if mod_params['instance_id']:
        config_name = get_spanner_configuration_name(
            mod_params['configuration'], params['project_id'])
        i = spanner_client.instance(mod_params['instance_id'],
                                    configuration_name=config_name)
    d = None
    if mod_params['database_name']:
        # TODO(supertom): support DDL
        ddl_statements = ''
        d = i.database(mod_params['database_name'], ddl_statements)

    if mod_params['state'] == 'absent':
        # Remove the most granular resource.  If a database is specified we
        # only ever touch the database; if only an instance is specified,
        # that is what is removed.
        if d is not None:
            # BUGFIX: previously, a specified-but-nonexistent database fell
            # through to the instance branch and could delete the whole
            # instance.  When a database is named, never touch the instance.
            if d.exists():
                d.drop()
                changed = True
        else:
            if i.exists():
                if mod_params['force_instance_delete']:
                    i.delete()
                else:
                    module.fail_json(
                        msg=(("Cannot delete Spanner instance: "
                              "'force_instance_delete' argument not specified")))
                changed = True
    elif mod_params['state'] == 'present':
        if not i.exists():
            # Create the instance; default to a single node when node_count
            # was not supplied.
            i = spanner_client.instance(
                mod_params['instance_id'],
                configuration_name=config_name,
                display_name=mod_params['instance_display_name'],
                node_count=mod_params['node_count'] or 1)
            i.create()
            changed = True
        else:
            # Update the existing instance, recording previous values for
            # any field we modify.
            # NOTE(review): if instance_display_name is omitted this still
            # overwrites display_name with None — preserved as-is; confirm
            # intended behaviour before changing.
            i.reload()
            inst_prev_vals = {}
            if i.display_name != mod_params['instance_display_name']:
                inst_prev_vals['instance_display_name'] = i.display_name
                i.display_name = mod_params['instance_display_name']
            if mod_params['node_count']:
                if i.node_count != mod_params['node_count']:
                    inst_prev_vals['node_count'] = i.node_count
                    i.node_count = mod_params['node_count']
            if inst_prev_vals:
                changed = instance_update(i)
                json_output['updated'] = changed
                json_output['previous_values'] = {'instance': inst_prev_vals}
        if d:
            if not d.exists():
                d.create()
                d.reload()
                changed = True

    json_output['changed'] = changed
    json_output.update(mod_params)
    module.exit_json(**json_output)


if __name__ == '__main__':
    main()
Mj258/weiboapi
refs/heads/master
srapyDemo/envs/Lib/site-packages/pythonwin/pywin/scintilla/scintillacon.py
24
# Generated by h2py from Include\scintilla.h # Included from BaseTsd.h def HandleToUlong(h): return HandleToULong(h) def UlongToHandle(ul): return ULongToHandle(ul) def UlongToPtr(ul): return ULongToPtr(ul) def UintToPtr(ui): return UIntToPtr(ui) INVALID_POSITION = -1 SCI_START = 2000 SCI_OPTIONAL_START = 3000 SCI_LEXER_START = 4000 SCI_ADDTEXT = 2001 SCI_ADDSTYLEDTEXT = 2002 SCI_INSERTTEXT = 2003 SCI_CLEARALL = 2004 SCI_CLEARDOCUMENTSTYLE = 2005 SCI_GETLENGTH = 2006 SCI_GETCHARAT = 2007 SCI_GETCURRENTPOS = 2008 SCI_GETANCHOR = 2009 SCI_GETSTYLEAT = 2010 SCI_REDO = 2011 SCI_SETUNDOCOLLECTION = 2012 SCI_SELECTALL = 2013 SCI_SETSAVEPOINT = 2014 SCI_GETSTYLEDTEXT = 2015 SCI_CANREDO = 2016 SCI_MARKERLINEFROMHANDLE = 2017 SCI_MARKERDELETEHANDLE = 2018 SCI_GETUNDOCOLLECTION = 2019 SCWS_INVISIBLE = 0 SCWS_VISIBLEALWAYS = 1 SCWS_VISIBLEAFTERINDENT = 2 SCI_GETVIEWWS = 2020 SCI_SETVIEWWS = 2021 SCI_POSITIONFROMPOINT = 2022 SCI_POSITIONFROMPOINTCLOSE = 2023 SCI_GOTOLINE = 2024 SCI_GOTOPOS = 2025 SCI_SETANCHOR = 2026 SCI_GETCURLINE = 2027 SCI_GETENDSTYLED = 2028 SC_EOL_CRLF = 0 SC_EOL_CR = 1 SC_EOL_LF = 2 SCI_CONVERTEOLS = 2029 SCI_GETEOLMODE = 2030 SCI_SETEOLMODE = 2031 SCI_STARTSTYLING = 2032 SCI_SETSTYLING = 2033 SCI_GETBUFFEREDDRAW = 2034 SCI_SETBUFFEREDDRAW = 2035 SCI_SETTABWIDTH = 2036 SCI_GETTABWIDTH = 2121 SC_CP_UTF8 = 65001 SC_CP_DBCS = 1 SCI_SETCODEPAGE = 2037 SCI_SETUSEPALETTE = 2039 MARKER_MAX = 31 SC_MARK_CIRCLE = 0 SC_MARK_ROUNDRECT = 1 SC_MARK_ARROW = 2 SC_MARK_SMALLRECT = 3 SC_MARK_SHORTARROW = 4 SC_MARK_EMPTY = 5 SC_MARK_ARROWDOWN = 6 SC_MARK_MINUS = 7 SC_MARK_PLUS = 8 SC_MARK_VLINE = 9 SC_MARK_LCORNER = 10 SC_MARK_TCORNER = 11 SC_MARK_BOXPLUS = 12 SC_MARK_BOXPLUSCONNECTED = 13 SC_MARK_BOXMINUS = 14 SC_MARK_BOXMINUSCONNECTED = 15 SC_MARK_LCORNERCURVE = 16 SC_MARK_TCORNERCURVE = 17 SC_MARK_CIRCLEPLUS = 18 SC_MARK_CIRCLEPLUSCONNECTED = 19 SC_MARK_CIRCLEMINUS = 20 SC_MARK_CIRCLEMINUSCONNECTED = 21 SC_MARK_BACKGROUND = 22 SC_MARK_DOTDOTDOT = 23 SC_MARK_ARROWS = 24 
SC_MARK_PIXMAP = 25 SC_MARK_FULLRECT = 26 SC_MARK_LEFTRECT = 27 SC_MARK_CHARACTER = 10000 SC_MARKNUM_FOLDEREND = 25 SC_MARKNUM_FOLDEROPENMID = 26 SC_MARKNUM_FOLDERMIDTAIL = 27 SC_MARKNUM_FOLDERTAIL = 28 SC_MARKNUM_FOLDERSUB = 29 SC_MARKNUM_FOLDER = 30 SC_MARKNUM_FOLDEROPEN = 31 SC_MASK_FOLDERS = (-33554432) SCI_MARKERDEFINE = 2040 SCI_MARKERSETFORE = 2041 SCI_MARKERSETBACK = 2042 SCI_MARKERADD = 2043 SCI_MARKERDELETE = 2044 SCI_MARKERDELETEALL = 2045 SCI_MARKERGET = 2046 SCI_MARKERNEXT = 2047 SCI_MARKERPREVIOUS = 2048 SCI_MARKERDEFINEPIXMAP = 2049 SCI_MARKERADDSET = 2466 SCI_MARKERSETALPHA = 2476 SC_MARGIN_SYMBOL = 0 SC_MARGIN_NUMBER = 1 SC_MARGIN_BACK = 2 SC_MARGIN_FORE = 3 SCI_SETMARGINTYPEN = 2240 SCI_GETMARGINTYPEN = 2241 SCI_SETMARGINWIDTHN = 2242 SCI_GETMARGINWIDTHN = 2243 SCI_SETMARGINMASKN = 2244 SCI_GETMARGINMASKN = 2245 SCI_SETMARGINSENSITIVEN = 2246 SCI_GETMARGINSENSITIVEN = 2247 STYLE_DEFAULT = 32 STYLE_LINENUMBER = 33 STYLE_BRACELIGHT = 34 STYLE_BRACEBAD = 35 STYLE_CONTROLCHAR = 36 STYLE_INDENTGUIDE = 37 STYLE_CALLTIP = 38 STYLE_LASTPREDEFINED = 39 STYLE_MAX = 255 SC_CHARSET_ANSI = 0 SC_CHARSET_DEFAULT = 1 SC_CHARSET_BALTIC = 186 SC_CHARSET_CHINESEBIG5 = 136 SC_CHARSET_EASTEUROPE = 238 SC_CHARSET_GB2312 = 134 SC_CHARSET_GREEK = 161 SC_CHARSET_HANGUL = 129 SC_CHARSET_MAC = 77 SC_CHARSET_OEM = 255 SC_CHARSET_RUSSIAN = 204 SC_CHARSET_CYRILLIC = 1251 SC_CHARSET_SHIFTJIS = 128 SC_CHARSET_SYMBOL = 2 SC_CHARSET_TURKISH = 162 SC_CHARSET_JOHAB = 130 SC_CHARSET_HEBREW = 177 SC_CHARSET_ARABIC = 178 SC_CHARSET_VIETNAMESE = 163 SC_CHARSET_THAI = 222 SC_CHARSET_8859_15 = 1000 SCI_STYLECLEARALL = 2050 SCI_STYLESETFORE = 2051 SCI_STYLESETBACK = 2052 SCI_STYLESETBOLD = 2053 SCI_STYLESETITALIC = 2054 SCI_STYLESETSIZE = 2055 SCI_STYLESETFONT = 2056 SCI_STYLESETEOLFILLED = 2057 SCI_STYLERESETDEFAULT = 2058 SCI_STYLESETUNDERLINE = 2059 SC_CASE_MIXED = 0 SC_CASE_UPPER = 1 SC_CASE_LOWER = 2 SCI_STYLEGETFORE = 2481 SCI_STYLEGETBACK = 2482 SCI_STYLEGETBOLD = 2483 
SCI_STYLEGETITALIC = 2484 SCI_STYLEGETSIZE = 2485 SCI_STYLEGETFONT = 2486 SCI_STYLEGETEOLFILLED = 2487 SCI_STYLEGETUNDERLINE = 2488 SCI_STYLEGETCASE = 2489 SCI_STYLEGETCHARACTERSET = 2490 SCI_STYLEGETVISIBLE = 2491 SCI_STYLEGETCHANGEABLE = 2492 SCI_STYLEGETHOTSPOT = 2493 SCI_STYLESETCASE = 2060 SCI_STYLESETCHARACTERSET = 2066 SCI_STYLESETHOTSPOT = 2409 SCI_SETSELFORE = 2067 SCI_SETSELBACK = 2068 SCI_GETSELALPHA = 2477 SCI_SETSELALPHA = 2478 SCI_GETSELEOLFILLED = 2479 SCI_SETSELEOLFILLED = 2480 SCI_SETCARETFORE = 2069 SCI_ASSIGNCMDKEY = 2070 SCI_CLEARCMDKEY = 2071 SCI_CLEARALLCMDKEYS = 2072 SCI_SETSTYLINGEX = 2073 SCI_STYLESETVISIBLE = 2074 SCI_GETCARETPERIOD = 2075 SCI_SETCARETPERIOD = 2076 SCI_SETWORDCHARS = 2077 SCI_BEGINUNDOACTION = 2078 SCI_ENDUNDOACTION = 2079 INDIC_PLAIN = 0 INDIC_SQUIGGLE = 1 INDIC_TT = 2 INDIC_DIAGONAL = 3 INDIC_STRIKE = 4 INDIC_HIDDEN = 5 INDIC_BOX = 6 INDIC_ROUNDBOX = 7 INDIC_MAX = 31 INDIC_CONTAINER = 8 INDIC0_MASK = 0x20 INDIC1_MASK = 0x40 INDIC2_MASK = 0x80 INDICS_MASK = 0xE0 SCI_INDICSETSTYLE = 2080 SCI_INDICGETSTYLE = 2081 SCI_INDICSETFORE = 2082 SCI_INDICGETFORE = 2083 SCI_INDICSETUNDER = 2510 SCI_INDICGETUNDER = 2511 SCI_SETWHITESPACEFORE = 2084 SCI_SETWHITESPACEBACK = 2085 SCI_SETSTYLEBITS = 2090 SCI_GETSTYLEBITS = 2091 SCI_SETLINESTATE = 2092 SCI_GETLINESTATE = 2093 SCI_GETMAXLINESTATE = 2094 SCI_GETCARETLINEVISIBLE = 2095 SCI_SETCARETLINEVISIBLE = 2096 SCI_GETCARETLINEBACK = 2097 SCI_SETCARETLINEBACK = 2098 SCI_STYLESETCHANGEABLE = 2099 SCI_AUTOCSHOW = 2100 SCI_AUTOCCANCEL = 2101 SCI_AUTOCACTIVE = 2102 SCI_AUTOCPOSSTART = 2103 SCI_AUTOCCOMPLETE = 2104 SCI_AUTOCSTOPS = 2105 SCI_AUTOCSETSEPARATOR = 2106 SCI_AUTOCGETSEPARATOR = 2107 SCI_AUTOCSELECT = 2108 SCI_AUTOCSETCANCELATSTART = 2110 SCI_AUTOCGETCANCELATSTART = 2111 SCI_AUTOCSETFILLUPS = 2112 SCI_AUTOCSETCHOOSESINGLE = 2113 SCI_AUTOCGETCHOOSESINGLE = 2114 SCI_AUTOCSETIGNORECASE = 2115 SCI_AUTOCGETIGNORECASE = 2116 SCI_USERLISTSHOW = 2117 SCI_AUTOCSETAUTOHIDE = 2118 
SCI_AUTOCGETAUTOHIDE = 2119 SCI_AUTOCSETDROPRESTOFWORD = 2270 SCI_AUTOCGETDROPRESTOFWORD = 2271 SCI_REGISTERIMAGE = 2405 SCI_CLEARREGISTEREDIMAGES = 2408 SCI_AUTOCGETTYPESEPARATOR = 2285 SCI_AUTOCSETTYPESEPARATOR = 2286 SCI_AUTOCSETMAXWIDTH = 2208 SCI_AUTOCGETMAXWIDTH = 2209 SCI_AUTOCSETMAXHEIGHT = 2210 SCI_AUTOCGETMAXHEIGHT = 2211 SCI_SETINDENT = 2122 SCI_GETINDENT = 2123 SCI_SETUSETABS = 2124 SCI_GETUSETABS = 2125 SCI_SETLINEINDENTATION = 2126 SCI_GETLINEINDENTATION = 2127 SCI_GETLINEINDENTPOSITION = 2128 SCI_GETCOLUMN = 2129 SCI_SETHSCROLLBAR = 2130 SCI_GETHSCROLLBAR = 2131 SC_IV_NONE = 0 SC_IV_REAL = 1 SC_IV_LOOKFORWARD = 2 SC_IV_LOOKBOTH = 3 SCI_SETINDENTATIONGUIDES = 2132 SCI_GETINDENTATIONGUIDES = 2133 SCI_SETHIGHLIGHTGUIDE = 2134 SCI_GETHIGHLIGHTGUIDE = 2135 SCI_GETLINEENDPOSITION = 2136 SCI_GETCODEPAGE = 2137 SCI_GETCARETFORE = 2138 SCI_GETUSEPALETTE = 2139 SCI_GETREADONLY = 2140 SCI_SETCURRENTPOS = 2141 SCI_SETSELECTIONSTART = 2142 SCI_GETSELECTIONSTART = 2143 SCI_SETSELECTIONEND = 2144 SCI_GETSELECTIONEND = 2145 SCI_SETPRINTMAGNIFICATION = 2146 SCI_GETPRINTMAGNIFICATION = 2147 SC_PRINT_NORMAL = 0 SC_PRINT_INVERTLIGHT = 1 SC_PRINT_BLACKONWHITE = 2 SC_PRINT_COLOURONWHITE = 3 SC_PRINT_COLOURONWHITEDEFAULTBG = 4 SCI_SETPRINTCOLOURMODE = 2148 SCI_GETPRINTCOLOURMODE = 2149 SCFIND_WHOLEWORD = 2 SCFIND_MATCHCASE = 4 SCFIND_WORDSTART = 0x00100000 SCFIND_REGEXP = 0x00200000 SCFIND_POSIX = 0x00400000 SCI_FINDTEXT = 2150 SCI_FORMATRANGE = 2151 SCI_GETFIRSTVISIBLELINE = 2152 SCI_GETLINE = 2153 SCI_GETLINECOUNT = 2154 SCI_SETMARGINLEFT = 2155 SCI_GETMARGINLEFT = 2156 SCI_SETMARGINRIGHT = 2157 SCI_GETMARGINRIGHT = 2158 SCI_GETMODIFY = 2159 SCI_SETSEL = 2160 SCI_GETSELTEXT = 2161 SCI_GETTEXTRANGE = 2162 SCI_HIDESELECTION = 2163 SCI_POINTXFROMPOSITION = 2164 SCI_POINTYFROMPOSITION = 2165 SCI_LINEFROMPOSITION = 2166 SCI_POSITIONFROMLINE = 2167 SCI_LINESCROLL = 2168 SCI_SCROLLCARET = 2169 SCI_REPLACESEL = 2170 SCI_SETREADONLY = 2171 SCI_NULL = 2172 SCI_CANPASTE = 2173 
SCI_CANUNDO = 2174 SCI_EMPTYUNDOBUFFER = 2175 SCI_UNDO = 2176 SCI_CUT = 2177 SCI_COPY = 2178 SCI_PASTE = 2179 SCI_CLEAR = 2180 SCI_SETTEXT = 2181 SCI_GETTEXT = 2182 SCI_GETTEXTLENGTH = 2183 SCI_GETDIRECTFUNCTION = 2184 SCI_GETDIRECTPOINTER = 2185 SCI_SETOVERTYPE = 2186 SCI_GETOVERTYPE = 2187 SCI_SETCARETWIDTH = 2188 SCI_GETCARETWIDTH = 2189 SCI_SETTARGETSTART = 2190 SCI_GETTARGETSTART = 2191 SCI_SETTARGETEND = 2192 SCI_GETTARGETEND = 2193 SCI_REPLACETARGET = 2194 SCI_REPLACETARGETRE = 2195 SCI_SEARCHINTARGET = 2197 SCI_SETSEARCHFLAGS = 2198 SCI_GETSEARCHFLAGS = 2199 SCI_CALLTIPSHOW = 2200 SCI_CALLTIPCANCEL = 2201 SCI_CALLTIPACTIVE = 2202 SCI_CALLTIPPOSSTART = 2203 SCI_CALLTIPSETHLT = 2204 SCI_CALLTIPSETBACK = 2205 SCI_CALLTIPSETFORE = 2206 SCI_CALLTIPSETFOREHLT = 2207 SCI_CALLTIPUSESTYLE = 2212 SCI_VISIBLEFROMDOCLINE = 2220 SCI_DOCLINEFROMVISIBLE = 2221 SCI_WRAPCOUNT = 2235 SC_FOLDLEVELBASE = 0x400 SC_FOLDLEVELWHITEFLAG = 0x1000 SC_FOLDLEVELHEADERFLAG = 0x2000 SC_FOLDLEVELBOXHEADERFLAG = 0x4000 SC_FOLDLEVELBOXFOOTERFLAG = 0x8000 SC_FOLDLEVELCONTRACTED = 0x10000 SC_FOLDLEVELUNINDENT = 0x20000 SC_FOLDLEVELNUMBERMASK = 0x0FFF SCI_SETFOLDLEVEL = 2222 SCI_GETFOLDLEVEL = 2223 SCI_GETLASTCHILD = 2224 SCI_GETFOLDPARENT = 2225 SCI_SHOWLINES = 2226 SCI_HIDELINES = 2227 SCI_GETLINEVISIBLE = 2228 SCI_SETFOLDEXPANDED = 2229 SCI_GETFOLDEXPANDED = 2230 SCI_TOGGLEFOLD = 2231 SCI_ENSUREVISIBLE = 2232 SC_FOLDFLAG_LINEBEFORE_EXPANDED = 0x0002 SC_FOLDFLAG_LINEBEFORE_CONTRACTED = 0x0004 SC_FOLDFLAG_LINEAFTER_EXPANDED = 0x0008 SC_FOLDFLAG_LINEAFTER_CONTRACTED = 0x0010 SC_FOLDFLAG_LEVELNUMBERS = 0x0040 SC_FOLDFLAG_BOX = 0x0001 SCI_SETFOLDFLAGS = 2233 SCI_ENSUREVISIBLEENFORCEPOLICY = 2234 SCI_SETTABINDENTS = 2260 SCI_GETTABINDENTS = 2261 SCI_SETBACKSPACEUNINDENTS = 2262 SCI_GETBACKSPACEUNINDENTS = 2263 SC_TIME_FOREVER = 10000000 SCI_SETMOUSEDWELLTIME = 2264 SCI_GETMOUSEDWELLTIME = 2265 SCI_WORDSTARTPOSITION = 2266 SCI_WORDENDPOSITION = 2267 SC_WRAP_NONE = 0 SC_WRAP_WORD = 1 SC_WRAP_CHAR = 
2 SCI_SETWRAPMODE = 2268 SCI_GETWRAPMODE = 2269 SC_WRAPVISUALFLAG_NONE = 0x0000 SC_WRAPVISUALFLAG_END = 0x0001 SC_WRAPVISUALFLAG_START = 0x0002 SCI_SETWRAPVISUALFLAGS = 2460 SCI_GETWRAPVISUALFLAGS = 2461 SC_WRAPVISUALFLAGLOC_DEFAULT = 0x0000 SC_WRAPVISUALFLAGLOC_END_BY_TEXT = 0x0001 SC_WRAPVISUALFLAGLOC_START_BY_TEXT = 0x0002 SCI_SETWRAPVISUALFLAGSLOCATION = 2462 SCI_GETWRAPVISUALFLAGSLOCATION = 2463 SCI_SETWRAPSTARTINDENT = 2464 SCI_GETWRAPSTARTINDENT = 2465 SC_CACHE_NONE = 0 SC_CACHE_CARET = 1 SC_CACHE_PAGE = 2 SC_CACHE_DOCUMENT = 3 SCI_SETLAYOUTCACHE = 2272 SCI_GETLAYOUTCACHE = 2273 SCI_SETSCROLLWIDTH = 2274 SCI_GETSCROLLWIDTH = 2275 SCI_SETSCROLLWIDTHTRACKING = 2516 SCI_GETSCROLLWIDTHTRACKING = 2517 SCI_TEXTWIDTH = 2276 SCI_SETENDATLASTLINE = 2277 SCI_GETENDATLASTLINE = 2278 SCI_TEXTHEIGHT = 2279 SCI_SETVSCROLLBAR = 2280 SCI_GETVSCROLLBAR = 2281 SCI_APPENDTEXT = 2282 SCI_GETTWOPHASEDRAW = 2283 SCI_SETTWOPHASEDRAW = 2284 SCI_TARGETFROMSELECTION = 2287 SCI_LINESJOIN = 2288 SCI_LINESSPLIT = 2289 SCI_SETFOLDMARGINCOLOUR = 2290 SCI_SETFOLDMARGINHICOLOUR = 2291 SCI_LINEDOWN = 2300 SCI_LINEDOWNEXTEND = 2301 SCI_LINEUP = 2302 SCI_LINEUPEXTEND = 2303 SCI_CHARLEFT = 2304 SCI_CHARLEFTEXTEND = 2305 SCI_CHARRIGHT = 2306 SCI_CHARRIGHTEXTEND = 2307 SCI_WORDLEFT = 2308 SCI_WORDLEFTEXTEND = 2309 SCI_WORDRIGHT = 2310 SCI_WORDRIGHTEXTEND = 2311 SCI_HOME = 2312 SCI_HOMEEXTEND = 2313 SCI_LINEEND = 2314 SCI_LINEENDEXTEND = 2315 SCI_DOCUMENTSTART = 2316 SCI_DOCUMENTSTARTEXTEND = 2317 SCI_DOCUMENTEND = 2318 SCI_DOCUMENTENDEXTEND = 2319 SCI_PAGEUP = 2320 SCI_PAGEUPEXTEND = 2321 SCI_PAGEDOWN = 2322 SCI_PAGEDOWNEXTEND = 2323 SCI_EDITTOGGLEOVERTYPE = 2324 SCI_CANCEL = 2325 SCI_DELETEBACK = 2326 SCI_TAB = 2327 SCI_BACKTAB = 2328 SCI_NEWLINE = 2329 SCI_FORMFEED = 2330 SCI_VCHOME = 2331 SCI_VCHOMEEXTEND = 2332 SCI_ZOOMIN = 2333 SCI_ZOOMOUT = 2334 SCI_DELWORDLEFT = 2335 SCI_DELWORDRIGHT = 2336 SCI_DELWORDRIGHTEND = 2518 SCI_LINECUT = 2337 SCI_LINEDELETE = 2338 SCI_LINETRANSPOSE = 2339 
SCI_LINEDUPLICATE = 2404 SCI_LOWERCASE = 2340 SCI_UPPERCASE = 2341 SCI_LINESCROLLDOWN = 2342 SCI_LINESCROLLUP = 2343 SCI_DELETEBACKNOTLINE = 2344 SCI_HOMEDISPLAY = 2345 SCI_HOMEDISPLAYEXTEND = 2346 SCI_LINEENDDISPLAY = 2347 SCI_LINEENDDISPLAYEXTEND = 2348 SCI_HOMEWRAP = 2349 SCI_HOMEWRAPEXTEND = 2450 SCI_LINEENDWRAP = 2451 SCI_LINEENDWRAPEXTEND = 2452 SCI_VCHOMEWRAP = 2453 SCI_VCHOMEWRAPEXTEND = 2454 SCI_LINECOPY = 2455 SCI_MOVECARETINSIDEVIEW = 2401 SCI_LINELENGTH = 2350 SCI_BRACEHIGHLIGHT = 2351 SCI_BRACEBADLIGHT = 2352 SCI_BRACEMATCH = 2353 SCI_GETVIEWEOL = 2355 SCI_SETVIEWEOL = 2356 SCI_GETDOCPOINTER = 2357 SCI_SETDOCPOINTER = 2358 SCI_SETMODEVENTMASK = 2359 EDGE_NONE = 0 EDGE_LINE = 1 EDGE_BACKGROUND = 2 SCI_GETEDGECOLUMN = 2360 SCI_SETEDGECOLUMN = 2361 SCI_GETEDGEMODE = 2362 SCI_SETEDGEMODE = 2363 SCI_GETEDGECOLOUR = 2364 SCI_SETEDGECOLOUR = 2365 SCI_SEARCHANCHOR = 2366 SCI_SEARCHNEXT = 2367 SCI_SEARCHPREV = 2368 SCI_LINESONSCREEN = 2370 SCI_USEPOPUP = 2371 SCI_SELECTIONISRECTANGLE = 2372 SCI_SETZOOM = 2373 SCI_GETZOOM = 2374 SCI_CREATEDOCUMENT = 2375 SCI_ADDREFDOCUMENT = 2376 SCI_RELEASEDOCUMENT = 2377 SCI_GETMODEVENTMASK = 2378 SCI_SETFOCUS = 2380 SCI_GETFOCUS = 2381 SCI_SETSTATUS = 2382 SCI_GETSTATUS = 2383 SCI_SETMOUSEDOWNCAPTURES = 2384 SCI_GETMOUSEDOWNCAPTURES = 2385 SC_CURSORNORMAL = -1 SC_CURSORWAIT = 4 SCI_SETCURSOR = 2386 SCI_GETCURSOR = 2387 SCI_SETCONTROLCHARSYMBOL = 2388 SCI_GETCONTROLCHARSYMBOL = 2389 SCI_WORDPARTLEFT = 2390 SCI_WORDPARTLEFTEXTEND = 2391 SCI_WORDPARTRIGHT = 2392 SCI_WORDPARTRIGHTEXTEND = 2393 VISIBLE_SLOP = 0x01 VISIBLE_STRICT = 0x04 SCI_SETVISIBLEPOLICY = 2394 SCI_DELLINELEFT = 2395 SCI_DELLINERIGHT = 2396 SCI_SETXOFFSET = 2397 SCI_GETXOFFSET = 2398 SCI_CHOOSECARETX = 2399 SCI_GRABFOCUS = 2400 CARET_SLOP = 0x01 CARET_STRICT = 0x04 CARET_JUMPS = 0x10 CARET_EVEN = 0x08 SCI_SETXCARETPOLICY = 2402 SCI_SETYCARETPOLICY = 2403 SCI_SETPRINTWRAPMODE = 2406 SCI_GETPRINTWRAPMODE = 2407 SCI_SETHOTSPOTACTIVEFORE = 2410 
SCI_GETHOTSPOTACTIVEFORE = 2494 SCI_SETHOTSPOTACTIVEBACK = 2411 SCI_GETHOTSPOTACTIVEBACK = 2495 SCI_SETHOTSPOTACTIVEUNDERLINE = 2412 SCI_GETHOTSPOTACTIVEUNDERLINE = 2496 SCI_SETHOTSPOTSINGLELINE = 2421 SCI_GETHOTSPOTSINGLELINE = 2497 SCI_PARADOWN = 2413 SCI_PARADOWNEXTEND = 2414 SCI_PARAUP = 2415 SCI_PARAUPEXTEND = 2416 SCI_POSITIONBEFORE = 2417 SCI_POSITIONAFTER = 2418 SCI_COPYRANGE = 2419 SCI_COPYTEXT = 2420 SC_SEL_STREAM = 0 SC_SEL_RECTANGLE = 1 SC_SEL_LINES = 2 SCI_SETSELECTIONMODE = 2422 SCI_GETSELECTIONMODE = 2423 SCI_GETLINESELSTARTPOSITION = 2424 SCI_GETLINESELENDPOSITION = 2425 SCI_LINEDOWNRECTEXTEND = 2426 SCI_LINEUPRECTEXTEND = 2427 SCI_CHARLEFTRECTEXTEND = 2428 SCI_CHARRIGHTRECTEXTEND = 2429 SCI_HOMERECTEXTEND = 2430 SCI_VCHOMERECTEXTEND = 2431 SCI_LINEENDRECTEXTEND = 2432 SCI_PAGEUPRECTEXTEND = 2433 SCI_PAGEDOWNRECTEXTEND = 2434 SCI_STUTTEREDPAGEUP = 2435 SCI_STUTTEREDPAGEUPEXTEND = 2436 SCI_STUTTEREDPAGEDOWN = 2437 SCI_STUTTEREDPAGEDOWNEXTEND = 2438 SCI_WORDLEFTEND = 2439 SCI_WORDLEFTENDEXTEND = 2440 SCI_WORDRIGHTEND = 2441 SCI_WORDRIGHTENDEXTEND = 2442 SCI_SETWHITESPACECHARS = 2443 SCI_SETCHARSDEFAULT = 2444 SCI_AUTOCGETCURRENT = 2445 SCI_ALLOCATE = 2446 SCI_TARGETASUTF8 = 2447 SCI_SETLENGTHFORENCODE = 2448 SCI_ENCODEDFROMUTF8 = 2449 SCI_FINDCOLUMN = 2456 SCI_GETCARETSTICKY = 2457 SCI_SETCARETSTICKY = 2458 SCI_TOGGLECARETSTICKY = 2459 SCI_SETPASTECONVERTENDINGS = 2467 SCI_GETPASTECONVERTENDINGS = 2468 SCI_SELECTIONDUPLICATE = 2469 SC_ALPHA_TRANSPARENT = 0 SC_ALPHA_OPAQUE = 255 SC_ALPHA_NOALPHA = 256 SCI_SETCARETLINEBACKALPHA = 2470 SCI_GETCARETLINEBACKALPHA = 2471 CARETSTYLE_INVISIBLE = 0 CARETSTYLE_LINE = 1 CARETSTYLE_BLOCK = 2 SCI_SETCARETSTYLE = 2512 SCI_GETCARETSTYLE = 2513 SCI_SETINDICATORCURRENT = 2500 SCI_GETINDICATORCURRENT = 2501 SCI_SETINDICATORVALUE = 2502 SCI_GETINDICATORVALUE = 2503 SCI_INDICATORFILLRANGE = 2504 SCI_INDICATORCLEARRANGE = 2505 SCI_INDICATORALLONFOR = 2506 SCI_INDICATORVALUEAT = 2507 SCI_INDICATORSTART = 2508 
SCI_INDICATOREND = 2509 SCI_SETPOSITIONCACHE = 2514 SCI_GETPOSITIONCACHE = 2515 SCI_COPYALLOWLINE = 2519 SCI_GETCHARACTERPOINTER = 2520 SCI_SETKEYSUNICODE = 2521 SCI_GETKEYSUNICODE = 2522 SCI_STARTRECORD = 3001 SCI_STOPRECORD = 3002 SCI_SETLEXER = 4001 SCI_GETLEXER = 4002 SCI_COLOURISE = 4003 SCI_SETPROPERTY = 4004 KEYWORDSET_MAX = 8 SCI_SETKEYWORDS = 4005 SCI_SETLEXERLANGUAGE = 4006 SCI_LOADLEXERLIBRARY = 4007 SCI_GETPROPERTY = 4008 SCI_GETPROPERTYEXPANDED = 4009 SCI_GETPROPERTYINT = 4010 SCI_GETSTYLEBITSNEEDED = 4011 SC_MOD_INSERTTEXT = 0x1 SC_MOD_DELETETEXT = 0x2 SC_MOD_CHANGESTYLE = 0x4 SC_MOD_CHANGEFOLD = 0x8 SC_PERFORMED_USER = 0x10 SC_PERFORMED_UNDO = 0x20 SC_PERFORMED_REDO = 0x40 SC_MULTISTEPUNDOREDO = 0x80 SC_LASTSTEPINUNDOREDO = 0x100 SC_MOD_CHANGEMARKER = 0x200 SC_MOD_BEFOREINSERT = 0x400 SC_MOD_BEFOREDELETE = 0x800 SC_MULTILINEUNDOREDO = 0x1000 SC_STARTACTION = 0x2000 SC_MOD_CHANGEINDICATOR = 0x4000 SC_MOD_CHANGELINESTATE = 0x8000 SC_MODEVENTMASKALL = 0xFFFF SCEN_CHANGE = 768 SCEN_SETFOCUS = 512 SCEN_KILLFOCUS = 256 SCK_DOWN = 300 SCK_UP = 301 SCK_LEFT = 302 SCK_RIGHT = 303 SCK_HOME = 304 SCK_END = 305 SCK_PRIOR = 306 SCK_NEXT = 307 SCK_DELETE = 308 SCK_INSERT = 309 SCK_ESCAPE = 7 SCK_BACK = 8 SCK_TAB = 9 SCK_RETURN = 13 SCK_ADD = 310 SCK_SUBTRACT = 311 SCK_DIVIDE = 312 SCK_WIN = 313 SCK_RWIN = 314 SCK_MENU = 315 SCMOD_NORM = 0 SCMOD_SHIFT = 1 SCMOD_CTRL = 2 SCMOD_ALT = 4 SCN_STYLENEEDED = 2000 SCN_CHARADDED = 2001 SCN_SAVEPOINTREACHED = 2002 SCN_SAVEPOINTLEFT = 2003 SCN_MODIFYATTEMPTRO = 2004 SCN_KEY = 2005 SCN_DOUBLECLICK = 2006 SCN_UPDATEUI = 2007 SCN_MODIFIED = 2008 SCN_MACRORECORD = 2009 SCN_MARGINCLICK = 2010 SCN_NEEDSHOWN = 2011 SCN_PAINTED = 2013 SCN_USERLISTSELECTION = 2014 SCN_URIDROPPED = 2015 SCN_DWELLSTART = 2016 SCN_DWELLEND = 2017 SCN_ZOOM = 2018 SCN_HOTSPOTCLICK = 2019 SCN_HOTSPOTDOUBLECLICK = 2020 SCN_CALLTIPCLICK = 2021 SCN_AUTOCSELECTION = 2022 SCN_INDICATORCLICK = 2023 SCN_INDICATORRELEASE = 2024 SCN_AUTOCCANCELLED = 2025 
SCI_SETCARETPOLICY = 2369 CARET_CENTER = 0x02 CARET_XEVEN = 0x08 CARET_XJUMPS = 0x10 SCN_POSCHANGED = 2012 SCN_CHECKBRACE = 2007 # Generated by h2py from Include\scilexer.h SCLEX_CONTAINER = 0 SCLEX_NULL = 1 SCLEX_PYTHON = 2 SCLEX_CPP = 3 SCLEX_HTML = 4 SCLEX_XML = 5 SCLEX_PERL = 6 SCLEX_SQL = 7 SCLEX_VB = 8 SCLEX_PROPERTIES = 9 SCLEX_ERRORLIST = 10 SCLEX_MAKEFILE = 11 SCLEX_BATCH = 12 SCLEX_XCODE = 13 SCLEX_LATEX = 14 SCLEX_LUA = 15 SCLEX_DIFF = 16 SCLEX_CONF = 17 SCLEX_PASCAL = 18 SCLEX_AVE = 19 SCLEX_ADA = 20 SCLEX_LISP = 21 SCLEX_RUBY = 22 SCLEX_EIFFEL = 23 SCLEX_EIFFELKW = 24 SCLEX_TCL = 25 SCLEX_NNCRONTAB = 26 SCLEX_BULLANT = 27 SCLEX_VBSCRIPT = 28 SCLEX_BAAN = 31 SCLEX_MATLAB = 32 SCLEX_SCRIPTOL = 33 SCLEX_ASM = 34 SCLEX_CPPNOCASE = 35 SCLEX_FORTRAN = 36 SCLEX_F77 = 37 SCLEX_CSS = 38 SCLEX_POV = 39 SCLEX_LOUT = 40 SCLEX_ESCRIPT = 41 SCLEX_PS = 42 SCLEX_NSIS = 43 SCLEX_MMIXAL = 44 SCLEX_CLW = 45 SCLEX_CLWNOCASE = 46 SCLEX_LOT = 47 SCLEX_YAML = 48 SCLEX_TEX = 49 SCLEX_METAPOST = 50 SCLEX_POWERBASIC = 51 SCLEX_FORTH = 52 SCLEX_ERLANG = 53 SCLEX_OCTAVE = 54 SCLEX_MSSQL = 55 SCLEX_VERILOG = 56 SCLEX_KIX = 57 SCLEX_GUI4CLI = 58 SCLEX_SPECMAN = 59 SCLEX_AU3 = 60 SCLEX_APDL = 61 SCLEX_BASH = 62 SCLEX_ASN1 = 63 SCLEX_VHDL = 64 SCLEX_CAML = 65 SCLEX_BLITZBASIC = 66 SCLEX_PUREBASIC = 67 SCLEX_HASKELL = 68 SCLEX_PHPSCRIPT = 69 SCLEX_TADS3 = 70 SCLEX_REBOL = 71 SCLEX_SMALLTALK = 72 SCLEX_FLAGSHIP = 73 SCLEX_CSOUND = 74 SCLEX_FREEBASIC = 75 SCLEX_INNOSETUP = 76 SCLEX_OPAL = 77 SCLEX_SPICE = 78 SCLEX_D = 79 SCLEX_CMAKE = 80 SCLEX_GAP = 81 SCLEX_PLM = 82 SCLEX_PROGRESS = 83 SCLEX_ABAQUS = 84 SCLEX_ASYMPTOTE = 85 SCLEX_R = 86 SCLEX_MAGIK = 87 SCLEX_POWERSHELL = 88 SCLEX_MYSQL = 89 SCLEX_PO = 90 SCLEX_AUTOMATIC = 1000 SCE_P_DEFAULT = 0 SCE_P_COMMENTLINE = 1 SCE_P_NUMBER = 2 SCE_P_STRING = 3 SCE_P_CHARACTER = 4 SCE_P_WORD = 5 SCE_P_TRIPLE = 6 SCE_P_TRIPLEDOUBLE = 7 SCE_P_CLASSNAME = 8 SCE_P_DEFNAME = 9 SCE_P_OPERATOR = 10 SCE_P_IDENTIFIER = 11 SCE_P_COMMENTBLOCK = 12 
SCE_P_STRINGEOL = 13 SCE_P_WORD2 = 14 SCE_P_DECORATOR = 15 SCE_C_DEFAULT = 0 SCE_C_COMMENT = 1 SCE_C_COMMENTLINE = 2 SCE_C_COMMENTDOC = 3 SCE_C_NUMBER = 4 SCE_C_WORD = 5 SCE_C_STRING = 6 SCE_C_CHARACTER = 7 SCE_C_UUID = 8 SCE_C_PREPROCESSOR = 9 SCE_C_OPERATOR = 10 SCE_C_IDENTIFIER = 11 SCE_C_STRINGEOL = 12 SCE_C_VERBATIM = 13 SCE_C_REGEX = 14 SCE_C_COMMENTLINEDOC = 15 SCE_C_WORD2 = 16 SCE_C_COMMENTDOCKEYWORD = 17 SCE_C_COMMENTDOCKEYWORDERROR = 18 SCE_C_GLOBALCLASS = 19 SCE_D_DEFAULT = 0 SCE_D_COMMENT = 1 SCE_D_COMMENTLINE = 2 SCE_D_COMMENTDOC = 3 SCE_D_COMMENTNESTED = 4 SCE_D_NUMBER = 5 SCE_D_WORD = 6 SCE_D_WORD2 = 7 SCE_D_WORD3 = 8 SCE_D_TYPEDEF = 9 SCE_D_STRING = 10 SCE_D_STRINGEOL = 11 SCE_D_CHARACTER = 12 SCE_D_OPERATOR = 13 SCE_D_IDENTIFIER = 14 SCE_D_COMMENTLINEDOC = 15 SCE_D_COMMENTDOCKEYWORD = 16 SCE_D_COMMENTDOCKEYWORDERROR = 17 SCE_TCL_DEFAULT = 0 SCE_TCL_COMMENT = 1 SCE_TCL_COMMENTLINE = 2 SCE_TCL_NUMBER = 3 SCE_TCL_WORD_IN_QUOTE = 4 SCE_TCL_IN_QUOTE = 5 SCE_TCL_OPERATOR = 6 SCE_TCL_IDENTIFIER = 7 SCE_TCL_SUBSTITUTION = 8 SCE_TCL_SUB_BRACE = 9 SCE_TCL_MODIFIER = 10 SCE_TCL_EXPAND = 11 SCE_TCL_WORD = 12 SCE_TCL_WORD2 = 13 SCE_TCL_WORD3 = 14 SCE_TCL_WORD4 = 15 SCE_TCL_WORD5 = 16 SCE_TCL_WORD6 = 17 SCE_TCL_WORD7 = 18 SCE_TCL_WORD8 = 19 SCE_TCL_COMMENT_BOX = 20 SCE_TCL_BLOCK_COMMENT = 21 SCE_H_DEFAULT = 0 SCE_H_TAG = 1 SCE_H_TAGUNKNOWN = 2 SCE_H_ATTRIBUTE = 3 SCE_H_ATTRIBUTEUNKNOWN = 4 SCE_H_NUMBER = 5 SCE_H_DOUBLESTRING = 6 SCE_H_SINGLESTRING = 7 SCE_H_OTHER = 8 SCE_H_COMMENT = 9 SCE_H_ENTITY = 10 SCE_H_TAGEND = 11 SCE_H_XMLSTART = 12 SCE_H_XMLEND = 13 SCE_H_SCRIPT = 14 SCE_H_ASP = 15 SCE_H_ASPAT = 16 SCE_H_CDATA = 17 SCE_H_QUESTION = 18 SCE_H_VALUE = 19 SCE_H_XCCOMMENT = 20 SCE_H_SGML_DEFAULT = 21 SCE_H_SGML_COMMAND = 22 SCE_H_SGML_1ST_PARAM = 23 SCE_H_SGML_DOUBLESTRING = 24 SCE_H_SGML_SIMPLESTRING = 25 SCE_H_SGML_ERROR = 26 SCE_H_SGML_SPECIAL = 27 SCE_H_SGML_ENTITY = 28 SCE_H_SGML_COMMENT = 29 SCE_H_SGML_1ST_PARAM_COMMENT = 30 SCE_H_SGML_BLOCK_DEFAULT = 
31 SCE_HJ_START = 40 SCE_HJ_DEFAULT = 41 SCE_HJ_COMMENT = 42 SCE_HJ_COMMENTLINE = 43 SCE_HJ_COMMENTDOC = 44 SCE_HJ_NUMBER = 45 SCE_HJ_WORD = 46 SCE_HJ_KEYWORD = 47 SCE_HJ_DOUBLESTRING = 48 SCE_HJ_SINGLESTRING = 49 SCE_HJ_SYMBOLS = 50 SCE_HJ_STRINGEOL = 51 SCE_HJ_REGEX = 52 SCE_HJA_START = 55 SCE_HJA_DEFAULT = 56 SCE_HJA_COMMENT = 57 SCE_HJA_COMMENTLINE = 58 SCE_HJA_COMMENTDOC = 59 SCE_HJA_NUMBER = 60 SCE_HJA_WORD = 61 SCE_HJA_KEYWORD = 62 SCE_HJA_DOUBLESTRING = 63 SCE_HJA_SINGLESTRING = 64 SCE_HJA_SYMBOLS = 65 SCE_HJA_STRINGEOL = 66 SCE_HJA_REGEX = 67 SCE_HB_START = 70 SCE_HB_DEFAULT = 71 SCE_HB_COMMENTLINE = 72 SCE_HB_NUMBER = 73 SCE_HB_WORD = 74 SCE_HB_STRING = 75 SCE_HB_IDENTIFIER = 76 SCE_HB_STRINGEOL = 77 SCE_HBA_START = 80 SCE_HBA_DEFAULT = 81 SCE_HBA_COMMENTLINE = 82 SCE_HBA_NUMBER = 83 SCE_HBA_WORD = 84 SCE_HBA_STRING = 85 SCE_HBA_IDENTIFIER = 86 SCE_HBA_STRINGEOL = 87 SCE_HP_START = 90 SCE_HP_DEFAULT = 91 SCE_HP_COMMENTLINE = 92 SCE_HP_NUMBER = 93 SCE_HP_STRING = 94 SCE_HP_CHARACTER = 95 SCE_HP_WORD = 96 SCE_HP_TRIPLE = 97 SCE_HP_TRIPLEDOUBLE = 98 SCE_HP_CLASSNAME = 99 SCE_HP_DEFNAME = 100 SCE_HP_OPERATOR = 101 SCE_HP_IDENTIFIER = 102 SCE_HPHP_COMPLEX_VARIABLE = 104 SCE_HPA_START = 105 SCE_HPA_DEFAULT = 106 SCE_HPA_COMMENTLINE = 107 SCE_HPA_NUMBER = 108 SCE_HPA_STRING = 109 SCE_HPA_CHARACTER = 110 SCE_HPA_WORD = 111 SCE_HPA_TRIPLE = 112 SCE_HPA_TRIPLEDOUBLE = 113 SCE_HPA_CLASSNAME = 114 SCE_HPA_DEFNAME = 115 SCE_HPA_OPERATOR = 116 SCE_HPA_IDENTIFIER = 117 SCE_HPHP_DEFAULT = 118 SCE_HPHP_HSTRING = 119 SCE_HPHP_SIMPLESTRING = 120 SCE_HPHP_WORD = 121 SCE_HPHP_NUMBER = 122 SCE_HPHP_VARIABLE = 123 SCE_HPHP_COMMENT = 124 SCE_HPHP_COMMENTLINE = 125 SCE_HPHP_HSTRING_VARIABLE = 126 SCE_HPHP_OPERATOR = 127 SCE_PL_DEFAULT = 0 SCE_PL_ERROR = 1 SCE_PL_COMMENTLINE = 2 SCE_PL_POD = 3 SCE_PL_NUMBER = 4 SCE_PL_WORD = 5 SCE_PL_STRING = 6 SCE_PL_CHARACTER = 7 SCE_PL_PUNCTUATION = 8 SCE_PL_PREPROCESSOR = 9 SCE_PL_OPERATOR = 10 SCE_PL_IDENTIFIER = 11 SCE_PL_SCALAR = 12 
SCE_PL_ARRAY = 13 SCE_PL_HASH = 14 SCE_PL_SYMBOLTABLE = 15 SCE_PL_VARIABLE_INDEXER = 16 SCE_PL_REGEX = 17 SCE_PL_REGSUBST = 18 SCE_PL_LONGQUOTE = 19 SCE_PL_BACKTICKS = 20 SCE_PL_DATASECTION = 21 SCE_PL_HERE_DELIM = 22 SCE_PL_HERE_Q = 23 SCE_PL_HERE_QQ = 24 SCE_PL_HERE_QX = 25 SCE_PL_STRING_Q = 26 SCE_PL_STRING_QQ = 27 SCE_PL_STRING_QX = 28 SCE_PL_STRING_QR = 29 SCE_PL_STRING_QW = 30 SCE_PL_POD_VERB = 31 SCE_PL_SUB_PROTOTYPE = 40 SCE_PL_FORMAT_IDENT = 41 SCE_PL_FORMAT = 42 SCE_RB_DEFAULT = 0 SCE_RB_ERROR = 1 SCE_RB_COMMENTLINE = 2 SCE_RB_POD = 3 SCE_RB_NUMBER = 4 SCE_RB_WORD = 5 SCE_RB_STRING = 6 SCE_RB_CHARACTER = 7 SCE_RB_CLASSNAME = 8 SCE_RB_DEFNAME = 9 SCE_RB_OPERATOR = 10 SCE_RB_IDENTIFIER = 11 SCE_RB_REGEX = 12 SCE_RB_GLOBAL = 13 SCE_RB_SYMBOL = 14 SCE_RB_MODULE_NAME = 15 SCE_RB_INSTANCE_VAR = 16 SCE_RB_CLASS_VAR = 17 SCE_RB_BACKTICKS = 18 SCE_RB_DATASECTION = 19 SCE_RB_HERE_DELIM = 20 SCE_RB_HERE_Q = 21 SCE_RB_HERE_QQ = 22 SCE_RB_HERE_QX = 23 SCE_RB_STRING_Q = 24 SCE_RB_STRING_QQ = 25 SCE_RB_STRING_QX = 26 SCE_RB_STRING_QR = 27 SCE_RB_STRING_QW = 28 SCE_RB_WORD_DEMOTED = 29 SCE_RB_STDIN = 30 SCE_RB_STDOUT = 31 SCE_RB_STDERR = 40 SCE_RB_UPPER_BOUND = 41 SCE_B_DEFAULT = 0 SCE_B_COMMENT = 1 SCE_B_NUMBER = 2 SCE_B_KEYWORD = 3 SCE_B_STRING = 4 SCE_B_PREPROCESSOR = 5 SCE_B_OPERATOR = 6 SCE_B_IDENTIFIER = 7 SCE_B_DATE = 8 SCE_B_STRINGEOL = 9 SCE_B_KEYWORD2 = 10 SCE_B_KEYWORD3 = 11 SCE_B_KEYWORD4 = 12 SCE_B_CONSTANT = 13 SCE_B_ASM = 14 SCE_B_LABEL = 15 SCE_B_ERROR = 16 SCE_B_HEXNUMBER = 17 SCE_B_BINNUMBER = 18 SCE_PROPS_DEFAULT = 0 SCE_PROPS_COMMENT = 1 SCE_PROPS_SECTION = 2 SCE_PROPS_ASSIGNMENT = 3 SCE_PROPS_DEFVAL = 4 SCE_PROPS_KEY = 5 SCE_L_DEFAULT = 0 SCE_L_COMMAND = 1 SCE_L_TAG = 2 SCE_L_MATH = 3 SCE_L_COMMENT = 4 SCE_LUA_DEFAULT = 0 SCE_LUA_COMMENT = 1 SCE_LUA_COMMENTLINE = 2 SCE_LUA_COMMENTDOC = 3 SCE_LUA_NUMBER = 4 SCE_LUA_WORD = 5 SCE_LUA_STRING = 6 SCE_LUA_CHARACTER = 7 SCE_LUA_LITERALSTRING = 8 SCE_LUA_PREPROCESSOR = 9 SCE_LUA_OPERATOR = 10 
SCE_LUA_IDENTIFIER = 11 SCE_LUA_STRINGEOL = 12 SCE_LUA_WORD2 = 13 SCE_LUA_WORD3 = 14 SCE_LUA_WORD4 = 15 SCE_LUA_WORD5 = 16 SCE_LUA_WORD6 = 17 SCE_LUA_WORD7 = 18 SCE_LUA_WORD8 = 19 SCE_ERR_DEFAULT = 0 SCE_ERR_PYTHON = 1 SCE_ERR_GCC = 2 SCE_ERR_MS = 3 SCE_ERR_CMD = 4 SCE_ERR_BORLAND = 5 SCE_ERR_PERL = 6 SCE_ERR_NET = 7 SCE_ERR_LUA = 8 SCE_ERR_CTAG = 9 SCE_ERR_DIFF_CHANGED = 10 SCE_ERR_DIFF_ADDITION = 11 SCE_ERR_DIFF_DELETION = 12 SCE_ERR_DIFF_MESSAGE = 13 SCE_ERR_PHP = 14 SCE_ERR_ELF = 15 SCE_ERR_IFC = 16 SCE_ERR_IFORT = 17 SCE_ERR_ABSF = 18 SCE_ERR_TIDY = 19 SCE_ERR_JAVA_STACK = 20 SCE_ERR_VALUE = 21 SCE_BAT_DEFAULT = 0 SCE_BAT_COMMENT = 1 SCE_BAT_WORD = 2 SCE_BAT_LABEL = 3 SCE_BAT_HIDE = 4 SCE_BAT_COMMAND = 5 SCE_BAT_IDENTIFIER = 6 SCE_BAT_OPERATOR = 7 SCE_MAKE_DEFAULT = 0 SCE_MAKE_COMMENT = 1 SCE_MAKE_PREPROCESSOR = 2 SCE_MAKE_IDENTIFIER = 3 SCE_MAKE_OPERATOR = 4 SCE_MAKE_TARGET = 5 SCE_MAKE_IDEOL = 9 SCE_DIFF_DEFAULT = 0 SCE_DIFF_COMMENT = 1 SCE_DIFF_COMMAND = 2 SCE_DIFF_HEADER = 3 SCE_DIFF_POSITION = 4 SCE_DIFF_DELETED = 5 SCE_DIFF_ADDED = 6 SCE_DIFF_CHANGED = 7 SCE_CONF_DEFAULT = 0 SCE_CONF_COMMENT = 1 SCE_CONF_NUMBER = 2 SCE_CONF_IDENTIFIER = 3 SCE_CONF_EXTENSION = 4 SCE_CONF_PARAMETER = 5 SCE_CONF_STRING = 6 SCE_CONF_OPERATOR = 7 SCE_CONF_IP = 8 SCE_CONF_DIRECTIVE = 9 SCE_AVE_DEFAULT = 0 SCE_AVE_COMMENT = 1 SCE_AVE_NUMBER = 2 SCE_AVE_WORD = 3 SCE_AVE_STRING = 6 SCE_AVE_ENUM = 7 SCE_AVE_STRINGEOL = 8 SCE_AVE_IDENTIFIER = 9 SCE_AVE_OPERATOR = 10 SCE_AVE_WORD1 = 11 SCE_AVE_WORD2 = 12 SCE_AVE_WORD3 = 13 SCE_AVE_WORD4 = 14 SCE_AVE_WORD5 = 15 SCE_AVE_WORD6 = 16 SCE_ADA_DEFAULT = 0 SCE_ADA_WORD = 1 SCE_ADA_IDENTIFIER = 2 SCE_ADA_NUMBER = 3 SCE_ADA_DELIMITER = 4 SCE_ADA_CHARACTER = 5 SCE_ADA_CHARACTEREOL = 6 SCE_ADA_STRING = 7 SCE_ADA_STRINGEOL = 8 SCE_ADA_LABEL = 9 SCE_ADA_COMMENTLINE = 10 SCE_ADA_ILLEGAL = 11 SCE_BAAN_DEFAULT = 0 SCE_BAAN_COMMENT = 1 SCE_BAAN_COMMENTDOC = 2 SCE_BAAN_NUMBER = 3 SCE_BAAN_WORD = 4 SCE_BAAN_STRING = 5 SCE_BAAN_PREPROCESSOR = 6 
SCE_BAAN_OPERATOR = 7 SCE_BAAN_IDENTIFIER = 8 SCE_BAAN_STRINGEOL = 9 SCE_BAAN_WORD2 = 10 SCE_LISP_DEFAULT = 0 SCE_LISP_COMMENT = 1 SCE_LISP_NUMBER = 2 SCE_LISP_KEYWORD = 3 SCE_LISP_KEYWORD_KW = 4 SCE_LISP_SYMBOL = 5 SCE_LISP_STRING = 6 SCE_LISP_STRINGEOL = 8 SCE_LISP_IDENTIFIER = 9 SCE_LISP_OPERATOR = 10 SCE_LISP_SPECIAL = 11 SCE_LISP_MULTI_COMMENT = 12 SCE_EIFFEL_DEFAULT = 0 SCE_EIFFEL_COMMENTLINE = 1 SCE_EIFFEL_NUMBER = 2 SCE_EIFFEL_WORD = 3 SCE_EIFFEL_STRING = 4 SCE_EIFFEL_CHARACTER = 5 SCE_EIFFEL_OPERATOR = 6 SCE_EIFFEL_IDENTIFIER = 7 SCE_EIFFEL_STRINGEOL = 8 SCE_NNCRONTAB_DEFAULT = 0 SCE_NNCRONTAB_COMMENT = 1 SCE_NNCRONTAB_TASK = 2 SCE_NNCRONTAB_SECTION = 3 SCE_NNCRONTAB_KEYWORD = 4 SCE_NNCRONTAB_MODIFIER = 5 SCE_NNCRONTAB_ASTERISK = 6 SCE_NNCRONTAB_NUMBER = 7 SCE_NNCRONTAB_STRING = 8 SCE_NNCRONTAB_ENVIRONMENT = 9 SCE_NNCRONTAB_IDENTIFIER = 10 SCE_FORTH_DEFAULT = 0 SCE_FORTH_COMMENT = 1 SCE_FORTH_COMMENT_ML = 2 SCE_FORTH_IDENTIFIER = 3 SCE_FORTH_CONTROL = 4 SCE_FORTH_KEYWORD = 5 SCE_FORTH_DEFWORD = 6 SCE_FORTH_PREWORD1 = 7 SCE_FORTH_PREWORD2 = 8 SCE_FORTH_NUMBER = 9 SCE_FORTH_STRING = 10 SCE_FORTH_LOCALE = 11 SCE_MATLAB_DEFAULT = 0 SCE_MATLAB_COMMENT = 1 SCE_MATLAB_COMMAND = 2 SCE_MATLAB_NUMBER = 3 SCE_MATLAB_KEYWORD = 4 SCE_MATLAB_STRING = 5 SCE_MATLAB_OPERATOR = 6 SCE_MATLAB_IDENTIFIER = 7 SCE_MATLAB_DOUBLEQUOTESTRING = 8 SCE_SCRIPTOL_DEFAULT = 0 SCE_SCRIPTOL_WHITE = 1 SCE_SCRIPTOL_COMMENTLINE = 2 SCE_SCRIPTOL_PERSISTENT = 3 SCE_SCRIPTOL_CSTYLE = 4 SCE_SCRIPTOL_COMMENTBLOCK = 5 SCE_SCRIPTOL_NUMBER = 6 SCE_SCRIPTOL_STRING = 7 SCE_SCRIPTOL_CHARACTER = 8 SCE_SCRIPTOL_STRINGEOL = 9 SCE_SCRIPTOL_KEYWORD = 10 SCE_SCRIPTOL_OPERATOR = 11 SCE_SCRIPTOL_IDENTIFIER = 12 SCE_SCRIPTOL_TRIPLE = 13 SCE_SCRIPTOL_CLASSNAME = 14 SCE_SCRIPTOL_PREPROCESSOR = 15 SCE_ASM_DEFAULT = 0 SCE_ASM_COMMENT = 1 SCE_ASM_NUMBER = 2 SCE_ASM_STRING = 3 SCE_ASM_OPERATOR = 4 SCE_ASM_IDENTIFIER = 5 SCE_ASM_CPUINSTRUCTION = 6 SCE_ASM_MATHINSTRUCTION = 7 SCE_ASM_REGISTER = 8 SCE_ASM_DIRECTIVE = 9 
SCE_ASM_DIRECTIVEOPERAND = 10 SCE_ASM_COMMENTBLOCK = 11 SCE_ASM_CHARACTER = 12 SCE_ASM_STRINGEOL = 13 SCE_ASM_EXTINSTRUCTION = 14 SCE_F_DEFAULT = 0 SCE_F_COMMENT = 1 SCE_F_NUMBER = 2 SCE_F_STRING1 = 3 SCE_F_STRING2 = 4 SCE_F_STRINGEOL = 5 SCE_F_OPERATOR = 6 SCE_F_IDENTIFIER = 7 SCE_F_WORD = 8 SCE_F_WORD2 = 9 SCE_F_WORD3 = 10 SCE_F_PREPROCESSOR = 11 SCE_F_OPERATOR2 = 12 SCE_F_LABEL = 13 SCE_F_CONTINUATION = 14 SCE_CSS_DEFAULT = 0 SCE_CSS_TAG = 1 SCE_CSS_CLASS = 2 SCE_CSS_PSEUDOCLASS = 3 SCE_CSS_UNKNOWN_PSEUDOCLASS = 4 SCE_CSS_OPERATOR = 5 SCE_CSS_IDENTIFIER = 6 SCE_CSS_UNKNOWN_IDENTIFIER = 7 SCE_CSS_VALUE = 8 SCE_CSS_COMMENT = 9 SCE_CSS_ID = 10 SCE_CSS_IMPORTANT = 11 SCE_CSS_DIRECTIVE = 12 SCE_CSS_DOUBLESTRING = 13 SCE_CSS_SINGLESTRING = 14 SCE_CSS_IDENTIFIER2 = 15 SCE_CSS_ATTRIBUTE = 16 SCE_CSS_IDENTIFIER3 = 17 SCE_CSS_PSEUDOELEMENT = 18 SCE_CSS_EXTENDED_IDENTIFIER = 19 SCE_CSS_EXTENDED_PSEUDOCLASS = 20 SCE_CSS_EXTENDED_PSEUDOELEMENT = 21 SCE_POV_DEFAULT = 0 SCE_POV_COMMENT = 1 SCE_POV_COMMENTLINE = 2 SCE_POV_NUMBER = 3 SCE_POV_OPERATOR = 4 SCE_POV_IDENTIFIER = 5 SCE_POV_STRING = 6 SCE_POV_STRINGEOL = 7 SCE_POV_DIRECTIVE = 8 SCE_POV_BADDIRECTIVE = 9 SCE_POV_WORD2 = 10 SCE_POV_WORD3 = 11 SCE_POV_WORD4 = 12 SCE_POV_WORD5 = 13 SCE_POV_WORD6 = 14 SCE_POV_WORD7 = 15 SCE_POV_WORD8 = 16 SCE_LOUT_DEFAULT = 0 SCE_LOUT_COMMENT = 1 SCE_LOUT_NUMBER = 2 SCE_LOUT_WORD = 3 SCE_LOUT_WORD2 = 4 SCE_LOUT_WORD3 = 5 SCE_LOUT_WORD4 = 6 SCE_LOUT_STRING = 7 SCE_LOUT_OPERATOR = 8 SCE_LOUT_IDENTIFIER = 9 SCE_LOUT_STRINGEOL = 10 SCE_ESCRIPT_DEFAULT = 0 SCE_ESCRIPT_COMMENT = 1 SCE_ESCRIPT_COMMENTLINE = 2 SCE_ESCRIPT_COMMENTDOC = 3 SCE_ESCRIPT_NUMBER = 4 SCE_ESCRIPT_WORD = 5 SCE_ESCRIPT_STRING = 6 SCE_ESCRIPT_OPERATOR = 7 SCE_ESCRIPT_IDENTIFIER = 8 SCE_ESCRIPT_BRACE = 9 SCE_ESCRIPT_WORD2 = 10 SCE_ESCRIPT_WORD3 = 11 SCE_PS_DEFAULT = 0 SCE_PS_COMMENT = 1 SCE_PS_DSC_COMMENT = 2 SCE_PS_DSC_VALUE = 3 SCE_PS_NUMBER = 4 SCE_PS_NAME = 5 SCE_PS_KEYWORD = 6 SCE_PS_LITERAL = 7 SCE_PS_IMMEVAL = 8 
SCE_PS_PAREN_ARRAY = 9 SCE_PS_PAREN_DICT = 10 SCE_PS_PAREN_PROC = 11 SCE_PS_TEXT = 12 SCE_PS_HEXSTRING = 13 SCE_PS_BASE85STRING = 14 SCE_PS_BADSTRINGCHAR = 15 SCE_NSIS_DEFAULT = 0 SCE_NSIS_COMMENT = 1 SCE_NSIS_STRINGDQ = 2 SCE_NSIS_STRINGLQ = 3 SCE_NSIS_STRINGRQ = 4 SCE_NSIS_FUNCTION = 5 SCE_NSIS_VARIABLE = 6 SCE_NSIS_LABEL = 7 SCE_NSIS_USERDEFINED = 8 SCE_NSIS_SECTIONDEF = 9 SCE_NSIS_SUBSECTIONDEF = 10 SCE_NSIS_IFDEFINEDEF = 11 SCE_NSIS_MACRODEF = 12 SCE_NSIS_STRINGVAR = 13 SCE_NSIS_NUMBER = 14 SCE_NSIS_SECTIONGROUP = 15 SCE_NSIS_PAGEEX = 16 SCE_NSIS_FUNCTIONDEF = 17 SCE_NSIS_COMMENTBOX = 18 SCE_MMIXAL_LEADWS = 0 SCE_MMIXAL_COMMENT = 1 SCE_MMIXAL_LABEL = 2 SCE_MMIXAL_OPCODE = 3 SCE_MMIXAL_OPCODE_PRE = 4 SCE_MMIXAL_OPCODE_VALID = 5 SCE_MMIXAL_OPCODE_UNKNOWN = 6 SCE_MMIXAL_OPCODE_POST = 7 SCE_MMIXAL_OPERANDS = 8 SCE_MMIXAL_NUMBER = 9 SCE_MMIXAL_REF = 10 SCE_MMIXAL_CHAR = 11 SCE_MMIXAL_STRING = 12 SCE_MMIXAL_REGISTER = 13 SCE_MMIXAL_HEX = 14 SCE_MMIXAL_OPERATOR = 15 SCE_MMIXAL_SYMBOL = 16 SCE_MMIXAL_INCLUDE = 17 SCE_CLW_DEFAULT = 0 SCE_CLW_LABEL = 1 SCE_CLW_COMMENT = 2 SCE_CLW_STRING = 3 SCE_CLW_USER_IDENTIFIER = 4 SCE_CLW_INTEGER_CONSTANT = 5 SCE_CLW_REAL_CONSTANT = 6 SCE_CLW_PICTURE_STRING = 7 SCE_CLW_KEYWORD = 8 SCE_CLW_COMPILER_DIRECTIVE = 9 SCE_CLW_RUNTIME_EXPRESSIONS = 10 SCE_CLW_BUILTIN_PROCEDURES_FUNCTION = 11 SCE_CLW_STRUCTURE_DATA_TYPE = 12 SCE_CLW_ATTRIBUTE = 13 SCE_CLW_STANDARD_EQUATE = 14 SCE_CLW_ERROR = 15 SCE_CLW_DEPRECATED = 16 SCE_LOT_DEFAULT = 0 SCE_LOT_HEADER = 1 SCE_LOT_BREAK = 2 SCE_LOT_SET = 3 SCE_LOT_PASS = 4 SCE_LOT_FAIL = 5 SCE_LOT_ABORT = 6 SCE_YAML_DEFAULT = 0 SCE_YAML_COMMENT = 1 SCE_YAML_IDENTIFIER = 2 SCE_YAML_KEYWORD = 3 SCE_YAML_NUMBER = 4 SCE_YAML_REFERENCE = 5 SCE_YAML_DOCUMENT = 6 SCE_YAML_TEXT = 7 SCE_YAML_ERROR = 8 SCE_YAML_OPERATOR = 9 SCE_TEX_DEFAULT = 0 SCE_TEX_SPECIAL = 1 SCE_TEX_GROUP = 2 SCE_TEX_SYMBOL = 3 SCE_TEX_COMMAND = 4 SCE_TEX_TEXT = 5 SCE_METAPOST_DEFAULT = 0 SCE_METAPOST_SPECIAL = 1 SCE_METAPOST_GROUP = 2 
SCE_METAPOST_SYMBOL = 3 SCE_METAPOST_COMMAND = 4 SCE_METAPOST_TEXT = 5 SCE_METAPOST_EXTRA = 6 SCE_ERLANG_DEFAULT = 0 SCE_ERLANG_COMMENT = 1 SCE_ERLANG_VARIABLE = 2 SCE_ERLANG_NUMBER = 3 SCE_ERLANG_KEYWORD = 4 SCE_ERLANG_STRING = 5 SCE_ERLANG_OPERATOR = 6 SCE_ERLANG_ATOM = 7 SCE_ERLANG_FUNCTION_NAME = 8 SCE_ERLANG_CHARACTER = 9 SCE_ERLANG_MACRO = 10 SCE_ERLANG_RECORD = 11 SCE_ERLANG_SEPARATOR = 12 SCE_ERLANG_NODE_NAME = 13 SCE_ERLANG_UNKNOWN = 31 SCE_MSSQL_DEFAULT = 0 SCE_MSSQL_COMMENT = 1 SCE_MSSQL_LINE_COMMENT = 2 SCE_MSSQL_NUMBER = 3 SCE_MSSQL_STRING = 4 SCE_MSSQL_OPERATOR = 5 SCE_MSSQL_IDENTIFIER = 6 SCE_MSSQL_VARIABLE = 7 SCE_MSSQL_COLUMN_NAME = 8 SCE_MSSQL_STATEMENT = 9 SCE_MSSQL_DATATYPE = 10 SCE_MSSQL_SYSTABLE = 11 SCE_MSSQL_GLOBAL_VARIABLE = 12 SCE_MSSQL_FUNCTION = 13 SCE_MSSQL_STORED_PROCEDURE = 14 SCE_MSSQL_DEFAULT_PREF_DATATYPE = 15 SCE_MSSQL_COLUMN_NAME_2 = 16 SCE_V_DEFAULT = 0 SCE_V_COMMENT = 1 SCE_V_COMMENTLINE = 2 SCE_V_COMMENTLINEBANG = 3 SCE_V_NUMBER = 4 SCE_V_WORD = 5 SCE_V_STRING = 6 SCE_V_WORD2 = 7 SCE_V_WORD3 = 8 SCE_V_PREPROCESSOR = 9 SCE_V_OPERATOR = 10 SCE_V_IDENTIFIER = 11 SCE_V_STRINGEOL = 12 SCE_V_USER = 19 SCE_KIX_DEFAULT = 0 SCE_KIX_COMMENT = 1 SCE_KIX_STRING1 = 2 SCE_KIX_STRING2 = 3 SCE_KIX_NUMBER = 4 SCE_KIX_VAR = 5 SCE_KIX_MACRO = 6 SCE_KIX_KEYWORD = 7 SCE_KIX_FUNCTIONS = 8 SCE_KIX_OPERATOR = 9 SCE_KIX_IDENTIFIER = 31 SCE_GC_DEFAULT = 0 SCE_GC_COMMENTLINE = 1 SCE_GC_COMMENTBLOCK = 2 SCE_GC_GLOBAL = 3 SCE_GC_EVENT = 4 SCE_GC_ATTRIBUTE = 5 SCE_GC_CONTROL = 6 SCE_GC_COMMAND = 7 SCE_GC_STRING = 8 SCE_GC_OPERATOR = 9 SCE_SN_DEFAULT = 0 SCE_SN_CODE = 1 SCE_SN_COMMENTLINE = 2 SCE_SN_COMMENTLINEBANG = 3 SCE_SN_NUMBER = 4 SCE_SN_WORD = 5 SCE_SN_STRING = 6 SCE_SN_WORD2 = 7 SCE_SN_WORD3 = 8 SCE_SN_PREPROCESSOR = 9 SCE_SN_OPERATOR = 10 SCE_SN_IDENTIFIER = 11 SCE_SN_STRINGEOL = 12 SCE_SN_REGEXTAG = 13 SCE_SN_SIGNAL = 14 SCE_SN_USER = 19 SCE_AU3_DEFAULT = 0 SCE_AU3_COMMENT = 1 SCE_AU3_COMMENTBLOCK = 2 SCE_AU3_NUMBER = 3 SCE_AU3_FUNCTION = 4 
SCE_AU3_KEYWORD = 5 SCE_AU3_MACRO = 6 SCE_AU3_STRING = 7 SCE_AU3_OPERATOR = 8 SCE_AU3_VARIABLE = 9 SCE_AU3_SENT = 10 SCE_AU3_PREPROCESSOR = 11 SCE_AU3_SPECIAL = 12 SCE_AU3_EXPAND = 13 SCE_AU3_COMOBJ = 14 SCE_AU3_UDF = 15 SCE_APDL_DEFAULT = 0 SCE_APDL_COMMENT = 1 SCE_APDL_COMMENTBLOCK = 2 SCE_APDL_NUMBER = 3 SCE_APDL_STRING = 4 SCE_APDL_OPERATOR = 5 SCE_APDL_WORD = 6 SCE_APDL_PROCESSOR = 7 SCE_APDL_COMMAND = 8 SCE_APDL_SLASHCOMMAND = 9 SCE_APDL_STARCOMMAND = 10 SCE_APDL_ARGUMENT = 11 SCE_APDL_FUNCTION = 12 SCE_SH_DEFAULT = 0 SCE_SH_ERROR = 1 SCE_SH_COMMENTLINE = 2 SCE_SH_NUMBER = 3 SCE_SH_WORD = 4 SCE_SH_STRING = 5 SCE_SH_CHARACTER = 6 SCE_SH_OPERATOR = 7 SCE_SH_IDENTIFIER = 8 SCE_SH_SCALAR = 9 SCE_SH_PARAM = 10 SCE_SH_BACKTICKS = 11 SCE_SH_HERE_DELIM = 12 SCE_SH_HERE_Q = 13 SCE_ASN1_DEFAULT = 0 SCE_ASN1_COMMENT = 1 SCE_ASN1_IDENTIFIER = 2 SCE_ASN1_STRING = 3 SCE_ASN1_OID = 4 SCE_ASN1_SCALAR = 5 SCE_ASN1_KEYWORD = 6 SCE_ASN1_ATTRIBUTE = 7 SCE_ASN1_DESCRIPTOR = 8 SCE_ASN1_TYPE = 9 SCE_ASN1_OPERATOR = 10 SCE_VHDL_DEFAULT = 0 SCE_VHDL_COMMENT = 1 SCE_VHDL_COMMENTLINEBANG = 2 SCE_VHDL_NUMBER = 3 SCE_VHDL_STRING = 4 SCE_VHDL_OPERATOR = 5 SCE_VHDL_IDENTIFIER = 6 SCE_VHDL_STRINGEOL = 7 SCE_VHDL_KEYWORD = 8 SCE_VHDL_STDOPERATOR = 9 SCE_VHDL_ATTRIBUTE = 10 SCE_VHDL_STDFUNCTION = 11 SCE_VHDL_STDPACKAGE = 12 SCE_VHDL_STDTYPE = 13 SCE_VHDL_USERWORD = 14 SCE_CAML_DEFAULT = 0 SCE_CAML_IDENTIFIER = 1 SCE_CAML_TAGNAME = 2 SCE_CAML_KEYWORD = 3 SCE_CAML_KEYWORD2 = 4 SCE_CAML_KEYWORD3 = 5 SCE_CAML_LINENUM = 6 SCE_CAML_OPERATOR = 7 SCE_CAML_NUMBER = 8 SCE_CAML_CHAR = 9 SCE_CAML_STRING = 11 SCE_CAML_COMMENT = 12 SCE_CAML_COMMENT1 = 13 SCE_CAML_COMMENT2 = 14 SCE_CAML_COMMENT3 = 15 SCE_HA_DEFAULT = 0 SCE_HA_IDENTIFIER = 1 SCE_HA_KEYWORD = 2 SCE_HA_NUMBER = 3 SCE_HA_STRING = 4 SCE_HA_CHARACTER = 5 SCE_HA_CLASS = 6 SCE_HA_MODULE = 7 SCE_HA_CAPITAL = 8 SCE_HA_DATA = 9 SCE_HA_IMPORT = 10 SCE_HA_OPERATOR = 11 SCE_HA_INSTANCE = 12 SCE_HA_COMMENTLINE = 13 SCE_HA_COMMENTBLOCK = 14 
SCE_HA_COMMENTBLOCK2 = 15 SCE_HA_COMMENTBLOCK3 = 16 SCE_T3_DEFAULT = 0 SCE_T3_X_DEFAULT = 1 SCE_T3_PREPROCESSOR = 2 SCE_T3_BLOCK_COMMENT = 3 SCE_T3_LINE_COMMENT = 4 SCE_T3_OPERATOR = 5 SCE_T3_KEYWORD = 6 SCE_T3_NUMBER = 7 SCE_T3_IDENTIFIER = 8 SCE_T3_S_STRING = 9 SCE_T3_D_STRING = 10 SCE_T3_X_STRING = 11 SCE_T3_LIB_DIRECTIVE = 12 SCE_T3_MSG_PARAM = 13 SCE_T3_HTML_TAG = 14 SCE_T3_HTML_DEFAULT = 15 SCE_T3_HTML_STRING = 16 SCE_T3_USER1 = 17 SCE_T3_USER2 = 18 SCE_T3_USER3 = 19 SCE_T3_BRACE = 20 SCE_REBOL_DEFAULT = 0 SCE_REBOL_COMMENTLINE = 1 SCE_REBOL_COMMENTBLOCK = 2 SCE_REBOL_PREFACE = 3 SCE_REBOL_OPERATOR = 4 SCE_REBOL_CHARACTER = 5 SCE_REBOL_QUOTEDSTRING = 6 SCE_REBOL_BRACEDSTRING = 7 SCE_REBOL_NUMBER = 8 SCE_REBOL_PAIR = 9 SCE_REBOL_TUPLE = 10 SCE_REBOL_BINARY = 11 SCE_REBOL_MONEY = 12 SCE_REBOL_ISSUE = 13 SCE_REBOL_TAG = 14 SCE_REBOL_FILE = 15 SCE_REBOL_EMAIL = 16 SCE_REBOL_URL = 17 SCE_REBOL_DATE = 18 SCE_REBOL_TIME = 19 SCE_REBOL_IDENTIFIER = 20 SCE_REBOL_WORD = 21 SCE_REBOL_WORD2 = 22 SCE_REBOL_WORD3 = 23 SCE_REBOL_WORD4 = 24 SCE_REBOL_WORD5 = 25 SCE_REBOL_WORD6 = 26 SCE_REBOL_WORD7 = 27 SCE_REBOL_WORD8 = 28 SCE_SQL_DEFAULT = 0 SCE_SQL_COMMENT = 1 SCE_SQL_COMMENTLINE = 2 SCE_SQL_COMMENTDOC = 3 SCE_SQL_NUMBER = 4 SCE_SQL_WORD = 5 SCE_SQL_STRING = 6 SCE_SQL_CHARACTER = 7 SCE_SQL_SQLPLUS = 8 SCE_SQL_SQLPLUS_PROMPT = 9 SCE_SQL_OPERATOR = 10 SCE_SQL_IDENTIFIER = 11 SCE_SQL_SQLPLUS_COMMENT = 13 SCE_SQL_COMMENTLINEDOC = 15 SCE_SQL_WORD2 = 16 SCE_SQL_COMMENTDOCKEYWORD = 17 SCE_SQL_COMMENTDOCKEYWORDERROR = 18 SCE_SQL_USER1 = 19 SCE_SQL_USER2 = 20 SCE_SQL_USER3 = 21 SCE_SQL_USER4 = 22 SCE_SQL_QUOTEDIDENTIFIER = 23 SCE_ST_DEFAULT = 0 SCE_ST_STRING = 1 SCE_ST_NUMBER = 2 SCE_ST_COMMENT = 3 SCE_ST_SYMBOL = 4 SCE_ST_BINARY = 5 SCE_ST_BOOL = 6 SCE_ST_SELF = 7 SCE_ST_SUPER = 8 SCE_ST_NIL = 9 SCE_ST_GLOBAL = 10 SCE_ST_RETURN = 11 SCE_ST_SPECIAL = 12 SCE_ST_KWSEND = 13 SCE_ST_ASSIGN = 14 SCE_ST_CHARACTER = 15 SCE_ST_SPEC_SEL = 16 SCE_FS_DEFAULT = 0 SCE_FS_COMMENT = 1 
SCE_FS_COMMENTLINE = 2 SCE_FS_COMMENTDOC = 3 SCE_FS_COMMENTLINEDOC = 4 SCE_FS_COMMENTDOCKEYWORD = 5 SCE_FS_COMMENTDOCKEYWORDERROR = 6 SCE_FS_KEYWORD = 7 SCE_FS_KEYWORD2 = 8 SCE_FS_KEYWORD3 = 9 SCE_FS_KEYWORD4 = 10 SCE_FS_NUMBER = 11 SCE_FS_STRING = 12 SCE_FS_PREPROCESSOR = 13 SCE_FS_OPERATOR = 14 SCE_FS_IDENTIFIER = 15 SCE_FS_DATE = 16 SCE_FS_STRINGEOL = 17 SCE_FS_CONSTANT = 18 SCE_FS_ASM = 19 SCE_FS_LABEL = 20 SCE_FS_ERROR = 21 SCE_FS_HEXNUMBER = 22 SCE_FS_BINNUMBER = 23 SCE_CSOUND_DEFAULT = 0 SCE_CSOUND_COMMENT = 1 SCE_CSOUND_NUMBER = 2 SCE_CSOUND_OPERATOR = 3 SCE_CSOUND_INSTR = 4 SCE_CSOUND_IDENTIFIER = 5 SCE_CSOUND_OPCODE = 6 SCE_CSOUND_HEADERSTMT = 7 SCE_CSOUND_USERKEYWORD = 8 SCE_CSOUND_COMMENTBLOCK = 9 SCE_CSOUND_PARAM = 10 SCE_CSOUND_ARATE_VAR = 11 SCE_CSOUND_KRATE_VAR = 12 SCE_CSOUND_IRATE_VAR = 13 SCE_CSOUND_GLOBAL_VAR = 14 SCE_CSOUND_STRINGEOL = 15 SCE_INNO_DEFAULT = 0 SCE_INNO_COMMENT = 1 SCE_INNO_KEYWORD = 2 SCE_INNO_PARAMETER = 3 SCE_INNO_SECTION = 4 SCE_INNO_PREPROC = 5 SCE_INNO_PREPROC_INLINE = 6 SCE_INNO_COMMENT_PASCAL = 7 SCE_INNO_KEYWORD_PASCAL = 8 SCE_INNO_KEYWORD_USER = 9 SCE_INNO_STRING_DOUBLE = 10 SCE_INNO_STRING_SINGLE = 11 SCE_INNO_IDENTIFIER = 12 SCE_OPAL_SPACE = 0 SCE_OPAL_COMMENT_BLOCK = 1 SCE_OPAL_COMMENT_LINE = 2 SCE_OPAL_INTEGER = 3 SCE_OPAL_KEYWORD = 4 SCE_OPAL_SORT = 5 SCE_OPAL_STRING = 6 SCE_OPAL_PAR = 7 SCE_OPAL_BOOL_CONST = 8 SCE_OPAL_DEFAULT = 32 SCE_SPICE_DEFAULT = 0 SCE_SPICE_IDENTIFIER = 1 SCE_SPICE_KEYWORD = 2 SCE_SPICE_KEYWORD2 = 3 SCE_SPICE_KEYWORD3 = 4 SCE_SPICE_NUMBER = 5 SCE_SPICE_DELIMITER = 6 SCE_SPICE_VALUE = 7 SCE_SPICE_COMMENTLINE = 8 SCE_CMAKE_DEFAULT = 0 SCE_CMAKE_COMMENT = 1 SCE_CMAKE_STRINGDQ = 2 SCE_CMAKE_STRINGLQ = 3 SCE_CMAKE_STRINGRQ = 4 SCE_CMAKE_COMMANDS = 5 SCE_CMAKE_PARAMETERS = 6 SCE_CMAKE_VARIABLE = 7 SCE_CMAKE_USERDEFINED = 8 SCE_CMAKE_WHILEDEF = 9 SCE_CMAKE_FOREACHDEF = 10 SCE_CMAKE_IFDEFINEDEF = 11 SCE_CMAKE_MACRODEF = 12 SCE_CMAKE_STRINGVAR = 13 SCE_CMAKE_NUMBER = 14 SCE_GAP_DEFAULT = 0 
SCE_GAP_IDENTIFIER = 1 SCE_GAP_KEYWORD = 2 SCE_GAP_KEYWORD2 = 3 SCE_GAP_KEYWORD3 = 4 SCE_GAP_KEYWORD4 = 5 SCE_GAP_STRING = 6 SCE_GAP_CHAR = 7 SCE_GAP_OPERATOR = 8 SCE_GAP_COMMENT = 9 SCE_GAP_NUMBER = 10 SCE_GAP_STRINGEOL = 11 SCE_PLM_DEFAULT = 0 SCE_PLM_COMMENT = 1 SCE_PLM_STRING = 2 SCE_PLM_NUMBER = 3 SCE_PLM_IDENTIFIER = 4 SCE_PLM_OPERATOR = 5 SCE_PLM_CONTROL = 6 SCE_PLM_KEYWORD = 7 SCE_4GL_DEFAULT = 0 SCE_4GL_NUMBER = 1 SCE_4GL_WORD = 2 SCE_4GL_STRING = 3 SCE_4GL_CHARACTER = 4 SCE_4GL_PREPROCESSOR = 5 SCE_4GL_OPERATOR = 6 SCE_4GL_IDENTIFIER = 7 SCE_4GL_BLOCK = 8 SCE_4GL_END = 9 SCE_4GL_COMMENT1 = 10 SCE_4GL_COMMENT2 = 11 SCE_4GL_COMMENT3 = 12 SCE_4GL_COMMENT4 = 13 SCE_4GL_COMMENT5 = 14 SCE_4GL_COMMENT6 = 15 SCE_4GL_DEFAULT_ = 16 SCE_4GL_NUMBER_ = 17 SCE_4GL_WORD_ = 18 SCE_4GL_STRING_ = 19 SCE_4GL_CHARACTER_ = 20 SCE_4GL_PREPROCESSOR_ = 21 SCE_4GL_OPERATOR_ = 22 SCE_4GL_IDENTIFIER_ = 23 SCE_4GL_BLOCK_ = 24 SCE_4GL_END_ = 25 SCE_4GL_COMMENT1_ = 26 SCE_4GL_COMMENT2_ = 27 SCE_4GL_COMMENT3_ = 28 SCE_4GL_COMMENT4_ = 29 SCE_4GL_COMMENT5_ = 30 SCE_4GL_COMMENT6_ = 31 SCE_ABAQUS_DEFAULT = 0 SCE_ABAQUS_COMMENT = 1 SCE_ABAQUS_COMMENTBLOCK = 2 SCE_ABAQUS_NUMBER = 3 SCE_ABAQUS_STRING = 4 SCE_ABAQUS_OPERATOR = 5 SCE_ABAQUS_WORD = 6 SCE_ABAQUS_PROCESSOR = 7 SCE_ABAQUS_COMMAND = 8 SCE_ABAQUS_SLASHCOMMAND = 9 SCE_ABAQUS_STARCOMMAND = 10 SCE_ABAQUS_ARGUMENT = 11 SCE_ABAQUS_FUNCTION = 12 SCE_ASY_DEFAULT = 0 SCE_ASY_COMMENT = 1 SCE_ASY_COMMENTLINE = 2 SCE_ASY_NUMBER = 3 SCE_ASY_WORD = 4 SCE_ASY_STRING = 5 SCE_ASY_CHARACTER = 6 SCE_ASY_OPERATOR = 7 SCE_ASY_IDENTIFIER = 8 SCE_ASY_STRINGEOL = 9 SCE_ASY_COMMENTLINEDOC = 10 SCE_ASY_WORD2 = 11 SCE_R_DEFAULT = 0 SCE_R_COMMENT = 1 SCE_R_KWORD = 2 SCE_R_BASEKWORD = 3 SCE_R_OTHERKWORD = 4 SCE_R_NUMBER = 5 SCE_R_STRING = 6 SCE_R_STRING2 = 7 SCE_R_OPERATOR = 8 SCE_R_IDENTIFIER = 9 SCE_R_INFIX = 10 SCE_R_INFIXEOL = 11 SCE_MAGIK_DEFAULT = 0 SCE_MAGIK_COMMENT = 1 SCE_MAGIK_HYPER_COMMENT = 16 SCE_MAGIK_STRING = 2 SCE_MAGIK_CHARACTER = 3 
SCE_MAGIK_NUMBER = 4 SCE_MAGIK_IDENTIFIER = 5 SCE_MAGIK_OPERATOR = 6 SCE_MAGIK_FLOW = 7 SCE_MAGIK_CONTAINER = 8 SCE_MAGIK_BRACKET_BLOCK = 9 SCE_MAGIK_BRACE_BLOCK = 10 SCE_MAGIK_SQBRACKET_BLOCK = 11 SCE_MAGIK_UNKNOWN_KEYWORD = 12 SCE_MAGIK_KEYWORD = 13 SCE_MAGIK_PRAGMA = 14 SCE_MAGIK_SYMBOL = 15 SCE_POWERSHELL_DEFAULT = 0 SCE_POWERSHELL_COMMENT = 1 SCE_POWERSHELL_STRING = 2 SCE_POWERSHELL_CHARACTER = 3 SCE_POWERSHELL_NUMBER = 4 SCE_POWERSHELL_VARIABLE = 5 SCE_POWERSHELL_OPERATOR = 6 SCE_POWERSHELL_IDENTIFIER = 7 SCE_POWERSHELL_KEYWORD = 8 SCE_POWERSHELL_CMDLET = 9 SCE_POWERSHELL_ALIAS = 10 SCE_MYSQL_DEFAULT = 0 SCE_MYSQL_COMMENT = 1 SCE_MYSQL_COMMENTLINE = 2 SCE_MYSQL_VARIABLE = 3 SCE_MYSQL_SYSTEMVARIABLE = 4 SCE_MYSQL_KNOWNSYSTEMVARIABLE = 5 SCE_MYSQL_NUMBER = 6 SCE_MYSQL_MAJORKEYWORD = 7 SCE_MYSQL_KEYWORD = 8 SCE_MYSQL_DATABASEOBJECT = 9 SCE_MYSQL_PROCEDUREKEYWORD = 10 SCE_MYSQL_STRING = 11 SCE_MYSQL_SQSTRING = 12 SCE_MYSQL_DQSTRING = 13 SCE_MYSQL_OPERATOR = 14 SCE_MYSQL_FUNCTION = 15 SCE_MYSQL_IDENTIFIER = 16 SCE_MYSQL_QUOTEDIDENTIFIER = 17 SCE_MYSQL_USER1 = 18 SCE_MYSQL_USER2 = 19 SCE_MYSQL_USER3 = 20 SCE_PO_DEFAULT = 0 SCE_PO_COMMENT = 1 SCE_PO_MSGID = 2 SCE_PO_MSGID_TEXT = 3 SCE_PO_MSGSTR = 4 SCE_PO_MSGSTR_TEXT = 5 SCE_PO_MSGCTXT = 6 SCE_PO_MSGCTXT_TEXT = 7 SCE_PO_FUZZY = 8 SCLEX_ASP = 29 SCLEX_PHP = 30
v17al/Flexget
refs/heads/develop
tests/test_config_schema.py
10
from __future__ import unicode_literals, division, absolute_import import jsonschema from flexget import config_schema from tests import FlexGetBase def iter_registered_schemas(): for path in config_schema.schema_paths: schema = config_schema.resolve_ref(path) yield path, schema class TestSchemaValidator(FlexGetBase): def test_registered_schemas_are_valid(self): for path, schema in iter_registered_schemas(): try: config_schema.SchemaValidator.check_schema(schema) except jsonschema.SchemaError as e: assert False, 'plugin `%s` has an invalid schema. %s %s %s' % ( path, '/'.join(str(p) for p in e.path), e.validator, e.message) except Exception as e: assert False, 'plugin `%s` has an invalid schema. %s' % (path, e) def test_refs_in_schemas_are_resolvable(self): def refs_in(item): if isinstance(item, dict): for key, value in item.iteritems(): if key == '$ref': yield value else: for ref in refs_in(value): yield ref elif isinstance(item, list): for i in item: for ref in refs_in(i): yield ref for path, schema in iter_registered_schemas(): resolver = config_schema.RefResolver.from_schema(schema) for ref in refs_in(schema): try: with resolver.resolving(ref): pass except jsonschema.RefResolutionError: assert False, '$ref %s in schema %s is invalid' % (ref, path) def test_resolves_local_refs(self): schema = {'$ref': '/schema/plugin/accept_all'} # accept_all schema should be for type boolean assert not config_schema.process_config(True, schema) assert config_schema.process_config(14, schema) def test_custom_format_checker(self): schema = {'type': 'string', 'format': 'quality'} assert not config_schema.process_config('720p', schema) assert config_schema.process_config('aoeu', schema) def test_custom_error(self): schema = {'type': 'string', 'error': 'This is not okay'} errors = config_schema.process_config(13, schema) assert errors[0].message == schema['error'] def test_custom_error_template(self): schema = {'type': 'string', 'minLength': 10, 'error': '{{validator}} failed for 
{{instance}}'} errors = config_schema.process_config(13, schema) assert errors[0].message == "type failed for 13" errors = config_schema.process_config('aoeu', schema) assert errors[0].message == "minLength failed for aoeu" def test_custom_keyword_error(self): schema = {'type': 'string', 'error_type': 'This is not okay'} errors = config_schema.process_config(13, schema) assert errors[0].message == schema['error_type'] def test_custom_keyword_error_overrides(self): schema = {'type': 'string', 'error_type': 'This is not okay', 'error': 'This is worse'} errors = config_schema.process_config(13, schema) assert errors[0].message == schema['error_type'] def test_error_with_path(self): schema = {'properties': {'p': {'items': {'type': 'string', 'error': 'ERROR'}}}} errors = config_schema.process_config({'p': [13]}, schema) assert errors[0].json_pointer == '/p/0' assert errors[0].message == 'ERROR' def test_builtin_error_rewriting(self): schema = {'type': 'object'} errors = config_schema.process_config(42, schema) # We don't call them objects around here assert 'object' not in errors[0].message assert 'dict' in errors[0].message def test_anyOf_branch_is_chosen_based_on_type_errors(self): schema = { "anyOf": [ {"type": ["string", "array"]}, { "anyOf": [ {"type": "integer"}, {"type": "number", "minimum": 5} ] } ] } # If there are type errors on both sides, it should be a virtual type error with all types errors = config_schema.process_config(True, schema) assert len(errors) == 1 assert tuple(errors[0].schema_path) == ('anyOf', 'type') # It should have all the types together assert set(errors[0].validator_value) == set(['string', 'array', 'number', 'integer']) # If there are no type errors going down one branch it should choose it errors = config_schema.process_config(1.5, schema) assert len(errors) == 1 assert errors[0].validator == 'minimum' def test_oneOf_branch_is_chosen_based_on_type_errors(self): schema = { "oneOf": [ {"type": ["string", "array"]}, { "oneOf": [ {"type": 
"integer"}, {"type": "number", "minimum": 5} ] } ] } errors = config_schema.process_config(True, schema) # If there are type errors on both sides, it should be a virtual type error with all types assert len(errors) == 1 assert tuple(errors[0].schema_path) == ('oneOf', 'type') # It should have all the types together assert set(errors[0].validator_value) == set(['string', 'array', 'number', 'integer']) # If there are no type errors going down one branch it should choose it errors = config_schema.process_config(1.5, schema) assert len(errors) == 1 assert errors[0].validator == 'minimum' def test_defaults_are_filled(self): schema = {"properties": {"p": {"default": 5}}} config = {} config_schema.process_config(config, schema) assert config["p"] == 5 def test_defaults_does_not_override_explicit_value(self): schema = {"properties": {"p": {"default": 5}}} config = {"p": "foo"} config_schema.process_config(config, schema) assert config["p"] == "foo"
j0057/ansible-1
refs/heads/fix-powershell-shebang-not-found
lib/ansible/inventory/ini.py
111
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# import ansible.constants as C from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range from ansible.inventory.expand_hosts import expand_hostname_range from ansible import errors from ansible import utils import shlex import re import ast class InventoryParser(object): """ Host inventory for ansible. """ def __init__(self, filename=C.DEFAULT_HOST_LIST): with open(filename) as fh: self.filename = filename self.lines = fh.readlines() self.groups = {} self.hosts = {} self._parse() def _parse(self): self._parse_base_groups() self._parse_group_children() self._add_allgroup_children() self._parse_group_variables() return self.groups @staticmethod def _parse_value(v): if "#" not in v: try: ret = ast.literal_eval(v) if not isinstance(ret, float): # Do not trim floats. Eg: "1.20" to 1.2 return ret # Using explicit exceptions. # Likely a string that literal_eval does not like. We wil then just set it. except ValueError: # For some reason this was thought to be malformed. pass except SyntaxError: # Is this a hash with an equals at the end? 
pass return v # [webservers] # alpha # beta:2345 # gamma sudo=True user=root # delta asdf=jkl favcolor=red def _add_allgroup_children(self): for group in self.groups.values(): if group.depth == 0 and group.name != 'all': self.groups['all'].add_child_group(group) def _parse_base_groups(self): # FIXME: refactor ungrouped = Group(name='ungrouped') all = Group(name='all') all.add_child_group(ungrouped) self.groups = dict(all=all, ungrouped=ungrouped) active_group_name = 'ungrouped' for lineno in range(len(self.lines)): line = utils.before_comment(self.lines[lineno]).strip() if line.startswith("[") and line.endswith("]"): active_group_name = line.replace("[","").replace("]","") if ":vars" in line or ":children" in line: active_group_name = active_group_name.rsplit(":", 1)[0] if active_group_name not in self.groups: new_group = self.groups[active_group_name] = Group(name=active_group_name) active_group_name = None elif active_group_name not in self.groups: new_group = self.groups[active_group_name] = Group(name=active_group_name) elif line.startswith(";") or line == '': pass elif active_group_name: tokens = shlex.split(line) if len(tokens) == 0: continue hostname = tokens[0] port = C.DEFAULT_REMOTE_PORT # Three cases to check: # 0. A hostname that contains a range pesudo-code and a port # 1. 
A hostname that contains just a port if hostname.count(":") > 1: # Possible an IPv6 address, or maybe a host line with multiple ranges # IPv6 with Port XXX:XXX::XXX.port # FQDN foo.example.com if hostname.count(".") == 1: (hostname, port) = hostname.rsplit(".", 1) elif ("[" in hostname and "]" in hostname and ":" in hostname and (hostname.rindex("]") < hostname.rindex(":")) or ("]" not in hostname and ":" in hostname)): (hostname, port) = hostname.rsplit(":", 1) hostnames = [] if detect_range(hostname): hostnames = expand_hostname_range(hostname) else: hostnames = [hostname] for hn in hostnames: host = None if hn in self.hosts: host = self.hosts[hn] else: host = Host(name=hn, port=port) self.hosts[hn] = host if len(tokens) > 1: for t in tokens[1:]: if t.startswith('#'): break try: (k,v) = t.split("=", 1) except ValueError, e: raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e))) host.set_variable(k, self._parse_value(v)) self.groups[active_group_name].add_host(host) # [southeast:children] # atlanta # raleigh def _parse_group_children(self): group = None for lineno in range(len(self.lines)): line = self.lines[lineno].strip() if line is None or line == '': continue if line.startswith("[") and ":children]" in line: line = line.replace("[","").replace(":children]","") group = self.groups.get(line, None) if group is None: group = self.groups[line] = Group(name=line) elif line.startswith("#") or line.startswith(";"): pass elif line.startswith("["): group = None elif group: kid_group = self.groups.get(line, None) if kid_group is None: raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line)) else: group.add_child_group(kid_group) # [webservers:vars] # http_port=1234 # maxRequestsPerChild=200 def _parse_group_variables(self): group = None for lineno in range(len(self.lines)): line = self.lines[lineno].strip() if line.startswith("[") and ":vars]" in line: line = 
line.replace("[","").replace(":vars]","") group = self.groups.get(line, None) if group is None: raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line)) elif line.startswith("#") or line.startswith(";"): pass elif line.startswith("["): group = None elif line == '': pass elif group: if "=" not in line: raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1)) else: (k, v) = [e.strip() for e in line.split("=", 1)] group.set_variable(k, self._parse_value(v)) def get_host_variables(self, host): return {}
SanketDG/networkx
refs/heads/master
networkx/tests/test_convert_pandas.py
43
from nose import SkipTest
from nose.tools import assert_true

import networkx as nx


class TestConvertPandas(object):
    """Tests for ``nx.from_pandas_dataframe`` edge-attribute handling."""

    numpy = 1  # nosetests attribute, use nosetests -a 'not numpy' to skip test

    @classmethod
    def setupClass(cls):
        # Skip the whole class when pandas is not importable.
        try:
            import pandas as pd
        except ImportError:
            raise SkipTest('Pandas not available.')

    def __init__(self):
        global pd
        import pandas as pd

        # Deterministic 3x2 integer matrix feeding 'weight' and 'cost'.
        self.r = pd.np.random.RandomState(seed=5)
        ints = self.r.random_integers(1, 10, size=(3, 2))
        frame = pd.DataFrame(ints, columns=['weight', 'cost'])
        frame[0] = ['A', 'B', 'C']    # Column label 0 (int)
        frame['b'] = ['D', 'A', 'E']  # Column label 'b' (str)
        self.df = frame

    def assert_equal(self, G1, G2):
        # Graphs must agree structurally and on every edge attribute.
        matcher = lambda x, y: x == y
        assert_true(nx.is_isomorphic(G1, G2, edge_match=matcher))

    def test_from_dataframe_all_attr(self):
        # ``True`` pulls every remaining column in as an edge attribute.
        expected = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
                             ('B', 'A', {'cost': 1, 'weight': 7}),
                             ('A', 'D', {'cost': 7, 'weight': 4})])
        built = nx.from_pandas_dataframe(self.df, 0, 'b', True)
        self.assert_equal(built, expected)

    def test_from_dataframe_multi_attr(self):
        # An explicit list selects exactly those attribute columns.
        expected = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
                             ('B', 'A', {'cost': 1, 'weight': 7}),
                             ('A', 'D', {'cost': 7, 'weight': 4})])
        built = nx.from_pandas_dataframe(self.df, 0, 'b', ['weight', 'cost'])
        self.assert_equal(built, expected)

    def test_from_dataframe_one_attr(self):
        # A single column name keeps just that attribute.
        expected = nx.Graph([('E', 'C', {'weight': 10}),
                             ('B', 'A', {'weight': 7}),
                             ('A', 'D', {'weight': 4})])
        built = nx.from_pandas_dataframe(self.df, 0, 'b', 'weight')
        self.assert_equal(built, expected)

    def test_from_dataframe_no_attr(self):
        # Omitting the attribute argument yields attribute-free edges.
        expected = nx.Graph([('E', 'C', {}),
                             ('B', 'A', {}),
                             ('A', 'D', {})])
        built = nx.from_pandas_dataframe(self.df, 0, 'b',)
        self.assert_equal(built, expected)
Sentient07/scikit-learn
refs/heads/master
examples/ensemble/plot_forest_iris.py
335
""" ==================================================================== Plot the decision surfaces of ensembles of trees on the iris dataset ==================================================================== Plot the decision surfaces of forests of randomized trees trained on pairs of features of the iris dataset. This plot compares the decision surfaces learned by a decision tree classifier (first column), by a random forest classifier (second column), by an extra- trees classifier (third column) and by an AdaBoost classifier (fourth column). In the first row, the classifiers are built using the sepal width and the sepal length features only, on the second row using the petal length and sepal length only, and on the third row using the petal width and the petal length only. In descending order of quality, when trained (outside of this example) on all 4 features using 30 estimators and scored using 10 fold cross validation, we see:: ExtraTreesClassifier() # 0.95 score RandomForestClassifier() # 0.94 score AdaBoost(DecisionTree(max_depth=3)) # 0.94 score DecisionTree(max_depth=None) # 0.94 score Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but the average score does not improve). See the console's output for further details about each model. In this example you might try to: 1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and ``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the ``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier`` 2) vary ``n_estimators`` It is worth noting that RandomForests and ExtraTrees can be fitted in parallel on many cores as each tree is built independently of the others. AdaBoost's samples are built sequentially and so do not use multiple cores. 
""" print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import clone from sklearn.datasets import load_iris from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier) from sklearn.externals.six.moves import xrange from sklearn.tree import DecisionTreeClassifier # Parameters n_classes = 3 n_estimators = 30 plot_colors = "ryb" cmap = plt.cm.RdYlBu plot_step = 0.02 # fine step width for decision surface contours plot_step_coarser = 0.5 # step widths for coarse classifier guesses RANDOM_SEED = 13 # fix the seed on each iteration # Load data iris = load_iris() plot_idx = 1 models = [DecisionTreeClassifier(max_depth=None), RandomForestClassifier(n_estimators=n_estimators), ExtraTreesClassifier(n_estimators=n_estimators), AdaBoostClassifier(DecisionTreeClassifier(max_depth=3), n_estimators=n_estimators)] for pair in ([0, 1], [0, 2], [2, 3]): for model in models: # We only take the two corresponding features X = iris.data[:, pair] y = iris.target # Shuffle idx = np.arange(X.shape[0]) np.random.seed(RANDOM_SEED) np.random.shuffle(idx) X = X[idx] y = y[idx] # Standardize mean = X.mean(axis=0) std = X.std(axis=0) X = (X - mean) / std # Train clf = clone(model) clf = model.fit(X, y) scores = clf.score(X, y) # Create a title for each column and the console by using str() and # slicing away useless parts of the string model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")] model_details = model_title if hasattr(model, "estimators_"): model_details += " with {} estimators".format(len(model.estimators_)) print( model_details + " with features", pair, "has a score of", scores ) plt.subplot(3, 4, plot_idx) if plot_idx <= len(models): # Add a title at the top of each column plt.title(model_title) # Now plot the decision boundary using a fine mesh as input to a # filled contour plot x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = 
np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) # Plot either a single DecisionTreeClassifier or alpha blend the # decision surfaces of the ensemble of classifiers if isinstance(model, DecisionTreeClassifier): Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=cmap) else: # Choose alpha blend level with respect to the number of estimators # that are in use (noting that AdaBoost can use fewer estimators # than its maximum if it achieves a good enough fit early on) estimator_alpha = 1.0 / len(model.estimators_) for tree in model.estimators_: Z = tree.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap) # Build a coarser grid to plot a set of ensemble classifications # to show how these are different to what we see in the decision # surfaces. These points are regularly space and do not have a black outline xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser), np.arange(y_min, y_max, plot_step_coarser)) Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape) cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none") # Plot the training points, these are clustered together and have a # black outline for i, c in zip(xrange(n_classes), plot_colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i], cmap=cmap) plot_idx += 1 # move on to the next plot in sequence plt.suptitle("Classifiers on feature subsets of the Iris dataset") plt.axis("tight") plt.show()
sinotradition/sinoera
refs/heads/master
sinoera/tst/sinozodiac/test_tigerpensitive.py
1
#!/usr/bin/python
#coding=utf-8
'''Unit tests for the ``sinoera.sinozodiac.tigerpensitive`` module.

@author: sheng
@contact: [email protected]
@copyright: License according to the project license.
'''

import unittest

from sinoera.sinozodiac import tigerpensitive


class TestTigerpensitiveFunctions(unittest.TestCase):
    """Test-case skeleton for the tigerpensitive module.

    Bug fix: the original source omitted the ``class`` keyword
    (``TestTigerpensitiveFunctions(unittest.TestCase):``), which is a
    SyntaxError, so the module could not even be imported.
    """

    def setUp(self):
        # No shared fixtures yet.
        pass

    def test_XXX(self):
        # TODO: replace with real assertions against ``tigerpensitive``.
        pass


if __name__ == "__main__":
    unittest.main()
UTNkar/moore
refs/heads/development
src/home/migrations/0050_auto_20210401_1343.py
1
# Generated by Django 3.1.7 on 2021-04-01 11:43 import blocks.models from django.db import migrations import google.models import involvement.blocks.contact_card_block import wagtail.core.blocks import wagtail.core.fields import wagtail.images.blocks class Migration(migrations.Migration): dependencies = [ ('home', '0049_auto_20210202_1600'), ] operations = [ migrations.AlterField( model_name='formpage', name='intro_en', field=wagtail.core.fields.StreamField([('section', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')], help_text='Include padding for this section', required=False)), ('full_width', wagtail.core.blocks.BooleanBlock(help_text='Expand this section to full page width', required=False)), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', 
wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))])), ('columns', wagtail.core.blocks.StructBlock([('columns', 
wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('width', wagtail.core.blocks.ChoiceBlock(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], help_text='Width out of 12')), ('content', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', 
wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))]))]))])))])), ('two_column_grid', wagtail.core.blocks.StructBlock([('height', wagtail.core.blocks.IntegerBlock(default=400, help_text='Row height in px', max_value=800, min_value=1)), ('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('flip', wagtail.core.blocks.BooleanBlock(help_text='Swap position of image and paragraph', required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', 
wagtail.core.blocks.RichTextBlock())]))])))])), ('countdown', wagtail.core.blocks.StructBlock([('id', wagtail.core.blocks.CharBlock()), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')])), ('expires', wagtail.core.blocks.DateTimeBlock()), ('pre_title', wagtail.core.blocks.CharBlock(required=False)), ('years_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('months_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('days_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('hours_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('minutes_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('seconds_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('post_title', wagtail.core.blocks.CharBlock(required=False))])), ('contacts', wagtail.core.blocks.StructBlock([('contacts', wagtail.core.blocks.ListBlock(involvement.blocks.contact_card_block.ContactCardBlock()))])), ('events', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('show_facebook', wagtail.core.blocks.BooleanBlock(help_text='Whether to embed a Facebook page', required=False)), ('facebook_page_name', wagtail.core.blocks.CharBlock(help_text='Name of the page to show. (Must be public or accessible by the registered app_id)', required=False)), ('show_instagram', wagtail.core.blocks.BooleanBlock(help_text='Whether to show Instagram the last event from the registered Instagram feed', required=False)), ('instagram_account_name', wagtail.core.blocks.CharBlock(help_text='The username of the instagram account, without @. 
The profile must be public', label='Instagram username', required=False)), ('show_youtube', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the last video from a Youtube-channel', required=False)), ('youtube_channel_id', wagtail.core.blocks.CharBlock(required=False)), ('show_google_calendar', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the next few events from a google calendar', required=False)), ('google_calendar_id', wagtail.core.blocks.CharBlock(required=False))])), ('logos', wagtail.core.blocks.StructBlock([('logos', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('link', wagtail.core.blocks.URLBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False))])))])), ('counters', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock()), ('counters', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('icon', wagtail.core.blocks.CharBlock(help_text='Material icon font icon text, as found on: https://material.io/icons')), ('value', wagtail.core.blocks.CharBlock()), ('description', wagtail.core.blocks.CharBlock(required=False))]))), ('style', wagtail.core.blocks.ChoiceBlock(choices=[('light', 'Light'), ('dark', 'Dark')]))])), ('image_text_card', wagtail.core.blocks.StructBlock([('cards', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))])))]))]))])), ('divider', blocks.models.DividerBlock()), ('contact_card', involvement.blocks.contact_card_block.ContactCardBlock())], blank=True, verbose_name='English Introduction'), ), migrations.AlterField( model_name='formpage', name='intro_sv', field=wagtail.core.fields.StreamField([('section', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), 
('L', 'Large')], help_text='Include padding for this section', required=False)), ('full_width', wagtail.core.blocks.BooleanBlock(help_text='Expand this section to full page width', required=False)), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', 
wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))])), ('columns', wagtail.core.blocks.StructBlock([('columns', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('width', wagtail.core.blocks.ChoiceBlock(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], help_text='Width out of 12')), ('content', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), 
('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', 
wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))]))]))])))])), ('two_column_grid', wagtail.core.blocks.StructBlock([('height', wagtail.core.blocks.IntegerBlock(default=400, help_text='Row height in px', max_value=800, min_value=1)), ('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('flip', wagtail.core.blocks.BooleanBlock(help_text='Swap position of image and paragraph', required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())]))])))])), ('countdown', wagtail.core.blocks.StructBlock([('id', wagtail.core.blocks.CharBlock()), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')])), ('expires', wagtail.core.blocks.DateTimeBlock()), ('pre_title', wagtail.core.blocks.CharBlock(required=False)), ('years_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('months_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('days_label', 
wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('hours_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('minutes_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('seconds_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('post_title', wagtail.core.blocks.CharBlock(required=False))])), ('contacts', wagtail.core.blocks.StructBlock([('contacts', wagtail.core.blocks.ListBlock(involvement.blocks.contact_card_block.ContactCardBlock()))])), ('events', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('show_facebook', wagtail.core.blocks.BooleanBlock(help_text='Whether to embed a Facebook page', required=False)), ('facebook_page_name', wagtail.core.blocks.CharBlock(help_text='Name of the page to show. (Must be public or accessible by the registered app_id)', required=False)), ('show_instagram', wagtail.core.blocks.BooleanBlock(help_text='Whether to show Instagram the last event from the registered Instagram feed', required=False)), ('instagram_account_name', wagtail.core.blocks.CharBlock(help_text='The username of the instagram account, without @. 
The profile must be public', label='Instagram username', required=False)), ('show_youtube', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the last video from a Youtube-channel', required=False)), ('youtube_channel_id', wagtail.core.blocks.CharBlock(required=False)), ('show_google_calendar', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the next few events from a google calendar', required=False)), ('google_calendar_id', wagtail.core.blocks.CharBlock(required=False))])), ('logos', wagtail.core.blocks.StructBlock([('logos', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('link', wagtail.core.blocks.URLBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False))])))])), ('counters', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock()), ('counters', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('icon', wagtail.core.blocks.CharBlock(help_text='Material icon font icon text, as found on: https://material.io/icons')), ('value', wagtail.core.blocks.CharBlock()), ('description', wagtail.core.blocks.CharBlock(required=False))]))), ('style', wagtail.core.blocks.ChoiceBlock(choices=[('light', 'Light'), ('dark', 'Dark')]))])), ('image_text_card', wagtail.core.blocks.StructBlock([('cards', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))])))]))]))])), ('divider', blocks.models.DividerBlock()), ('contact_card', involvement.blocks.contact_card_block.ContactCardBlock())], blank=True, verbose_name='Swedish Introduction'), ), migrations.AlterField( model_name='formpage', name='thank_you_text_en', field=wagtail.core.fields.StreamField([('section', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 
'Medium'), ('L', 'Large')], help_text='Include padding for this section', required=False)), ('full_width', wagtail.core.blocks.BooleanBlock(help_text='Expand this section to full page width', required=False)), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', 
wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))])), ('columns', wagtail.core.blocks.StructBlock([('columns', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('width', wagtail.core.blocks.ChoiceBlock(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], help_text='Width out of 12')), ('content', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), 
('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', 
wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))]))]))])))])), ('two_column_grid', wagtail.core.blocks.StructBlock([('height', wagtail.core.blocks.IntegerBlock(default=400, help_text='Row height in px', max_value=800, min_value=1)), ('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('flip', wagtail.core.blocks.BooleanBlock(help_text='Swap position of image and paragraph', required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())]))])))])), ('countdown', wagtail.core.blocks.StructBlock([('id', wagtail.core.blocks.CharBlock()), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')])), ('expires', wagtail.core.blocks.DateTimeBlock()), ('pre_title', wagtail.core.blocks.CharBlock(required=False)), ('years_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('months_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('days_label', 
wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('hours_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('minutes_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('seconds_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('post_title', wagtail.core.blocks.CharBlock(required=False))])), ('contacts', wagtail.core.blocks.StructBlock([('contacts', wagtail.core.blocks.ListBlock(involvement.blocks.contact_card_block.ContactCardBlock()))])), ('events', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('show_facebook', wagtail.core.blocks.BooleanBlock(help_text='Whether to embed a Facebook page', required=False)), ('facebook_page_name', wagtail.core.blocks.CharBlock(help_text='Name of the page to show. (Must be public or accessible by the registered app_id)', required=False)), ('show_instagram', wagtail.core.blocks.BooleanBlock(help_text='Whether to show Instagram the last event from the registered Instagram feed', required=False)), ('instagram_account_name', wagtail.core.blocks.CharBlock(help_text='The username of the instagram account, without @. 
The profile must be public', label='Instagram username', required=False)), ('show_youtube', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the last video from a Youtube-channel', required=False)), ('youtube_channel_id', wagtail.core.blocks.CharBlock(required=False)), ('show_google_calendar', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the next few events from a google calendar', required=False)), ('google_calendar_id', wagtail.core.blocks.CharBlock(required=False))])), ('logos', wagtail.core.blocks.StructBlock([('logos', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('link', wagtail.core.blocks.URLBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False))])))])), ('counters', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock()), ('counters', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('icon', wagtail.core.blocks.CharBlock(help_text='Material icon font icon text, as found on: https://material.io/icons')), ('value', wagtail.core.blocks.CharBlock()), ('description', wagtail.core.blocks.CharBlock(required=False))]))), ('style', wagtail.core.blocks.ChoiceBlock(choices=[('light', 'Light'), ('dark', 'Dark')]))])), ('image_text_card', wagtail.core.blocks.StructBlock([('cards', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))])))]))]))])), ('divider', blocks.models.DividerBlock()), ('contact_card', involvement.blocks.contact_card_block.ContactCardBlock())], blank=True, verbose_name='English Thank You Text'), ), migrations.AlterField( model_name='formpage', name='thank_you_text_sv', field=wagtail.core.fields.StreamField([('section', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 
'Medium'), ('L', 'Large')], help_text='Include padding for this section', required=False)), ('full_width', wagtail.core.blocks.BooleanBlock(help_text='Expand this section to full page width', required=False)), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', 
wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))])), ('columns', wagtail.core.blocks.StructBlock([('columns', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('width', wagtail.core.blocks.ChoiceBlock(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], help_text='Width out of 12')), ('content', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), 
('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', 
wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))]))]))])))])), ('two_column_grid', wagtail.core.blocks.StructBlock([('height', wagtail.core.blocks.IntegerBlock(default=400, help_text='Row height in px', max_value=800, min_value=1)), ('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('flip', wagtail.core.blocks.BooleanBlock(help_text='Swap position of image and paragraph', required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())]))])))])), ('countdown', wagtail.core.blocks.StructBlock([('id', wagtail.core.blocks.CharBlock()), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')])), ('expires', wagtail.core.blocks.DateTimeBlock()), ('pre_title', wagtail.core.blocks.CharBlock(required=False)), ('years_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('months_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('days_label', 
wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('hours_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('minutes_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('seconds_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('post_title', wagtail.core.blocks.CharBlock(required=False))])), ('contacts', wagtail.core.blocks.StructBlock([('contacts', wagtail.core.blocks.ListBlock(involvement.blocks.contact_card_block.ContactCardBlock()))])), ('events', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('show_facebook', wagtail.core.blocks.BooleanBlock(help_text='Whether to embed a Facebook page', required=False)), ('facebook_page_name', wagtail.core.blocks.CharBlock(help_text='Name of the page to show. (Must be public or accessible by the registered app_id)', required=False)), ('show_instagram', wagtail.core.blocks.BooleanBlock(help_text='Whether to show Instagram the last event from the registered Instagram feed', required=False)), ('instagram_account_name', wagtail.core.blocks.CharBlock(help_text='The username of the instagram account, without @. 
The profile must be public', label='Instagram username', required=False)), ('show_youtube', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the last video from a Youtube-channel', required=False)), ('youtube_channel_id', wagtail.core.blocks.CharBlock(required=False)), ('show_google_calendar', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the next few events from a google calendar', required=False)), ('google_calendar_id', wagtail.core.blocks.CharBlock(required=False))])), ('logos', wagtail.core.blocks.StructBlock([('logos', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('link', wagtail.core.blocks.URLBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False))])))])), ('counters', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock()), ('counters', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('icon', wagtail.core.blocks.CharBlock(help_text='Material icon font icon text, as found on: https://material.io/icons')), ('value', wagtail.core.blocks.CharBlock()), ('description', wagtail.core.blocks.CharBlock(required=False))]))), ('style', wagtail.core.blocks.ChoiceBlock(choices=[('light', 'Light'), ('dark', 'Dark')]))])), ('image_text_card', wagtail.core.blocks.StructBlock([('cards', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))])))]))]))])), ('divider', blocks.models.DividerBlock()), ('contact_card', involvement.blocks.contact_card_block.ContactCardBlock())], blank=True, verbose_name='Swedish Thank You Text'), ), migrations.AlterField( model_name='homepage', name='body_en', field=wagtail.core.fields.StreamField([('section', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 
'Medium'), ('L', 'Large')], help_text='Include padding for this section', required=False)), ('full_width', wagtail.core.blocks.BooleanBlock(help_text='Expand this section to full page width', required=False)), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', 
wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))])), ('columns', wagtail.core.blocks.StructBlock([('columns', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('width', wagtail.core.blocks.ChoiceBlock(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], help_text='Width out of 12')), ('content', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), 
('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', 
wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))]))]))])))])), ('two_column_grid', wagtail.core.blocks.StructBlock([('height', wagtail.core.blocks.IntegerBlock(default=400, help_text='Row height in px', max_value=800, min_value=1)), ('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('flip', wagtail.core.blocks.BooleanBlock(help_text='Swap position of image and paragraph', required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())]))])))])), ('countdown', wagtail.core.blocks.StructBlock([('id', wagtail.core.blocks.CharBlock()), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')])), ('expires', wagtail.core.blocks.DateTimeBlock()), ('pre_title', wagtail.core.blocks.CharBlock(required=False)), ('years_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('months_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('days_label', 
wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('hours_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('minutes_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('seconds_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('post_title', wagtail.core.blocks.CharBlock(required=False))])), ('contacts', wagtail.core.blocks.StructBlock([('contacts', wagtail.core.blocks.ListBlock(involvement.blocks.contact_card_block.ContactCardBlock()))])), ('events', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('show_facebook', wagtail.core.blocks.BooleanBlock(help_text='Whether to embed a Facebook page', required=False)), ('facebook_page_name', wagtail.core.blocks.CharBlock(help_text='Name of the page to show. (Must be public or accessible by the registered app_id)', required=False)), ('show_instagram', wagtail.core.blocks.BooleanBlock(help_text='Whether to show Instagram the last event from the registered Instagram feed', required=False)), ('instagram_account_name', wagtail.core.blocks.CharBlock(help_text='The username of the instagram account, without @. 
The profile must be public', label='Instagram username', required=False)), ('show_youtube', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the last video from a Youtube-channel', required=False)), ('youtube_channel_id', wagtail.core.blocks.CharBlock(required=False)), ('show_google_calendar', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the next few events from a google calendar', required=False)), ('google_calendar_id', wagtail.core.blocks.CharBlock(required=False))])), ('logos', wagtail.core.blocks.StructBlock([('logos', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('link', wagtail.core.blocks.URLBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False))])))])), ('counters', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock()), ('counters', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('icon', wagtail.core.blocks.CharBlock(help_text='Material icon font icon text, as found on: https://material.io/icons')), ('value', wagtail.core.blocks.CharBlock()), ('description', wagtail.core.blocks.CharBlock(required=False))]))), ('style', wagtail.core.blocks.ChoiceBlock(choices=[('light', 'Light'), ('dark', 'Dark')]))])), ('image_text_card', wagtail.core.blocks.StructBlock([('cards', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))])))]))]))])), ('divider', blocks.models.DividerBlock()), ('news', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=False)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('index', wagtail.core.blocks.PageChooserBlock(page_type=['news.NewsIndexPage'])), ('items', wagtail.core.blocks.IntegerBlock())])), ('html', wagtail.core.blocks.RawHTMLBlock(group='Basic'))], 
blank=True), ), migrations.AlterField( model_name='homepage', name='body_sv', field=wagtail.core.fields.StreamField([('section', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')], help_text='Include padding for this section', required=False)), ('full_width', wagtail.core.blocks.BooleanBlock(help_text='Expand this section to full page width', required=False)), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', 
wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))])), ('columns', wagtail.core.blocks.StructBlock([('columns', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('width', wagtail.core.blocks.ChoiceBlock(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], help_text='Width out of 12')), ('content', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', 
wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', 
wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))]))]))])))])), ('two_column_grid', wagtail.core.blocks.StructBlock([('height', wagtail.core.blocks.IntegerBlock(default=400, help_text='Row height in px', max_value=800, min_value=1)), ('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('flip', wagtail.core.blocks.BooleanBlock(help_text='Swap position of image and paragraph', required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())]))])))])), ('countdown', wagtail.core.blocks.StructBlock([('id', wagtail.core.blocks.CharBlock()), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')])), ('expires', wagtail.core.blocks.DateTimeBlock()), ('pre_title', wagtail.core.blocks.CharBlock(required=False)), ('years_label', 
wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('months_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('days_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('hours_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('minutes_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('seconds_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('post_title', wagtail.core.blocks.CharBlock(required=False))])), ('contacts', wagtail.core.blocks.StructBlock([('contacts', wagtail.core.blocks.ListBlock(involvement.blocks.contact_card_block.ContactCardBlock()))])), ('events', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('show_facebook', wagtail.core.blocks.BooleanBlock(help_text='Whether to embed a Facebook page', required=False)), ('facebook_page_name', wagtail.core.blocks.CharBlock(help_text='Name of the page to show. (Must be public or accessible by the registered app_id)', required=False)), ('show_instagram', wagtail.core.blocks.BooleanBlock(help_text='Whether to show Instagram the last event from the registered Instagram feed', required=False)), ('instagram_account_name', wagtail.core.blocks.CharBlock(help_text='The username of the instagram account, without @. 
The profile must be public', label='Instagram username', required=False)), ('show_youtube', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the last video from a Youtube-channel', required=False)), ('youtube_channel_id', wagtail.core.blocks.CharBlock(required=False)), ('show_google_calendar', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the next few events from a google calendar', required=False)), ('google_calendar_id', wagtail.core.blocks.CharBlock(required=False))])), ('logos', wagtail.core.blocks.StructBlock([('logos', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('link', wagtail.core.blocks.URLBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False))])))])), ('counters', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock()), ('counters', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('icon', wagtail.core.blocks.CharBlock(help_text='Material icon font icon text, as found on: https://material.io/icons')), ('value', wagtail.core.blocks.CharBlock()), ('description', wagtail.core.blocks.CharBlock(required=False))]))), ('style', wagtail.core.blocks.ChoiceBlock(choices=[('light', 'Light'), ('dark', 'Dark')]))])), ('image_text_card', wagtail.core.blocks.StructBlock([('cards', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))])))]))]))])), ('divider', blocks.models.DividerBlock()), ('news', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=False)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('index', wagtail.core.blocks.PageChooserBlock(page_type=['news.NewsIndexPage'])), ('items', wagtail.core.blocks.IntegerBlock())])), ('html', wagtail.core.blocks.RawHTMLBlock(group='Basic'))], 
blank=True), ), migrations.AlterField( model_name='webpage', name='body_en', field=wagtail.core.fields.StreamField([('section', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')], help_text='Include padding for this section', required=False)), ('full_width', wagtail.core.blocks.BooleanBlock(help_text='Expand this section to full page width', required=False)), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', 
wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))])), ('columns', wagtail.core.blocks.StructBlock([('columns', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('width', wagtail.core.blocks.ChoiceBlock(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], help_text='Width out of 12')), ('content', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', 
wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', 
wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))]))]))])))])), ('two_column_grid', wagtail.core.blocks.StructBlock([('height', wagtail.core.blocks.IntegerBlock(default=400, help_text='Row height in px', max_value=800, min_value=1)), ('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('flip', wagtail.core.blocks.BooleanBlock(help_text='Swap position of image and paragraph', required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())]))])))])), ('countdown', wagtail.core.blocks.StructBlock([('id', wagtail.core.blocks.CharBlock()), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')])), ('expires', wagtail.core.blocks.DateTimeBlock()), ('pre_title', wagtail.core.blocks.CharBlock(required=False)), ('years_label', 
wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('months_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('days_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('hours_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('minutes_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('seconds_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('post_title', wagtail.core.blocks.CharBlock(required=False))])), ('contacts', wagtail.core.blocks.StructBlock([('contacts', wagtail.core.blocks.ListBlock(involvement.blocks.contact_card_block.ContactCardBlock()))])), ('events', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('show_facebook', wagtail.core.blocks.BooleanBlock(help_text='Whether to embed a Facebook page', required=False)), ('facebook_page_name', wagtail.core.blocks.CharBlock(help_text='Name of the page to show. (Must be public or accessible by the registered app_id)', required=False)), ('show_instagram', wagtail.core.blocks.BooleanBlock(help_text='Whether to show Instagram the last event from the registered Instagram feed', required=False)), ('instagram_account_name', wagtail.core.blocks.CharBlock(help_text='The username of the instagram account, without @. 
The profile must be public', label='Instagram username', required=False)), ('show_youtube', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the last video from a Youtube-channel', required=False)), ('youtube_channel_id', wagtail.core.blocks.CharBlock(required=False)), ('show_google_calendar', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the next few events from a google calendar', required=False)), ('google_calendar_id', wagtail.core.blocks.CharBlock(required=False))])), ('logos', wagtail.core.blocks.StructBlock([('logos', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('link', wagtail.core.blocks.URLBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False))])))])), ('counters', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock()), ('counters', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('icon', wagtail.core.blocks.CharBlock(help_text='Material icon font icon text, as found on: https://material.io/icons')), ('value', wagtail.core.blocks.CharBlock()), ('description', wagtail.core.blocks.CharBlock(required=False))]))), ('style', wagtail.core.blocks.ChoiceBlock(choices=[('light', 'Light'), ('dark', 'Dark')]))])), ('image_text_card', wagtail.core.blocks.StructBlock([('cards', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))])))]))]))])), ('divider', blocks.models.DividerBlock()), ('google_calendar', wagtail.core.blocks.StructBlock([('calendars', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('source', wagtail.core.blocks.CharBlock(help_text='Calendar ID as given by google calendar')), ('color', google.models.ColorBlock())]))), ('mode', wagtail.core.blocks.ChoiceBlock(choices=[('WEEK', 'Week'), ('', 'Month'), ('AGENDA', 
'Agenda')], required=False)), ('height', wagtail.core.blocks.IntegerBlock()), ('background_color', google.models.ColorBlock()), ('week_start', wagtail.core.blocks.ChoiceBlock(choices=[('2', 'Monday'), ('1', 'Sunday'), ('7', 'Saturday')]))])), ('google_drive', wagtail.core.blocks.StructBlock([('folder_id', wagtail.core.blocks.CharBlock()), ('view', wagtail.core.blocks.ChoiceBlock(choices=[('list', 'List'), ('grid', 'Grid')])), ('height', wagtail.core.blocks.IntegerBlock())])), ('google_form', wagtail.core.blocks.StructBlock([('form_id', wagtail.core.blocks.CharBlock()), ('height', wagtail.core.blocks.IntegerBlock())])), ('news', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=False)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('index', wagtail.core.blocks.PageChooserBlock(page_type=['news.NewsIndexPage'])), ('items', wagtail.core.blocks.IntegerBlock())])), ('html', wagtail.core.blocks.RawHTMLBlock(group='Basic')), ('eventbrite', wagtail.core.blocks.StructBlock([('eventbriteToken', wagtail.core.blocks.CharBlock(required=True))]))], blank=True), ), migrations.AlterField( model_name='webpage', name='body_sv', field=wagtail.core.fields.StreamField([('section', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')], help_text='Include padding for this section', required=False)), ('full_width', wagtail.core.blocks.BooleanBlock(help_text='Expand this section to full page width', required=False)), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), 
('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', 
wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))])), ('columns', wagtail.core.blocks.StructBlock([('columns', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('width', wagtail.core.blocks.ChoiceBlock(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], help_text='Width out of 12')), ('content', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', 
wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([])), ('Accordion', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('header', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.StreamBlock([('heading', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False))])), ('image', wagtail.core.blocks.StructBlock([('padding', wagtail.core.blocks.BooleanBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('height', wagtail.core.blocks.IntegerBlock(default=400, min_value=1))])), ('image_overlay', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False)), ('text_color', wagtail.core.blocks.ChoiceBlock(choices=[('text-light', 'Light'), ('text-dark', 'Dark')])), ('buttons', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))], required=False))])), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())])), ('divider', blocks.models.DividerBlock()), ('button_group', wagtail.core.blocks.StructBlock([('buttons', wagtail.core.blocks.ListBlock(blocks.models.ButtonBlock))])), ('icons', 
wagtail.core.blocks.StructBlock([('icons', wagtail.core.blocks.ListBlock(blocks.models.IconBlock))])), ('member_check', wagtail.core.blocks.StructBlock([]))]))])))]))]))])))])), ('two_column_grid', wagtail.core.blocks.StructBlock([('height', wagtail.core.blocks.IntegerBlock(default=400, help_text='Row height in px', max_value=800, min_value=1)), ('rows', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('flip', wagtail.core.blocks.BooleanBlock(help_text='Swap position of image and paragraph', required=False)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('paragraph', wagtail.core.blocks.StructBlock([('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('Left', 'Left'), ('Center', 'Center'), ('Right', 'Right')])), ('text', wagtail.core.blocks.RichTextBlock())]))])))])), ('countdown', wagtail.core.blocks.StructBlock([('id', wagtail.core.blocks.CharBlock()), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')])), ('expires', wagtail.core.blocks.DateTimeBlock()), ('pre_title', wagtail.core.blocks.CharBlock(required=False)), ('years_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('months_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('days_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('hours_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('minutes_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('seconds_label', wagtail.core.blocks.CharBlock(help_text='leave empty to skip this counter in the countdown', required=False)), ('post_title', wagtail.core.blocks.CharBlock(required=False))])), ('contacts', wagtail.core.blocks.StructBlock([('contacts', 
wagtail.core.blocks.ListBlock(involvement.blocks.contact_card_block.ContactCardBlock()))])), ('events', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('show_facebook', wagtail.core.blocks.BooleanBlock(help_text='Whether to embed a Facebook page', required=False)), ('facebook_page_name', wagtail.core.blocks.CharBlock(help_text='Name of the page to show. (Must be public or accessible by the registered app_id)', required=False)), ('show_instagram', wagtail.core.blocks.BooleanBlock(help_text='Whether to show Instagram the last event from the registered Instagram feed', required=False)), ('instagram_account_name', wagtail.core.blocks.CharBlock(help_text='The username of the instagram account, without @. The profile must be public', label='Instagram username', required=False)), ('show_youtube', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the last video from a Youtube-channel', required=False)), ('youtube_channel_id', wagtail.core.blocks.CharBlock(required=False)), ('show_google_calendar', wagtail.core.blocks.BooleanBlock(help_text='Whether to show the next few events from a google calendar', required=False)), ('google_calendar_id', wagtail.core.blocks.CharBlock(required=False))])), ('logos', wagtail.core.blocks.StructBlock([('logos', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('link', wagtail.core.blocks.URLBlock(required=False)), ('description', wagtail.core.blocks.CharBlock(required=False))])))])), ('counters', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock()), ('counters', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('icon', wagtail.core.blocks.CharBlock(help_text='Material icon font icon text, as found on: https://material.io/icons')), ('value', wagtail.core.blocks.CharBlock()), ('description', wagtail.core.blocks.CharBlock(required=False))]))), ('style', 
wagtail.core.blocks.ChoiceBlock(choices=[('light', 'Light'), ('dark', 'Dark')]))])), ('image_text_card', wagtail.core.blocks.StructBlock([('cards', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock()), ('text', wagtail.core.blocks.RichTextBlock(required=False))])))]))]))])), ('divider', blocks.models.DividerBlock()), ('google_calendar', wagtail.core.blocks.StructBlock([('calendars', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('source', wagtail.core.blocks.CharBlock(help_text='Calendar ID as given by google calendar')), ('color', google.models.ColorBlock())]))), ('mode', wagtail.core.blocks.ChoiceBlock(choices=[('WEEK', 'Week'), ('', 'Month'), ('AGENDA', 'Agenda')], required=False)), ('height', wagtail.core.blocks.IntegerBlock()), ('background_color', google.models.ColorBlock()), ('week_start', wagtail.core.blocks.ChoiceBlock(choices=[('2', 'Monday'), ('1', 'Sunday'), ('7', 'Saturday')]))])), ('google_drive', wagtail.core.blocks.StructBlock([('folder_id', wagtail.core.blocks.CharBlock()), ('view', wagtail.core.blocks.ChoiceBlock(choices=[('list', 'List'), ('grid', 'Grid')])), ('height', wagtail.core.blocks.IntegerBlock())])), ('google_form', wagtail.core.blocks.StructBlock([('form_id', wagtail.core.blocks.CharBlock()), ('height', wagtail.core.blocks.IntegerBlock())])), ('news', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=False)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('index', wagtail.core.blocks.PageChooserBlock(page_type=['news.NewsIndexPage'])), ('items', wagtail.core.blocks.IntegerBlock())])), ('html', wagtail.core.blocks.RawHTMLBlock(group='Basic')), ('eventbrite', wagtail.core.blocks.StructBlock([('eventbriteToken', wagtail.core.blocks.CharBlock(required=True))]))], blank=True), ), ]
timj/scons
refs/heads/master
test/Install/Clone.py
5
#!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" """ Verify that we can Install() and InstallAs() from a construction environment cloned from a clone. """ import TestSCons test = TestSCons.TestSCons() test.write('SConstruct', """ env1 = Environment(DESTDIR='sub1', tools=[]) # Call env1.Install() but not env1.InstallAs() *before* we clone it. # This is to verify that re-initializing the Install() attribute on the # construction environment doesn't mess up the environment settings in # a way that leaves the InstallAs() intializer in place, which leads to # infinite recursion. 
env1.Install('$DESTDIR', 'foo.in') env2 = env1.Clone(DESTDIR='sub2') env3 = env2.Clone(DESTDIR='sub3') env2.Install('$DESTDIR', 'foo.in') env3.Install('$DESTDIR', 'foo.in') env1.InstallAs('$DESTDIR/foo.out', 'foo.in') env2.InstallAs('$DESTDIR/foo.out', 'foo.in') env3.InstallAs('$DESTDIR/foo.out', 'foo.in') """) test.write('foo.in', "foo.in\n") test.run(arguments = '.') test.must_match(['sub1', 'foo.in'], "foo.in\n") test.must_match(['sub2', 'foo.in'], "foo.in\n") test.must_match(['sub3', 'foo.in'], "foo.in\n") test.must_match(['sub1', 'foo.out'], "foo.in\n") test.must_match(['sub2', 'foo.out'], "foo.in\n") test.must_match(['sub3', 'foo.out'], "foo.in\n") test.pass_test() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
sharkykh/SickRage
refs/heads/develop
lib/tornado/test/twisted_test.py
18
# Author: Ovidiu Predescu # Date: July 2011 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unittest for the twisted-style reactor. """ from __future__ import absolute_import, division, print_function import logging import os import shutil import signal import sys import tempfile import threading import warnings from tornado.escape import utf8 from tornado import gen from tornado.httpclient import AsyncHTTPClient from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop from tornado.platform.auto import set_close_exec from tornado.platform.select import SelectIOLoop from tornado.testing import bind_unused_port from tornado.test.util import unittest from tornado.util import import_object, PY3 from tornado.web import RequestHandler, Application try: import fcntl from twisted.internet.defer import Deferred, inlineCallbacks, returnValue # type: ignore from twisted.internet.interfaces import IReadDescriptor, IWriteDescriptor # type: ignore from twisted.internet.protocol import Protocol # type: ignore from twisted.python import log # type: ignore from tornado.platform.twisted import TornadoReactor, TwistedIOLoop from zope.interface import implementer # type: ignore have_twisted = True except ImportError: have_twisted = False # The core of Twisted 12.3.0 is available on python 3, but twisted.web is not # so test for it separately. 
try: from twisted.web.client import Agent, readBody # type: ignore from twisted.web.resource import Resource # type: ignore from twisted.web.server import Site # type: ignore # As of Twisted 15.0.0, twisted.web is present but fails our # tests due to internal str/bytes errors. have_twisted_web = sys.version_info < (3,) except ImportError: have_twisted_web = False if PY3: import _thread as thread else: import thread skipIfNoTwisted = unittest.skipUnless(have_twisted, "twisted module not present") skipIfPy26 = unittest.skipIf(sys.version_info < (2, 7), "twisted incompatible with singledispatch in py26") def save_signal_handlers(): saved = {} for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGCHLD]: saved[sig] = signal.getsignal(sig) if "twisted" in repr(saved): if not issubclass(IOLoop.configured_class(), TwistedIOLoop): # when the global ioloop is twisted, we expect the signal # handlers to be installed. Otherwise, it means we're not # cleaning up after twisted properly. raise Exception("twisted signal handlers already installed") return saved def restore_signal_handlers(saved): for sig, handler in saved.items(): signal.signal(sig, handler) class ReactorTestCase(unittest.TestCase): def setUp(self): self._saved_signals = save_signal_handlers() self._io_loop = IOLoop() self._reactor = TornadoReactor(self._io_loop) def tearDown(self): self._io_loop.close(all_fds=True) restore_signal_handlers(self._saved_signals) @skipIfNoTwisted class ReactorWhenRunningTest(ReactorTestCase): def test_whenRunning(self): self._whenRunningCalled = False self._anotherWhenRunningCalled = False self._reactor.callWhenRunning(self.whenRunningCallback) self._reactor.run() self.assertTrue(self._whenRunningCalled) self.assertTrue(self._anotherWhenRunningCalled) def whenRunningCallback(self): self._whenRunningCalled = True self._reactor.callWhenRunning(self.anotherWhenRunningCallback) self._reactor.stop() def anotherWhenRunningCallback(self): self._anotherWhenRunningCalled = True 
@skipIfNoTwisted class ReactorCallLaterTest(ReactorTestCase): def test_callLater(self): self._laterCalled = False self._now = self._reactor.seconds() self._timeout = 0.001 dc = self._reactor.callLater(self._timeout, self.callLaterCallback) self.assertEqual(self._reactor.getDelayedCalls(), [dc]) self._reactor.run() self.assertTrue(self._laterCalled) self.assertTrue(self._called - self._now > self._timeout) self.assertEqual(self._reactor.getDelayedCalls(), []) def callLaterCallback(self): self._laterCalled = True self._called = self._reactor.seconds() self._reactor.stop() @skipIfNoTwisted class ReactorTwoCallLaterTest(ReactorTestCase): def test_callLater(self): self._later1Called = False self._later2Called = False self._now = self._reactor.seconds() self._timeout1 = 0.0005 dc1 = self._reactor.callLater(self._timeout1, self.callLaterCallback1) self._timeout2 = 0.001 dc2 = self._reactor.callLater(self._timeout2, self.callLaterCallback2) self.assertTrue(self._reactor.getDelayedCalls() == [dc1, dc2] or self._reactor.getDelayedCalls() == [dc2, dc1]) self._reactor.run() self.assertTrue(self._later1Called) self.assertTrue(self._later2Called) self.assertTrue(self._called1 - self._now > self._timeout1) self.assertTrue(self._called2 - self._now > self._timeout2) self.assertEqual(self._reactor.getDelayedCalls(), []) def callLaterCallback1(self): self._later1Called = True self._called1 = self._reactor.seconds() def callLaterCallback2(self): self._later2Called = True self._called2 = self._reactor.seconds() self._reactor.stop() @skipIfNoTwisted class ReactorCallFromThreadTest(ReactorTestCase): def setUp(self): super(ReactorCallFromThreadTest, self).setUp() self._mainThread = thread.get_ident() def tearDown(self): self._thread.join() super(ReactorCallFromThreadTest, self).tearDown() def _newThreadRun(self): self.assertNotEqual(self._mainThread, thread.get_ident()) if hasattr(self._thread, 'ident'): # new in python 2.6 self.assertEqual(self._thread.ident, thread.get_ident()) 
self._reactor.callFromThread(self._fnCalledFromThread) def _fnCalledFromThread(self): self.assertEqual(self._mainThread, thread.get_ident()) self._reactor.stop() def _whenRunningCallback(self): self._thread = threading.Thread(target=self._newThreadRun) self._thread.start() def testCallFromThread(self): self._reactor.callWhenRunning(self._whenRunningCallback) self._reactor.run() @skipIfNoTwisted class ReactorCallInThread(ReactorTestCase): def setUp(self): super(ReactorCallInThread, self).setUp() self._mainThread = thread.get_ident() def _fnCalledInThread(self, *args, **kwargs): self.assertNotEqual(thread.get_ident(), self._mainThread) self._reactor.callFromThread(lambda: self._reactor.stop()) def _whenRunningCallback(self): self._reactor.callInThread(self._fnCalledInThread) def testCallInThread(self): self._reactor.callWhenRunning(self._whenRunningCallback) self._reactor.run() if have_twisted: @implementer(IReadDescriptor) class Reader(object): def __init__(self, fd, callback): self._fd = fd self._callback = callback def logPrefix(self): return "Reader" def close(self): self._fd.close() def fileno(self): return self._fd.fileno() def readConnectionLost(self, reason): self.close() def connectionLost(self, reason): self.close() def doRead(self): self._callback(self._fd) @implementer(IWriteDescriptor) class Writer(object): def __init__(self, fd, callback): self._fd = fd self._callback = callback def logPrefix(self): return "Writer" def close(self): self._fd.close() def fileno(self): return self._fd.fileno() def connectionLost(self, reason): self.close() def doWrite(self): self._callback(self._fd) @skipIfNoTwisted class ReactorReaderWriterTest(ReactorTestCase): def _set_nonblocking(self, fd): flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) def setUp(self): super(ReactorReaderWriterTest, self).setUp() r, w = os.pipe() self._set_nonblocking(r) self._set_nonblocking(w) set_close_exec(r) set_close_exec(w) self._p1 = os.fdopen(r, 
"rb", 0) self._p2 = os.fdopen(w, "wb", 0) def tearDown(self): super(ReactorReaderWriterTest, self).tearDown() self._p1.close() self._p2.close() def _testReadWrite(self): """ In this test the writer writes an 'x' to its fd. The reader reads it, check the value and ends the test. """ self.shouldWrite = True def checkReadInput(fd): self.assertEquals(fd.read(1), b'x') self._reactor.stop() def writeOnce(fd): if self.shouldWrite: self.shouldWrite = False fd.write(b'x') self._reader = Reader(self._p1, checkReadInput) self._writer = Writer(self._p2, writeOnce) self._reactor.addWriter(self._writer) # Test that adding the reader twice adds it only once to # IOLoop. self._reactor.addReader(self._reader) self._reactor.addReader(self._reader) def testReadWrite(self): self._reactor.callWhenRunning(self._testReadWrite) self._reactor.run() def _testNoWriter(self): """ In this test we have no writer. Make sure the reader doesn't read anything. """ def checkReadInput(fd): self.fail("Must not be called.") def stopTest(): # Close the writer here since the IOLoop doesn't know # about it. self._writer.close() self._reactor.stop() self._reader = Reader(self._p1, checkReadInput) # We create a writer, but it should never be invoked. self._writer = Writer(self._p2, lambda fd: fd.write('x')) # Test that adding and removing the writer leaves us with no writer. self._reactor.addWriter(self._writer) self._reactor.removeWriter(self._writer) # Test that adding and removing the reader doesn't cause # unintended effects. self._reactor.addReader(self._reader) # Wake up after a moment and stop the test self._reactor.callLater(0.001, stopTest) def testNoWriter(self): self._reactor.callWhenRunning(self._testNoWriter) self._reactor.run() # Test various combinations of twisted and tornado http servers, # http clients, and event loop interfaces. 
@skipIfNoTwisted @unittest.skipIf(not have_twisted_web, 'twisted web not present') class CompatibilityTests(unittest.TestCase): def setUp(self): self.saved_signals = save_signal_handlers() self.io_loop = IOLoop() self.io_loop.make_current() self.reactor = TornadoReactor(self.io_loop) def tearDown(self): self.reactor.disconnectAll() self.io_loop.clear_current() self.io_loop.close(all_fds=True) restore_signal_handlers(self.saved_signals) def start_twisted_server(self): class HelloResource(Resource): isLeaf = True def render_GET(self, request): return "Hello from twisted!" site = Site(HelloResource()) port = self.reactor.listenTCP(0, site, interface='127.0.0.1') self.twisted_port = port.getHost().port def start_tornado_server(self): class HelloHandler(RequestHandler): def get(self): self.write("Hello from tornado!") app = Application([('/', HelloHandler)], log_function=lambda x: None) server = HTTPServer(app, io_loop=self.io_loop) sock, self.tornado_port = bind_unused_port() server.add_sockets([sock]) def run_ioloop(self): self.stop_loop = self.io_loop.stop self.io_loop.start() self.reactor.fireSystemEvent('shutdown') def run_reactor(self): self.stop_loop = self.reactor.stop self.stop = self.reactor.stop self.reactor.run() def tornado_fetch(self, url, runner): responses = [] client = AsyncHTTPClient(self.io_loop) def callback(response): responses.append(response) self.stop_loop() client.fetch(url, callback=callback) runner() self.assertEqual(len(responses), 1) responses[0].rethrow() return responses[0] def twisted_fetch(self, url, runner): # http://twistedmatrix.com/documents/current/web/howto/client.html chunks = [] client = Agent(self.reactor) d = client.request(b'GET', utf8(url)) class Accumulator(Protocol): def __init__(self, finished): self.finished = finished def dataReceived(self, data): chunks.append(data) def connectionLost(self, reason): self.finished.callback(None) def callback(response): finished = Deferred() response.deliverBody(Accumulator(finished)) 
return finished d.addCallback(callback) def shutdown(failure): if hasattr(self, 'stop_loop'): self.stop_loop() elif failure is not None: # loop hasn't been initialized yet; try our best to # get an error message out. (the runner() interaction # should probably be refactored). try: failure.raiseException() except: logging.error('exception before starting loop', exc_info=True) d.addBoth(shutdown) runner() self.assertTrue(chunks) return ''.join(chunks) def twisted_coroutine_fetch(self, url, runner): body = [None] @gen.coroutine def f(): # This is simpler than the non-coroutine version, but it cheats # by reading the body in one blob instead of streaming it with # a Protocol. client = Agent(self.reactor) response = yield client.request(b'GET', utf8(url)) with warnings.catch_warnings(): # readBody has a buggy DeprecationWarning in Twisted 15.0: # https://twistedmatrix.com/trac/changeset/43379 warnings.simplefilter('ignore', category=DeprecationWarning) body[0] = yield readBody(response) self.stop_loop() self.io_loop.add_callback(f) runner() return body[0] def testTwistedServerTornadoClientIOLoop(self): self.start_twisted_server() response = self.tornado_fetch( 'http://127.0.0.1:%d' % self.twisted_port, self.run_ioloop) self.assertEqual(response.body, 'Hello from twisted!') def testTwistedServerTornadoClientReactor(self): self.start_twisted_server() response = self.tornado_fetch( 'http://127.0.0.1:%d' % self.twisted_port, self.run_reactor) self.assertEqual(response.body, 'Hello from twisted!') def testTornadoServerTwistedClientIOLoop(self): self.start_tornado_server() response = self.twisted_fetch( 'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop) self.assertEqual(response, 'Hello from tornado!') def testTornadoServerTwistedClientReactor(self): self.start_tornado_server() response = self.twisted_fetch( 'http://127.0.0.1:%d' % self.tornado_port, self.run_reactor) self.assertEqual(response, 'Hello from tornado!') @skipIfPy26 def 
testTornadoServerTwistedCoroutineClientIOLoop(self): self.start_tornado_server() response = self.twisted_coroutine_fetch( 'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop) self.assertEqual(response, 'Hello from tornado!') @skipIfNoTwisted @skipIfPy26 class ConvertDeferredTest(unittest.TestCase): def test_success(self): @inlineCallbacks def fn(): if False: # inlineCallbacks doesn't work with regular functions; # must have a yield even if it's unreachable. yield returnValue(42) f = gen.convert_yielded(fn()) self.assertEqual(f.result(), 42) def test_failure(self): @inlineCallbacks def fn(): if False: yield 1 / 0 f = gen.convert_yielded(fn()) with self.assertRaises(ZeroDivisionError): f.result() if have_twisted: # Import and run as much of twisted's test suite as possible. # This is unfortunately rather dependent on implementation details, # but there doesn't appear to be a clean all-in-one conformance test # suite for reactors. # # This is a list of all test suites using the ReactorBuilder # available in Twisted 11.0.0 and 11.1.0 (and a blacklist of # specific test methods to be disabled). twisted_tests = { 'twisted.internet.test.test_core.ObjectModelIntegrationTest': [], 'twisted.internet.test.test_core.SystemEventTestsBuilder': [ 'test_iterate', # deliberately not supported # Fails on TwistedIOLoop and AsyncIOLoop. 'test_runAfterCrash', ], 'twisted.internet.test.test_fdset.ReactorFDSetTestsBuilder': [ "test_lostFileDescriptor", # incompatible with epoll and kqueue ], 'twisted.internet.test.test_process.ProcessTestsBuilder': [ # Only work as root. Twisted's "skip" functionality works # with py27+, but not unittest2 on py26. 'test_changeGID', 'test_changeUID', # This test sometimes fails with EPIPE on a call to # kqueue.control. Happens consistently for me with # trollius but not asyncio or other IOLoops. 
'test_childConnectionLost', ], # Process tests appear to work on OSX 10.7, but not 10.6 # 'twisted.internet.test.test_process.PTYProcessTestsBuilder': [ # 'test_systemCallUninterruptedByChildExit', # ], 'twisted.internet.test.test_tcp.TCPClientTestsBuilder': [ 'test_badContext', # ssl-related; see also SSLClientTestsMixin ], 'twisted.internet.test.test_tcp.TCPPortTestsBuilder': [ # These use link-local addresses and cause firewall prompts on mac 'test_buildProtocolIPv6AddressScopeID', 'test_portGetHostOnIPv6ScopeID', 'test_serverGetHostOnIPv6ScopeID', 'test_serverGetPeerOnIPv6ScopeID', ], 'twisted.internet.test.test_tcp.TCPConnectionTestsBuilder': [], 'twisted.internet.test.test_tcp.WriteSequenceTests': [], 'twisted.internet.test.test_tcp.AbortConnectionTestCase': [], 'twisted.internet.test.test_threads.ThreadTestsBuilder': [], 'twisted.internet.test.test_time.TimeTestsBuilder': [], # Extra third-party dependencies (pyOpenSSL) # 'twisted.internet.test.test_tls.SSLClientTestsMixin': [], 'twisted.internet.test.test_udp.UDPServerTestsBuilder': [], 'twisted.internet.test.test_unix.UNIXTestsBuilder': [ # Platform-specific. These tests would be skipped automatically # if we were running twisted's own test runner. 
'test_connectToLinuxAbstractNamespace', 'test_listenOnLinuxAbstractNamespace', # These tests use twisted's sendmsg.c extension and sometimes # fail with what looks like uninitialized memory errors # (more common on pypy than cpython, but I've seen it on both) 'test_sendFileDescriptor', 'test_sendFileDescriptorTriggersPauseProducing', 'test_descriptorDeliveredBeforeBytes', 'test_avoidLeakingFileDescriptors', ], 'twisted.internet.test.test_unix.UNIXDatagramTestsBuilder': [ 'test_listenOnLinuxAbstractNamespace', ], 'twisted.internet.test.test_unix.UNIXPortTestsBuilder': [], } if sys.version_info >= (3,): # In Twisted 15.2.0 on Python 3.4, the process tests will try to run # but fail, due in part to interactions between Tornado's strict # warnings-as-errors policy and Twisted's own warning handling # (it was not obvious how to configure the warnings module to # reconcile the two), and partly due to what looks like a packaging # error (process_cli.py missing). For now, just skip it. del twisted_tests['twisted.internet.test.test_process.ProcessTestsBuilder'] for test_name, blacklist in twisted_tests.items(): try: test_class = import_object(test_name) except (ImportError, AttributeError): continue for test_func in blacklist: # type: ignore if hasattr(test_class, test_func): # The test_func may be defined in a mixin, so clobber # it instead of delattr() setattr(test_class, test_func, lambda self: None) def make_test_subclass(test_class): class TornadoTest(test_class): # type: ignore _reactors = ["tornado.platform.twisted._TestReactor"] def setUp(self): # Twisted's tests expect to be run from a temporary # directory; they create files in their working directory # and don't always clean up after themselves. 
self.__curdir = os.getcwd() self.__tempdir = tempfile.mkdtemp() os.chdir(self.__tempdir) super(TornadoTest, self).setUp() # type: ignore def tearDown(self): super(TornadoTest, self).tearDown() # type: ignore os.chdir(self.__curdir) shutil.rmtree(self.__tempdir) def flushWarnings(self, *args, **kwargs): # This is a hack because Twisted and Tornado have # differing approaches to warnings in tests. # Tornado sets up a global set of warnings filters # in runtests.py, while Twisted patches the filter # list in each test. The net effect is that # Twisted's tests run with Tornado's increased # strictness (BytesWarning and ResourceWarning are # enabled) but without our filter rules to ignore those # warnings from Twisted code. filtered = [] for w in super(TornadoTest, self).flushWarnings( # type: ignore *args, **kwargs): if w['category'] in (BytesWarning, ResourceWarning): continue filtered.append(w) return filtered def buildReactor(self): self.__saved_signals = save_signal_handlers() return test_class.buildReactor(self) def unbuildReactor(self, reactor): test_class.unbuildReactor(self, reactor) # Clean up file descriptors (especially epoll/kqueue # objects) eagerly instead of leaving them for the # GC. Unfortunately we can't do this in reactor.stop # since twisted expects to be able to unregister # connections in a post-shutdown hook. reactor._io_loop.close(all_fds=True) restore_signal_handlers(self.__saved_signals) TornadoTest.__name__ = test_class.__name__ return TornadoTest test_subclass = make_test_subclass(test_class) globals().update(test_subclass.makeTestCaseClasses()) # Since we're not using twisted's test runner, it's tricky to get # logging set up well. Most of the time it's easiest to just # leave it turned off, but while working on these tests you may want # to uncomment one of the other lines instead. 
log.defaultObserver.stop() # import sys; log.startLogging(sys.stderr, setStdout=0) # log.startLoggingWithObserver(log.PythonLoggingObserver().emit, setStdout=0) # import logging; logging.getLogger('twisted').setLevel(logging.WARNING) # Twisted recently introduced a new logger; disable that one too. try: from twisted.logger import globalLogBeginner # type: ignore except ImportError: pass else: globalLogBeginner.beginLoggingTo([], redirectStandardIO=False) if have_twisted: class LayeredTwistedIOLoop(TwistedIOLoop): """Layers a TwistedIOLoop on top of a TornadoReactor on a SelectIOLoop. This is of course silly, but is useful for testing purposes to make sure we're implementing both sides of the various interfaces correctly. In some tests another TornadoReactor is layered on top of the whole stack. """ def initialize(self, **kwargs): # When configured to use LayeredTwistedIOLoop we can't easily # get the next-best IOLoop implementation, so use the lowest common # denominator. self.real_io_loop = SelectIOLoop(make_current=False) # type: ignore reactor = TornadoReactor(io_loop=self.real_io_loop) super(LayeredTwistedIOLoop, self).initialize(reactor=reactor, **kwargs) self.add_callback(self.make_current) def close(self, all_fds=False): super(LayeredTwistedIOLoop, self).close(all_fds=all_fds) # HACK: This is the same thing that test_class.unbuildReactor does. for reader in self.reactor._internalReaders: self.reactor.removeReader(reader) reader.connectionLost(None) self.real_io_loop.close(all_fds=all_fds) def stop(self): # One of twisted's tests fails if I don't delay crash() # until the reactor has started, but if I move this to # TwistedIOLoop then the tests fail when I'm *not* running # tornado-on-twisted-on-tornado. I'm clearly missing something # about the startup/crash semantics, but since stop and crash # are really only used in tests it doesn't really matter. def f(): self.reactor.crash() # Become current again on restart. 
This is needed to # override real_io_loop's claim to being the current loop. self.add_callback(self.make_current) self.reactor.callWhenRunning(f) if __name__ == "__main__": unittest.main()
abenzbiria/clients_odoo
refs/heads/master
addons/hr_attendance/res_config.py
434
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class hr_attendance_config_settings(osv.osv_memory): _inherit = 'hr.config.settings' _columns = { 'group_hr_attendance': fields.boolean('Track attendances for all employees', implied_group='base.group_hr_attendance', help="Allocates attendance group to all users."), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
kyle-eshares/django-strict
refs/heads/master
strict_app/bibliotek/tests.py
24123
from django.test import TestCase # Create your tests here.
leiferikb/bitpop
refs/heads/master
build/third_party/twisted_10_2/twisted/conch/test/__init__.py
147
'conch tests'
luvit/gyp
refs/heads/luvit-dev
pylib/gyp/input_test.py
604
#!/usr/bin/env python # Copyright 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for the input.py file.""" import gyp.input import unittest import sys class TestFindCycles(unittest.TestCase): def setUp(self): self.nodes = {} for x in ('a', 'b', 'c', 'd', 'e'): self.nodes[x] = gyp.input.DependencyGraphNode(x) def _create_dependency(self, dependent, dependency): dependent.dependencies.append(dependency) dependency.dependents.append(dependent) def test_no_cycle_empty_graph(self): for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_no_cycle_line(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['c']) self._create_dependency(self.nodes['c'], self.nodes['d']) for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_no_cycle_dag(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['a'], self.nodes['c']) self._create_dependency(self.nodes['b'], self.nodes['c']) for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_cycle_self_reference(self): self._create_dependency(self.nodes['a'], self.nodes['a']) self.assertEquals([(self.nodes['a'], self.nodes['a'])], self.nodes['a'].FindCycles()) def test_cycle_two_nodes(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['a']) self.assertEquals([(self.nodes['a'], self.nodes['b'], self.nodes['a'])], self.nodes['a'].FindCycles()) self.assertEquals([(self.nodes['b'], self.nodes['a'], self.nodes['b'])], self.nodes['b'].FindCycles()) def test_two_cycles(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['a']) self._create_dependency(self.nodes['b'], self.nodes['c']) 
self._create_dependency(self.nodes['c'], self.nodes['b']) cycles = self.nodes['a'].FindCycles() self.assertTrue( (self.nodes['a'], self.nodes['b'], self.nodes['a']) in cycles) self.assertTrue( (self.nodes['b'], self.nodes['c'], self.nodes['b']) in cycles) self.assertEquals(2, len(cycles)) def test_big_cycle(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['c']) self._create_dependency(self.nodes['c'], self.nodes['d']) self._create_dependency(self.nodes['d'], self.nodes['e']) self._create_dependency(self.nodes['e'], self.nodes['a']) self.assertEquals([(self.nodes['a'], self.nodes['b'], self.nodes['c'], self.nodes['d'], self.nodes['e'], self.nodes['a'])], self.nodes['a'].FindCycles()) if __name__ == '__main__': unittest.main()
lucafavatella/intellij-community
refs/heads/cli-wip
python/testData/refactoring/extractmethod/WrongSelectionFromImportStar.before.py
83
<selection>from mymodule import *</selection>
davidzchen/tensorflow
refs/heads/master
tensorflow/python/keras/layers/convolutional_transpose_test.py
5
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for convolutional transpose layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.platform import test @keras_parameterized.run_all_keras_modes class Conv2DTransposeTest(keras_parameterized.TestCase): def _run_test(self, kwargs): num_samples = 2 stack_size = 3 num_row = 7 num_col = 6 with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.Conv2DTranspose, kwargs=kwargs, input_shape=(num_samples, num_row, num_col, stack_size)) @parameterized.named_parameters( ('padding_valid', {'padding': 'valid'}), ('padding_same', {'padding': 'same'}), ('strides', {'strides': (2, 2)}), # Only runs on GPU with CUDA, channels_first is not supported on CPU. # TODO(b/62340061): Support channels_first on CPU. 
('data_format', {'data_format': 'channels_first'}), ('strides_output_padding', {'strides': (2, 2), 'output_padding': (1, 1)}), ) def test_conv2d_transpose(self, kwargs): kwargs['filters'] = 2 kwargs['kernel_size'] = (3, 3) if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True): self._run_test(kwargs) def test_conv2d_transpose_regularizers(self): kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv2DTranspose(**kwargs) layer.build((None, 5, 5, 2)) self.assertEqual(len(layer.losses), 2) layer(keras.backend.variable(np.ones((1, 5, 5, 2)))) self.assertEqual(len(layer.losses), 3) def test_conv2d_transpose_constraints(self): k_constraint = lambda x: x b_constraint = lambda x: x kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_constraint': k_constraint, 'bias_constraint': b_constraint, 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv2DTranspose(**kwargs) layer.build((None, 5, 5, 2)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) def test_conv2d_transpose_dilation(self): testing_utils.layer_test(keras.layers.Conv2DTranspose, kwargs={'filters': 2, 'kernel_size': 3, 'padding': 'same', 'data_format': 'channels_last', 'dilation_rate': (2, 2)}, input_shape=(2, 5, 6, 3)) input_data = np.arange(48).reshape((1, 4, 4, 3)).astype(np.float32) expected_output = np.float32([[192, 228, 192, 228], [336, 372, 336, 372], [192, 228, 192, 228], [336, 372, 336, 372]]).reshape((1, 4, 4, 1)) testing_utils.layer_test(keras.layers.Conv2DTranspose, input_data=input_data, kwargs={'filters': 1, 'kernel_size': 3, 'padding': 'same', 'data_format': 'channels_last', 'dilation_rate': (2, 2), 'kernel_initializer': 'ones'}, expected_output=expected_output) @keras_parameterized.run_all_keras_modes class 
Conv3DTransposeTest(keras_parameterized.TestCase): def _run_test(self, kwargs): num_samples = 2 stack_size = 3 num_row = 7 num_col = 6 depth = 5 with self.cached_session(use_gpu=True): testing_utils.layer_test( keras.layers.Conv3DTranspose, kwargs=kwargs, input_shape=(num_samples, depth, num_row, num_col, stack_size)) @parameterized.named_parameters( ('padding_valid', {'padding': 'valid'}), ('padding_same', {'padding': 'same'}), ('strides', {'strides': (2, 2, 2)}), # Only runs on GPU with CUDA, channels_first is not supported on CPU. # TODO(b/62340061): Support channels_first on CPU. ('data_format', {'data_format': 'channels_first'}), ('strides_output_padding', {'strides': (2, 2, 2), 'output_padding': (1, 1, 1)}), ) def test_conv3d_transpose(self, kwargs): kwargs['filters'] = 2 kwargs['kernel_size'] = (3, 3, 3) if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True): self._run_test(kwargs) def test_conv3d_transpose_regularizers(self): kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv3DTranspose(**kwargs) layer.build((None, 5, 5, 5, 2)) self.assertEqual(len(layer.losses), 2) layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2)))) self.assertEqual(len(layer.losses), 3) def test_conv3d_transpose_constraints(self): k_constraint = lambda x: x b_constraint = lambda x: x kwargs = { 'filters': 3, 'kernel_size': 3, 'padding': 'valid', 'kernel_constraint': k_constraint, 'bias_constraint': b_constraint, 'strides': 1 } with self.cached_session(use_gpu=True): layer = keras.layers.Conv3DTranspose(**kwargs) layer.build((None, 5, 5, 5, 2)) self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) def test_conv3d_transpose_dynamic_shape(self): input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32) with 
self.cached_session(use_gpu=True): # Won't raise error here. testing_utils.layer_test( keras.layers.Conv3DTranspose, kwargs={ 'data_format': 'channels_last', 'filters': 3, 'kernel_size': 3 }, input_shape=(None, None, None, None, 3), input_data=input_data) if test.is_gpu_available(cuda_only=True): testing_utils.layer_test( keras.layers.Conv3DTranspose, kwargs={ 'data_format': 'channels_first', 'filters': 3, 'kernel_size': 3 }, input_shape=(None, 3, None, None, None), input_data=input_data) if __name__ == '__main__': test.main()
classcat/cctf
refs/heads/master
cctf/datasets/mnist.py
1
"""Functions for downloading and reading MNIST data. Credits: Y. LeCun. http://yann.lecun.com/exdb/mnist/. """ from __future__ import print_function import gzip import os from six.moves import urllib import numpy SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/' def load_data(one_hot=False): mnist = read_data_sets("mnist/", one_hot=one_hot) return mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels def maybe_download(filename, work_directory): """Download the data from Yann's website, unless it's already here.""" if not os.path.exists(work_directory): os.mkdir(work_directory) filepath = os.path.join(work_directory, filename) if not os.path.exists(filepath): print('Downloading MNIST...') filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath) statinfo = os.stat(filepath) print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.') return filepath def _read32(bytestream): dt = numpy.dtype(numpy.uint32).newbyteorder('>') return numpy.frombuffer(bytestream.read(4), dtype=dt)[0] def extract_images(filename): """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" print('Extracting', filename) with gzip.open(filename) as bytestream: magic = _read32(bytestream) if magic != 2051: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, filename)) num_images = _read32(bytestream) rows = _read32(bytestream) cols = _read32(bytestream) buf = bytestream.read(rows * cols * num_images) data = numpy.frombuffer(buf, dtype=numpy.uint8) data = data.reshape(num_images, rows, cols, 1) return data def dense_to_one_hot(labels_dense, num_classes=10): """Convert class labels from scalars to one-hot vectors.""" num_labels = labels_dense.shape[0] index_offset = numpy.arange(num_labels) * num_classes labels_one_hot = numpy.zeros((num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 return labels_one_hot def extract_labels(filename, one_hot=False): """Extract the labels 
into a 1D uint8 numpy array [index].""" print('Extracting', filename) with gzip.open(filename) as bytestream: magic = _read32(bytestream) if magic != 2049: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, filename)) num_items = _read32(bytestream) buf = bytestream.read(num_items) labels = numpy.frombuffer(buf, dtype=numpy.uint8) if one_hot: return dense_to_one_hot(labels) return labels class DataSet(object): def __init__(self, images, labels, fake_data=False): if fake_data: self._num_examples = 10000 else: assert images.shape[0] == labels.shape[0], ( "images.shape: %s labels.shape: %s" % (images.shape, labels.shape)) self._num_examples = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) assert images.shape[3] == 1 images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]) # Convert from [0, 255] -> [0.0, 1.0]. images = images.astype(numpy.float32) images = numpy.multiply(images, 1.0 / 255.0) self._images = images self._labels = labels self._epochs_completed = 0 self._index_in_epoch = 0 @property def images(self): return self._images @property def labels(self): return self._labels @property def num_examples(self): return self._num_examples @property def epochs_completed(self): return self._epochs_completed def next_batch(self, batch_size, fake_data=False): """Return the next `batch_size` examples from this data set.""" if fake_data: fake_image = [1.0 for _ in range(784)] fake_label = 0 return [fake_image for _ in range(batch_size)], [ fake_label for _ in range(batch_size)] start = self._index_in_epoch self._index_in_epoch += batch_size if self._index_in_epoch > self._num_examples: # Finished epoch self._epochs_completed += 1 # Shuffle the data perm = numpy.arange(self._num_examples) numpy.random.shuffle(perm) self._images = self._images[perm] self._labels = self._labels[perm] # Start next epoch start = 0 self._index_in_epoch = batch_size 
assert batch_size <= self._num_examples end = self._index_in_epoch return self._images[start:end], self._labels[start:end] def read_data_sets(train_dir="mnist/", fake_data=False, one_hot=False): class DataSets(object): pass data_sets = DataSets() if fake_data: data_sets.train = DataSet([], [], fake_data=True) data_sets.validation = DataSet([], [], fake_data=True) data_sets.test = DataSet([], [], fake_data=True) return data_sets TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' TRAIN_LABELS = 'train-labels-idx1-ubyte.gz' TEST_IMAGES = 't10k-images-idx3-ubyte.gz' TEST_LABELS = 't10k-labels-idx1-ubyte.gz' VALIDATION_SIZE = 5000 local_file = maybe_download(TRAIN_IMAGES, train_dir) train_images = extract_images(local_file) local_file = maybe_download(TRAIN_LABELS, train_dir) train_labels = extract_labels(local_file, one_hot=one_hot) local_file = maybe_download(TEST_IMAGES, train_dir) test_images = extract_images(local_file) local_file = maybe_download(TEST_LABELS, train_dir) test_labels = extract_labels(local_file, one_hot=one_hot) validation_images = train_images[:VALIDATION_SIZE] validation_labels = train_labels[:VALIDATION_SIZE] train_images = train_images[VALIDATION_SIZE:] train_labels = train_labels[VALIDATION_SIZE:] data_sets.train = DataSet(train_images, train_labels) data_sets.validation = DataSet(validation_images, validation_labels) data_sets.test = DataSet(test_images, test_labels) return data_sets
hradec/gaffer
refs/heads/master
python/GafferSceneTest/ModuleTest.py
8
########################################################################## # # Copyright (c) 2014, John Haddon. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ########################################################################## import GafferTest class ModuleTest( GafferTest.TestCase ) : def testNamespacePollution( self ) : import GafferScene self.assertRaises( AttributeError, getattr, GafferScene, "IECore" ) self.assertRaises( AttributeError, getattr, GafferScene, "Gaffer" ) self.assertRaises( AttributeError, getattr, GafferScene, "GafferScene" ) self.assertRaises( AttributeError, getattr, GafferScene, "GafferImage" ) def testDoesNotImportUI( self ) : self.assertModuleDoesNotImportUI( "GafferScene" ) self.assertModuleDoesNotImportUI( "GafferSceneTest" ) if __name__ == "__main__": unittest.main()
nlativy/scritti
refs/heads/master
posts/admin.py
1
from scritti.posts.models import Post from django.contrib import admin class PostAdmin(admin.ModelAdmin): fieldsets = [ ('Post', {'fields': ['title', 'body']}), ('Meta', {'fields': ['slug', 'tags', 'author']}), ('Publication', {'fields': ['published', 'allow_comments', 'is_page']}), ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}), ] list_display = ('title', 'pub_date', 'published', 'allow_comments', 'is_page', 'tags') list_filter = ('published', 'is_page', 'pub_date') prepopulated_fields = {'slug': ('title',)} search_fields = ('title', 'body', 'tags') ordering = ('-pub_date',) date_hierarchy = 'pub_date' admin.site.register(Post, PostAdmin)
Maria1099/webm.webmlive
refs/heads/master
testing/webmstreamserver.py
5
#!/usr/bin/python2.4 # Copyright (c) 2011 The WebM project authors. All Rights Reserved. # Use of this source code is governed by a BSD-style license # that can be found in the LICENSE file in the root of the source # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. import cgi import datetime import os.path import time from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer FILENAME = 'test.webm' POSTCOUNT = 0 class WebMStreamServer(BaseHTTPRequestHandler): def do_POST(self): global POSTCOUNT try: ctype, pdict = cgi.parse_header(self.headers.getheader('content-type')) self.file = file(FILENAME, 'ab') if self.path.startswith("/dash"): # Hooooorible... terrrrrrible hack: post count is magic! if POSTCOUNT == 0: # this is the manifest print "manifest" print "%s" % self.headers mpd_file = open('webmlive.mpd', 'w') mpd_file.write(self.rfile.read(int(self.headers['content-length']))) mpd_file.close() elif POSTCOUNT == 1: # this is the hdr chunk print "header chunk" hdr_file = open('webmlive_webmlive.hdr', 'wb') hdr_file.write(self.rfile.read(int(self.headers['content-length']))) hdr_file.close() else: # this and all following chunks are media data print "media chunk" fname = "webmlive_webmlive_" + str(POSTCOUNT-1) + ".chk" chk_file = open(fname, 'wb') chk_file.write(self.rfile.read(int(self.headers['content-length']))) chk_file.close() self.send_response(200) POSTCOUNT += 1 print "POSTCOUNT = " + str(POSTCOUNT) else: print self.path if ctype == 'multipart/form-data': query = cgi.parse_multipart(self.rfile, pdict) upfilecontent = query.get('webm_file') self.file.write(upfilecontent[0]) self.send_response(200) self.wfile.write('Post OK') elif ctype == 'video/webm': length = int(self.headers['content-length']) if length > 0: self.file.write(self.rfile.read(length)) self.send_response(200) self.wfile.write('Post OK') else: print 
'post has 0 content-length (or is missing field)!' self.send_response(400) self.wfile.write('bad/missing content-length') else: print 'unsupported content-type, cannot handle POST!' self.send_response(500) self.wfile.write('Unsupported content-type') self.file.close() self.end_headers() except: print "you suck!" pass def main(): try: if os.path.exists(FILENAME): print "removed " + FILENAME os.remove(FILENAME) server = HTTPServer(('', 8000), WebMStreamServer) print 'started streaming server...' server.serve_forever() except KeyboardInterrupt: print ' shutting down server...' server.socket.close() if __name__ == '__main__': main()
gistic/PublicSpatialImpala
refs/heads/master
thirdparty/hive-0.10.0-cdh4.5.0/lib/py/thrift/reflection/limited/__init__.py
51
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = ['ttypes', 'constants']
aaronsw/watchdog
refs/heads/master
vendor/rdflib-2.4.0/rdflib/syntax/parsers/ntriples.py
4
#!/usr/bin/env python """ N-Triples Parser License: GPL 2, W3C, BSD, or MIT Author: Sean B. Palmer, inamidst.com Documentation: http://inamidst.com/proj/rdf/ntriples-doc Command line usage: ./ntriples.py <URI> - parses URI as N-Triples ./ntriples.py --help - prints out this help message # @@ fully empty document? """ import re uriref = r'<([^:]+:[^\s"<>]+)>' literal = r'"([^"\\]*(?:\\.[^"\\]*)*)"' litinfo = r'(?:@([a-z]+(?:-[a-z0-9]+)*)|\^\^' + uriref + r')?' r_line = re.compile(r'([^\r\n]*)(?:\r\n|\r|\n)') r_wspace = re.compile(r'[ \t]*') r_wspaces = re.compile(r'[ \t]+') r_tail = re.compile(r'[ \t]*\.[ \t]*') r_uriref = re.compile(uriref) r_nodeid = re.compile(r'_:([A-Za-z][A-Za-z0-9]*)') r_literal = re.compile(literal + litinfo) bufsiz = 2048 validate = False class Node(unicode): pass # class URI(Node): pass # class bNode(Node): pass # class Literal(Node): # def __new__(cls, lit, lang=None, dtype=None): # n = str(lang) + ' ' + str(dtype) + ' ' + lit # return unicode.__new__(cls, n) from rdflib import URIRef as URI from rdflib import BNode as bNode from rdflib import Literal class Sink(object): def __init__(self): self.length = 0 def triple(self, s, p, o): self.length += 1 print (s, p, o) class ParseError(Exception): pass quot = {'t': '\t', 'n': '\n', 'r': '\r', '"': '"', '\\': '\\'} r_safe = re.compile(r'([\x20\x21\x23-\x5B\x5D-\x7E]+)') r_quot = re.compile(r'\\(t|n|r|"|\\)') r_uniquot = re.compile(r'\\u([0-9A-F]{4})|\\U([0-9A-F]{8})') def unquote(s): """Unquote an N-Triples string.""" result = [] while s: m = r_safe.match(s) if m: s = s[m.end():] result.append(m.group(1)) continue m = r_quot.match(s) if m: s = s[2:] result.append(quot[m.group(1)]) continue m = r_uniquot.match(s) if m: s = s[m.end():] u, U = m.groups() codepoint = int(u or U, 16) if codepoint > 0x10FFFF: raise ParseError("Disallowed codepoint: %08X" % codepoint) result.append(unichr(codepoint)) elif s.startswith('\\'): raise ParseError("Illegal escape at: %s..." 
% s[:10]) else: raise ParseError("Illegal literal character: %r" % s[0]) return unicode(''.join(result)) if not validate: def unquote(s): return s.decode('unicode-escape') r_hibyte = re.compile(r'([\x80-\xFF])') def uriquote(uri): return r_hibyte.sub(lambda m: '%%%02X' % ord(m.group(1)), uri) if not validate: def uriquote(uri): return uri class NTriplesParser(object): """An N-Triples Parser. Usage: p = NTriplesParser(sink=MySink()) sink = p.parse(f) # file; use parsestring for a string """ def __init__(self, sink=None): if sink is not None: self.sink = sink else: self.sink = Sink() def parse(self, f): """Parse f as an N-Triples file.""" if not hasattr(f, 'read'): raise ParseError("Item to parse must be a file-like object.") self.file = f self.buffer = '' while True: self.line = self.readline() if self.line is None: break try: self.parseline() except ParseError: raise ParseError("Invalid line: %r" % self.line) return self.sink def parsestring(self, s): """Parse s as an N-Triples string.""" if not isinstance(s, basestring): raise ParseError("Item to parse must be a string instance.") from cStringIO import StringIO f = StringIO() f.write(s) f.seek(0) self.parse(f) def readline(self): """Read an N-Triples line from buffered input.""" # N-Triples lines end in either CRLF, CR, or LF # Therefore, we can't just use f.readline() if not self.buffer: buffer = self.file.read(bufsiz) if not buffer: return None self.buffer = buffer while True: m = r_line.match(self.buffer) if m: # the more likely prospect self.buffer = self.buffer[m.end():] return m.group(1) else: buffer = self.file.read(bufsiz) if not buffer: raise ParseError("EOF in line") self.buffer += buffer def parseline(self): self.eat(r_wspace) if (not self.line) or self.line.startswith('#'): return # The line is empty or a comment subject = self.subject() self.eat(r_wspaces) predicate = self.predicate() self.eat(r_wspaces) object = self.object() self.eat(r_tail) if self.line: raise ParseError("Trailing garbage") 
self.sink.triple(subject, predicate, object) def peek(self, token): return self.line.startswith(token) def eat(self, pattern): m = pattern.match(self.line) if not m: # @@ Why can't we get the original pattern? raise ParseError("Failed to eat %s" % pattern) self.line = self.line[m.end():] return m def subject(self): # @@ Consider using dictionary cases subj = self.uriref() or self.nodeid() if not subj: raise ParseError("Subject must be uriref or nodeID") return subj def predicate(self): pred = self.uriref() if not pred: raise ParseError("Predicate must be uriref") return pred def object(self): objt = self.uriref() or self.nodeid() or self.literal() if objt is False: raise ParseError("Unrecognised object type") return objt def uriref(self): if self.peek('<'): uri = self.eat(r_uriref).group(1) uri = unquote(uri) uri = uriquote(uri) return URI(uri) return False def nodeid(self): if self.peek('_'): return bNode(self.eat(r_nodeid).group(1)) return False def literal(self): if self.peek('"'): lit, lang, dtype = self.eat(r_literal).groups() lang = lang or None dtype = dtype or None if lang and dtype: raise ParseError("Can't have both a language and a datatype") lit = unquote(lit) return Literal(lit, lang, dtype) return False def parseURI(uri): import urllib parser = NTriplesParser() u = urllib.urlopen(uri) sink = parser.parse(u) u.close() # for triple in sink: # print triple print 'Length of input:', sink.length def main(): import sys if len(sys.argv) == 2: parseURI(sys.argv[1]) else: print __doc__ if __name__=="__main__": main()
authman/Python201609
refs/heads/master
Wright_Will/Assignments/login_and_registration_pylot/Pylot/system/core/router.py
10
""" System Core Router File Defines the verbs and the routes dictionary for use in the routes config file """ routes = {} routes['GET'] = {} routes['POST'] = {} routes['PUT'] = {} routes['PATCH'] = {} routes['DELETE'] = {}
Mj258/weiboapi
refs/heads/master
srapyDemo/envs/Lib/site-packages/pythonwin/pywin/Demos/app/demoutils.py
34
# Utilities for the demos import sys, win32api, win32con, win32ui NotScriptMsg = """\ This demo program is not designed to be run as a Script, but is probably used by some other test program. Please try another demo. """ NeedGUIMsg = """\ This demo program can only be run from inside of Pythonwin You must start Pythonwin, and select 'Run' from the toolbar or File menu """ NeedAppMsg = """\ This demo program is a 'Pythonwin Application'. It is more demo code than an example of Pythonwin's capabilities. To run it, you must execute the command: pythonwin.exe /app "%s" Would you like to execute it now? """ def NotAScript(): import win32ui win32ui.MessageBox(NotScriptMsg, "Demos") def NeedGoodGUI(): from pywin.framework.app import HaveGoodGUI rc = HaveGoodGUI() if not rc: win32ui.MessageBox(NeedGUIMsg, "Demos") return rc def NeedApp(): import win32ui rc = win32ui.MessageBox(NeedAppMsg % sys.argv[0], "Demos", win32con.MB_YESNO) if rc==win32con.IDYES: try: parent = win32ui.GetMainFrame().GetSafeHwnd() win32api.ShellExecute(parent, None, 'pythonwin.exe', '/app "%s"' % sys.argv[0], None, 1) except win32api.error, details: win32ui.MessageBox("Error executing command - %s" % (details), "Demos") if __name__=='__main__': import demoutils demoutils.NotAScript()
ringemup/satchmo
refs/heads/master
satchmo/apps/payment/modules/trustcommerce/views.py
12
from livesettings import config_get_group from payment.views import confirm, payship def pay_ship_info(request): return payship.credit_pay_ship_info(request, config_get_group('PAYMENT_TRUSTCOMMERCE')) def confirm_info(request): return confirm.credit_confirm_info(request, config_get_group('PAYMENT_TRUSTCOMMERCE'))
abhiii5459/sympy
refs/heads/master
sympy/core/power.py
16
from __future__ import print_function, division from math import log as _log from .sympify import _sympify from .cache import cacheit from .singleton import S from .expr import Expr from .evalf import PrecisionExhausted from .function import (_coeff_isneg, expand_complex, expand_multinomial, expand_mul) from .logic import fuzzy_bool, fuzzy_not from .compatibility import as_int, range from .evaluate import global_evaluate from mpmath.libmp import sqrtrem as mpmath_sqrtrem from sympy.utilities.iterables import sift def integer_nthroot(y, n): """ Return a tuple containing x = floor(y**(1/n)) and a boolean indicating whether the result is exact (that is, whether x**n == y). >>> from sympy import integer_nthroot >>> integer_nthroot(16,2) (4, True) >>> integer_nthroot(26,2) (5, False) """ y, n = int(y), int(n) if y < 0: raise ValueError("y must be nonnegative") if n < 1: raise ValueError("n must be positive") if y in (0, 1): return y, True if n == 1: return y, True if n == 2: x, rem = mpmath_sqrtrem(y) return int(x), not rem if n > y: return 1, False # Get initial estimate for Newton's method. Care must be taken to # avoid overflow try: guess = int(y**(1./n) + 0.5) except OverflowError: exp = _log(y, 2)/n if exp > 53: shift = int(exp - 53) guess = int(2.0**(exp - shift) + 1) << shift else: guess = int(2.0**exp) if guess > 2**50: # Newton iteration xprev, x = -1, guess while 1: t = x**(n - 1) xprev, x = x, ((n - 1)*x + y//t)//n if abs(x - xprev) < 2: break else: x = guess # Compensate t = x**n while t < y: x += 1 t = x**n while t > y: x -= 1 t = x**n return x, t == y class Pow(Expr): """ Defines the expression x**y as "x raised to a power y" Singleton definitions involving (0, 1, -1, oo, -oo): +--------------+---------+-----------------------------------------------+ | expr | value | reason | +==============+=========+===============================================+ | z**0 | 1 | Although arguments over 0**0 exist, see [2]. 
| +--------------+---------+-----------------------------------------------+ | z**1 | z | | +--------------+---------+-----------------------------------------------+ | (-oo)**(-1) | 0 | | +--------------+---------+-----------------------------------------------+ | (-1)**-1 | -1 | | +--------------+---------+-----------------------------------------------+ | S.Zero**-1 | zoo | This is not strictly true, as 0**-1 may be | | | | undefined, but is convenient in some contexts | | | | where the base is assumed to be positive. | +--------------+---------+-----------------------------------------------+ | 1**-1 | 1 | | +--------------+---------+-----------------------------------------------+ | oo**-1 | 0 | | +--------------+---------+-----------------------------------------------+ | 0**oo | 0 | Because for all complex numbers z near | | | | 0, z**oo -> 0. | +--------------+---------+-----------------------------------------------+ | 0**-oo | zoo | This is not strictly true, as 0**oo may be | | | | oscillating between positive and negative | | | | values or rotating in the complex plane. | | | | It is convenient, however, when the base | | | | is positive. | +--------------+---------+-----------------------------------------------+ | 1**oo | nan | Because there are various cases where | | 1**-oo | | lim(x(t),t)=1, lim(y(t),t)=oo (or -oo), | | 1**zoo | | but lim( x(t)**y(t), t) != 1. See [3]. | +--------------+---------+-----------------------------------------------+ | (-1)**oo | nan | Because of oscillations in the limit. 
| | (-1)**(-oo) | | | +--------------+---------+-----------------------------------------------+ | oo**oo | oo | | +--------------+---------+-----------------------------------------------+ | oo**-oo | 0 | | +--------------+---------+-----------------------------------------------+ | (-oo)**oo | nan | | | (-oo)**-oo | | | +--------------+---------+-----------------------------------------------+ Because symbolic computations are more flexible that floating point calculations and we prefer to never return an incorrect answer, we choose not to conform to all IEEE 754 conventions. This helps us avoid extra test-case code in the calculation of limits. See Also ======== sympy.core.numbers.Infinity sympy.core.numbers.NegativeInfinity sympy.core.numbers.NaN References ========== .. [1] http://en.wikipedia.org/wiki/Exponentiation .. [2] http://en.wikipedia.org/wiki/Exponentiation#Zero_to_the_power_of_zero .. [3] http://en.wikipedia.org/wiki/Indeterminate_forms """ is_Pow = True __slots__ = ['is_commutative'] @cacheit def __new__(cls, b, e, evaluate=None): if evaluate is None: evaluate = global_evaluate[0] from sympy.functions.elementary.exponential import exp_polar b = _sympify(b) e = _sympify(e) if evaluate: if e is S.Zero: return S.One elif e is S.One: return b elif e.is_integer and _coeff_isneg(b): if e.is_even: b = -b elif e.is_odd: return -Pow(-b, e) if S.NaN in (b, e): # XXX S.NaN**x -> S.NaN under assumption that x != 0 return S.NaN elif b is S.One: if abs(e).is_infinite: return S.NaN return S.One else: # recognize base as E if not e.is_Atom and b is not S.Exp1 and b.func is not exp_polar: from sympy import numer, denom, log, sign, im, factor_terms c, ex = factor_terms(e, sign=False).as_coeff_Mul() den = denom(ex) if den.func is log and den.args[0] == b: return S.Exp1**(c*numer(ex)) elif den.is_Add: s = sign(im(b)) if s.is_Number and s and den == \ log(-factor_terms(b, sign=False)) + s*S.ImaginaryUnit*S.Pi: return S.Exp1**(c*numer(ex)) obj = b._eval_power(e) if obj 
is not None: return obj obj = Expr.__new__(cls, b, e) obj.is_commutative = (b.is_commutative and e.is_commutative) return obj @property def base(self): return self._args[0] @property def exp(self): return self._args[1] @classmethod def class_key(cls): return 3, 2, cls.__name__ def _eval_power(self, other): from sympy import Abs, arg, exp, floor, im, log, re, sign, refine b, e = self.as_base_exp() if b is S.NaN: return (b**e)**other # let __new__ handle it s = None if other.is_integer: s = 1 elif b.is_polar: # e.g. exp_polar, besselj, var('p', polar=True)... s = 1 elif e.is_real is not None: # helper functions =========================== def _half(e): """Return True if the exponent has a literal 2 as the denominator, else None.""" if getattr(e, 'q', None) == 2: return True n, d = e.as_numer_denom() if n.is_integer and d == 2: return True def _n2(e): """Return ``e`` evaluated to a Number with 2 significant digits, else None.""" try: rv = e.evalf(2, strict=True) if rv.is_Number: return rv except PrecisionExhausted: pass # =================================================== if e.is_real: # we need _half(other) with constant floor or # floor(S.Half - e*arg(b)/2/pi) == 0 # handle -1 as special case if (e == -1) == True: # floor arg. 
is 1/2 + arg(b)/2/pi if _half(other): if b.is_negative is True: return S.NegativeOne**other*Pow(-b, e*other) if b.is_real is False: return Pow(b.conjugate()/Abs(b)**2, other) elif e.is_even: if b.is_real: b = refine(abs(b)) if b.is_imaginary: b = refine(abs(im(b)))*S.ImaginaryUnit if (abs(e) < 1) == True or (e == 1) == True: s = 1 # floor = 0 elif b.is_nonnegative: s = 1 # floor = 0 elif re(b).is_nonnegative and (abs(e) < 2) == True: s = 1 # floor = 0 elif fuzzy_not(im(b).is_zero) and (abs(e) == 2) == True: s = 1 # floor = 0 elif _half(other): s = exp(2*S.Pi*S.ImaginaryUnit*other*floor( S.Half - e*arg(b)/(2*S.Pi))) if s.is_real and _n2(sign(s) - s) == 0: s = sign(s) else: s = None else: # e.is_real is False requires: # _half(other) with constant floor or # floor(S.Half - im(e*log(b))/2/pi) == 0 try: s = exp(2*S.ImaginaryUnit*S.Pi*other* floor(S.Half - im(e*log(b))/2/S.Pi)) # be careful to test that s is -1 or 1 b/c sign(I) == I: # so check that s is real if s.is_real and _n2(sign(s) - s) == 0: s = sign(s) else: s = None except PrecisionExhausted: s = None if s is not None: return s*Pow(b, e*other) def _eval_is_even(self): if self.exp.is_integer and self.exp.is_positive: return self.base.is_even def _eval_is_positive(self): from sympy import log if self.base == self.exp: if self.base.is_nonnegative: return True elif self.base.is_positive: if self.exp.is_real: return True elif self.base.is_negative: if self.exp.is_even: return True if self.exp.is_odd: return False elif self.base.is_nonpositive: if self.exp.is_odd: return False elif self.base.is_imaginary: if self.exp.is_integer: m = self.exp % 4 if m.is_zero: return True if m.is_integer and m.is_zero is False: return False if self.exp.is_imaginary: return log(self.base).is_imaginary def _eval_is_negative(self): if self.base.is_negative: if self.exp.is_odd: return True if self.exp.is_even: return False elif self.base.is_positive: if self.exp.is_real: return False elif self.base.is_nonnegative: if 
self.exp.is_nonnegative: return False elif self.base.is_nonpositive: if self.exp.is_even: return False elif self.base.is_real: if self.exp.is_even: return False def _eval_is_zero(self): if self.base.is_zero: if self.exp.is_positive: return True elif self.exp.is_nonpositive: return False elif self.base.is_zero is False: if self.exp.is_finite: return False elif self.exp.is_infinite: if (1 - abs(self.base)).is_positive: return self.exp.is_positive elif (1 - abs(self.base)).is_negative: return self.exp.is_negative else: # when self.base.is_zero is None return None def _eval_is_integer(self): b, e = self.args if b.is_rational: if b.is_integer is False and e.is_positive: return False # rat**nonneg if b.is_integer and e.is_integer: if b is S.NegativeOne: return True if e.is_nonnegative or e.is_positive: return True if b.is_integer and e.is_negative and (e.is_finite or e.is_integer): if fuzzy_not((b - 1).is_zero) and fuzzy_not((b + 1).is_zero): return False if b.is_Number and e.is_Number: check = self.func(*self.args) return check.is_Integer def _eval_is_real(self): from sympy import arg, exp, log, Mul real_b = self.base.is_real if real_b is None: if self.base.func == exp and self.base.args[0].is_imaginary: return self.exp.is_imaginary return real_e = self.exp.is_real if real_e is None: return if real_b and real_e: if self.base.is_positive: return True elif self.base.is_nonnegative: if self.exp.is_nonnegative: return True else: if self.exp.is_integer: return True elif self.base.is_negative: if self.exp.is_Rational: return False if real_e and self.exp.is_negative: return Pow(self.base, -self.exp).is_real im_b = self.base.is_imaginary im_e = self.exp.is_imaginary if im_b: if self.exp.is_integer: if self.exp.is_even: return True elif self.exp.is_odd: return False elif im_e and log(self.base).is_imaginary: return True elif self.exp.is_Add: c, a = self.exp.as_coeff_Add() if c and c.is_Integer: return Mul( self.base**c, self.base**a, evaluate=False).is_real elif self.base in 
(-S.ImaginaryUnit, S.ImaginaryUnit): if (self.exp/2).is_integer is False: return False if real_b and im_e: if self.base is S.NegativeOne: return True c = self.exp.coeff(S.ImaginaryUnit) if c: ok = (c*log(self.base)/S.Pi).is_Integer if ok is not None: return ok if real_b is False: # we already know it's not imag i = arg(self.base)*self.exp/S.Pi return i.is_integer def _eval_is_complex(self): if all(a.is_complex for a in self.args): return True def _eval_is_imaginary(self): from sympy import arg, log if self.base.is_imaginary: if self.exp.is_integer: odd = self.exp.is_odd if odd is not None: return odd return if self.exp.is_imaginary: imlog = log(self.base).is_imaginary if imlog is not None: return False # I**i -> real; (2*I)**i -> complex ==> not imaginary if self.base.is_real and self.exp.is_real: if self.base.is_positive: return False else: rat = self.exp.is_rational if not rat: return rat if self.exp.is_integer: return False else: half = (2*self.exp).is_integer if half: return self.base.is_negative return half if self.base.is_real is False: # we already know it's not imag i = arg(self.base)*self.exp/S.Pi return (2*i).is_odd def _eval_is_odd(self): if self.exp.is_integer: if self.exp.is_positive: return self.base.is_odd elif self.exp.is_nonnegative and self.base.is_odd: return True elif self.base is S.NegativeOne: return True def _eval_is_finite(self): if self.exp.is_negative: if self.base.is_zero: return False if self.base.is_infinite: return True c1 = self.base.is_finite if c1 is None: return c2 = self.exp.is_finite if c2 is None: return if c1 and c2: if self.exp.is_nonnegative or fuzzy_not(self.base.is_zero): return True def _eval_is_prime(self): if self.exp == S.One: return self.base.is_prime if self.is_number: return self.doit().is_prime if self.is_integer and self.is_positive: """ a Power will be non-prime only if both base and exponent are greater than 1 """ if (self.base-1).is_positive or (self.exp-1).is_positive: return False def _eval_is_polar(self): 
return self.base.is_polar def _eval_subs(self, old, new): from sympy import exp, log, Symbol def _check(ct1, ct2, old): """Return bool, pow where, if bool is True, then the exponent of Pow `old` will combine with `pow` so the substitution is valid, otherwise bool will be False, cti are the coefficient and terms of an exponent of self or old In this _eval_subs routine a change like (b**(2*x)).subs(b**x, y) will give y**2 since (b**x)**2 == b**(2*x); if that equality does not hold then the substitution should not occur so `bool` will be False. """ coeff1, terms1 = ct1 coeff2, terms2 = ct2 if terms1 == terms2: pow = coeff1/coeff2 try: pow = as_int(pow) combines = True except ValueError: combines = Pow._eval_power( Pow(*old.as_base_exp(), evaluate=False), pow) is not None return combines, pow return False, None if old == self.base: return new**self.exp._subs(old, new) if old.func is self.func and self.base == old.base: if self.exp.is_Add is False: ct1 = self.exp.as_independent(Symbol, as_Add=False) ct2 = old.exp.as_independent(Symbol, as_Add=False) ok, pow = _check(ct1, ct2, old) if ok: # issue 5180: (x**(6*y)).subs(x**(3*y),z)->z**2 return self.func(new, pow) else: # b**(6*x+a).subs(b**(3*x), y) -> y**2 * b**a # exp(exp(x) + exp(x**2)).subs(exp(exp(x)), w) -> w * exp(exp(x**2)) oarg = old.exp new_l = [] o_al = [] ct2 = oarg.as_coeff_mul() for a in self.exp.args: newa = a._subs(old, new) ct1 = newa.as_coeff_mul() ok, pow = _check(ct1, ct2, old) if ok: new_l.append(new**pow) continue o_al.append(newa) if new_l: new_l.append(Pow(self.base, Add(*o_al), evaluate=False)) return Mul(*new_l) if old.func is exp and self.exp.is_real and self.base.is_positive: ct1 = old.args[0].as_independent(Symbol, as_Add=False) ct2 = (self.exp*log(self.base)).as_independent( Symbol, as_Add=False) ok, pow = _check(ct1, ct2, old) if ok: return self.func(new, pow) # (2**x).subs(exp(x*log(2)), z) -> z def as_base_exp(self): """Return base and exp of self. 
If base is 1/Integer, then return Integer, -exp. If this extra processing is not needed, the base and exp properties will give the raw arguments Examples ======== >>> from sympy import Pow, S >>> p = Pow(S.Half, 2, evaluate=False) >>> p.as_base_exp() (2, -2) >>> p.args (1/2, 2) """ b, e = self.args if b.is_Rational and b.p == 1 and b.q != 1: return Integer(b.q), -e return b, e def _eval_adjoint(self): from sympy.functions.elementary.complexes import adjoint i, p = self.exp.is_integer, self.base.is_positive if i: return adjoint(self.base)**self.exp if p: return self.base**adjoint(self.exp) if i is False and p is False: expanded = expand_complex(self) if expanded != self: return adjoint(expanded) def _eval_conjugate(self): from sympy.functions.elementary.complexes import conjugate as c i, p = self.exp.is_integer, self.base.is_positive if i: return c(self.base)**self.exp if p: return self.base**c(self.exp) if i is False and p is False: expanded = expand_complex(self) if expanded != self: return c(expanded) def _eval_transpose(self): from sympy.functions.elementary.complexes import transpose i, p = self.exp.is_integer, self.base.is_complex if p: return self.base**self.exp if i: return transpose(self.base)**self.exp if i is False and p is False: expanded = expand_complex(self) if expanded != self: return transpose(expanded) def _eval_expand_power_exp(self, **hints): """a**(n+m) -> a**n*a**m""" b = self.base e = self.exp if e.is_Add and e.is_commutative: expr = [] for x in e.args: expr.append(self.func(self.base, x)) return Mul(*expr) return self.func(b, e) def _eval_expand_power_base(self, **hints): """(a*b)**n -> a**n * b**n""" force = hints.get('force', False) b = self.base e = self.exp if not b.is_Mul: return self cargs, nc = b.args_cnc(split_1=False) # expand each term - this is top-level-only # expansion but we have to watch out for things # that don't have an _eval_expand method if nc: nc = [i._eval_expand_power_base(**hints) if hasattr(i, 
'_eval_expand_power_base') else i for i in nc] if e.is_Integer: if e.is_positive: rv = Mul(*nc*e) else: rv = 1/Mul(*nc*-e) if cargs: rv *= Mul(*cargs)**e return rv if not cargs: return self.func(Mul(*nc), e, evaluate=False) nc = [Mul(*nc)] # sift the commutative bases def pred(x): if x is S.ImaginaryUnit: return S.ImaginaryUnit polar = x.is_polar if polar: return True if polar is None: return fuzzy_bool(x.is_nonnegative) sifted = sift(cargs, pred) nonneg = sifted[True] other = sifted[None] neg = sifted[False] imag = sifted[S.ImaginaryUnit] if imag: I = S.ImaginaryUnit i = len(imag) % 4 if i == 0: pass elif i == 1: other.append(I) elif i == 2: if neg: nonn = -neg.pop() if nonn is not S.One: nonneg.append(nonn) else: neg.append(S.NegativeOne) else: if neg: nonn = -neg.pop() if nonn is not S.One: nonneg.append(nonn) else: neg.append(S.NegativeOne) other.append(I) del imag # bring out the bases that can be separated from the base if force or e.is_integer: # treat all commutatives the same and put nc in other cargs = nonneg + neg + other other = nc else: # this is just like what is happening automatically, except # that now we are doing it for an arbitrary exponent for which # no automatic expansion is done assert not e.is_Integer # handle negatives by making them all positive and putting # the residual -1 in other if len(neg) > 1: o = S.One if not other and neg[0].is_Number: o *= neg.pop(0) if len(neg) % 2: o = -o for n in neg: nonneg.append(-n) if o is not S.One: other.append(o) elif neg and other: if neg[0].is_Number and neg[0] is not S.NegativeOne: other.append(S.NegativeOne) nonneg.append(-neg[0]) else: other.extend(neg) else: other.extend(neg) del neg cargs = nonneg other += nc rv = S.One if cargs: rv *= Mul(*[self.func(b, e, evaluate=False) for b in cargs]) if other: rv *= self.func(Mul(*other), e, evaluate=False) return rv def _eval_expand_multinomial(self, **hints): """(a+b+..) 
** n -> a**n + n*a**(n-1)*b + .., n is nonzero integer""" base, exp = self.args result = self if exp.is_Rational and exp.p > 0 and base.is_Add: if not exp.is_Integer: n = Integer(exp.p // exp.q) if not n: return result else: radical, result = self.func(base, exp - n), [] expanded_base_n = self.func(base, n) if expanded_base_n.is_Pow: expanded_base_n = \ expanded_base_n._eval_expand_multinomial() for term in Add.make_args(expanded_base_n): result.append(term*radical) return Add(*result) n = int(exp) if base.is_commutative: order_terms, other_terms = [], [] for b in base.args: if b.is_Order: order_terms.append(b) else: other_terms.append(b) if order_terms: # (f(x) + O(x^n))^m -> f(x)^m + m*f(x)^{m-1} *O(x^n) f = Add(*other_terms) o = Add(*order_terms) if n == 2: return expand_multinomial(f**n, deep=False) + n*f*o else: g = expand_multinomial(f**(n - 1), deep=False) return expand_mul(f*g, deep=False) + n*g*o if base.is_number: # Efficiently expand expressions of the form (a + b*I)**n # where 'a' and 'b' are real numbers and 'n' is integer. a, b = base.as_real_imag() if a.is_Rational and b.is_Rational: if not a.is_Integer: if not b.is_Integer: k = self.func(a.q * b.q, n) a, b = a.p*b.q, a.q*b.p else: k = self.func(a.q, n) a, b = a.p, a.q*b elif not b.is_Integer: k = self.func(b.q, n) a, b = a*b.q, b.p else: k = 1 a, b, c, d = int(a), int(b), 1, 0 while n: if n & 1: c, d = a*c - b*d, b*c + a*d n -= 1 a, b = a*a - b*b, 2*a*b n //= 2 I = S.ImaginaryUnit if k == 1: return c + I*d else: return Integer(c)/k + I*d/k p = other_terms # (x+y)**3 -> x**3 + 3*x**2*y + 3*x*y**2 + y**3 # in this particular example: # p = [x,y]; n = 3 # so now it's easy to get the correct result -- we get the # coefficients first: from sympy import multinomial_coefficients from sympy.polys.polyutils import basic_from_dict expansion_dict = multinomial_coefficients(len(p), n) # in our example: {(3, 0): 1, (1, 2): 3, (0, 3): 1, (2, 1): 3} # and now construct the expression. 
return basic_from_dict(expansion_dict, *p) else: if n == 2: return Add(*[f*g for f in base.args for g in base.args]) else: multi = (base**(n - 1))._eval_expand_multinomial() if multi.is_Add: return Add(*[f*g for f in base.args for g in multi.args]) else: # XXX can this ever happen if base was an Add? return Add(*[f*multi for f in base.args]) elif (exp.is_Rational and exp.p < 0 and base.is_Add and abs(exp.p) > exp.q): return 1 / self.func(base, -exp)._eval_expand_multinomial() elif exp.is_Add and base.is_Number: # a + b a b # n --> n n , where n, a, b are Numbers coeff, tail = S.One, S.Zero for term in exp.args: if term.is_Number: coeff *= self.func(base, term) else: tail += term return coeff * self.func(base, tail) else: return result def as_real_imag(self, deep=True, **hints): from sympy import atan2, cos, im, re, sin from sympy.polys.polytools import poly if self.exp.is_Integer: exp = self.exp re, im = self.base.as_real_imag(deep=deep) if not im: return self, S.Zero a, b = symbols('a b', cls=Dummy) if exp >= 0: if re.is_Number and im.is_Number: # We can be more efficient in this case expr = expand_multinomial(self.base**exp) return expr.as_real_imag() expr = poly( (a + b)**exp) # a = re, b = im; expr = (a + b*I)**exp else: mag = re**2 + im**2 re, im = re/mag, -im/mag if re.is_Number and im.is_Number: # We can be more efficient in this case expr = expand_multinomial((re + im*S.ImaginaryUnit)**-exp) return expr.as_real_imag() expr = poly((a + b)**-exp) # Terms with even b powers will be real r = [i for i in expr.terms() if not i[0][1] % 2] re_part = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r]) # Terms with odd b powers will be imaginary r = [i for i in expr.terms() if i[0][1] % 4 == 1] im_part1 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r]) r = [i for i in expr.terms() if i[0][1] % 4 == 3] im_part3 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r]) return (re_part.subs({a: re, b: S.ImaginaryUnit*im}), im_part1.subs({a: re, b: im}) + im_part3.subs({a: re, b: -im})) elif 
self.exp.is_Rational: re, im = self.base.as_real_imag(deep=deep) if im.is_zero and self.exp is S.Half: if re.is_nonnegative: return self, S.Zero if re.is_nonpositive: return S.Zero, (-self.base)**self.exp # XXX: This is not totally correct since for x**(p/q) with # x being imaginary there are actually q roots, but # only a single one is returned from here. r = self.func(self.func(re, 2) + self.func(im, 2), S.Half) t = atan2(im, re) rp, tp = self.func(r, self.exp), t*self.exp return (rp*cos(tp), rp*sin(tp)) else: if deep: hints['complex'] = False expanded = self.expand(deep, **hints) if hints.get('ignore') == expanded: return None else: return (re(expanded), im(expanded)) else: return (re(self), im(self)) def _eval_derivative(self, s): from sympy import log dbase = self.base.diff(s) dexp = self.exp.diff(s) return self * (dexp * log(self.base) + dbase * self.exp/self.base) def _eval_evalf(self, prec): base, exp = self.as_base_exp() base = base._evalf(prec) if not exp.is_Integer: exp = exp._evalf(prec) if exp.is_negative and base.is_number and base.is_real is False: base = base.conjugate() / (base * base.conjugate())._evalf(prec) exp = -exp return self.func(base, exp).expand() return self.func(base, exp) def _eval_is_polynomial(self, syms): if self.exp.has(*syms): return False if self.base.has(*syms): return bool(self.base._eval_is_polynomial(syms) and self.exp.is_Integer and (self.exp >= 0)) else: return True def _eval_is_rational(self): p = self.func(*self.as_base_exp()) # in case it's unevaluated if not p.is_Pow: return p.is_rational b, e = p.as_base_exp() if e.is_Rational and b.is_Rational: # we didn't check that e is not an Integer # because Rational**Integer autosimplifies return False if e.is_integer: if b.is_rational: if fuzzy_not(b.is_zero) or e.is_nonnegative: return True if b == e: # always rational, even for 0**0 return True elif b.is_irrational: return e.is_zero def _eval_is_algebraic(self): if self.base.is_zero or (self.base - 1).is_zero: return True 
elif self.exp.is_rational: return self.base.is_algebraic elif self.base.is_algebraic and self.exp.is_algebraic: if ((fuzzy_not(self.base.is_zero) and fuzzy_not((self.base - 1).is_zero)) or self.base.is_integer is False or self.base.is_irrational): return self.exp.is_rational def _eval_is_rational_function(self, syms): if self.exp.has(*syms): return False if self.base.has(*syms): return self.base._eval_is_rational_function(syms) and \ self.exp.is_Integer else: return True def _eval_is_algebraic_expr(self, syms): if self.exp.has(*syms): return False if self.base.has(*syms): return self.base._eval_is_algebraic_expr(syms) and \ self.exp.is_Rational else: return True def as_numer_denom(self): if not self.is_commutative: return self, S.One base, exp = self.as_base_exp() n, d = base.as_numer_denom() # this should be the same as ExpBase.as_numer_denom wrt # exponent handling neg_exp = exp.is_negative if not neg_exp and not (-exp).is_negative: neg_exp = _coeff_isneg(exp) int_exp = exp.is_integer # the denominator cannot be separated from the numerator if # its sign is unknown unless the exponent is an integer, e.g. # sqrt(a/b) != sqrt(a)/sqrt(b) when a=1 and b=-1. But if the # denominator is negative the numerator and denominator can # be negated and the denominator (now positive) separated. 
if not (d.is_real or int_exp): n = base d = S.One dnonpos = d.is_nonpositive if dnonpos: n, d = -n, -d elif dnonpos is None and not int_exp: n = base d = S.One if neg_exp: n, d = d, n exp = -exp return self.func(n, exp), self.func(d, exp) def matches(self, expr, repl_dict={}, old=False): expr = _sympify(expr) # special case, pattern = 1 and expr.exp can match to 0 if expr is S.One: d = repl_dict.copy() d = self.exp.matches(S.Zero, d) if d is not None: return d # make sure the expression to be matched is an Expr if not isinstance(expr, Expr): return None b, e = expr.as_base_exp() # special case number sb, se = self.as_base_exp() if sb.is_Symbol and se.is_Integer and expr: if e.is_rational: return sb.matches(b**(e/se), repl_dict) return sb.matches(expr**(1/se), repl_dict) d = repl_dict.copy() d = self.base.matches(b, d) if d is None: return None d = self.exp.xreplace(d).matches(e, d) if d is None: return Expr.matches(self, expr, repl_dict) return d def _eval_nseries(self, x, n, logx): # NOTE! This function is an important part of the gruntz algorithm # for computing limits. It has to return a generalized power # series with coefficients in C(log, log(x)). In more detail: # It has to return an expression # c_0*x**e_0 + c_1*x**e_1 + ... (finitely many terms) # where e_i are numbers (not necessarily integers) and c_i are # expressions involving only numbers, the log function, and log(x). from sympy import ceiling, collect, exp, log, O, Order, powsimp b, e = self.args if e.is_Integer: if e > 0: # positive integer powers are easy to expand, e.g.: # sin(x)**4 = (x-x**3/3+...)**4 = ... return expand_multinomial(self.func(b._eval_nseries(x, n=n, logx=logx), e), deep=False) elif e is S.NegativeOne: # this is also easy to expand using the formula: # 1/(1 + x) = 1 - x + x**2 - x**3 ... 
# so we need to rewrite base to the form "1+x" nuse = n cf = 1 try: ord = b.as_leading_term(x) cf = Order(ord, x).getn() if cf and cf.is_Number: nuse = n + 2*ceiling(cf) else: cf = 1 except NotImplementedError: pass b_orig, prefactor = b, O(1, x) while prefactor.is_Order: nuse += 1 b = b_orig._eval_nseries(x, n=nuse, logx=logx) prefactor = b.as_leading_term(x) # express "rest" as: rest = 1 + k*x**l + ... + O(x**n) rest = expand_mul((b - prefactor)/prefactor) if rest.is_Order: return 1/prefactor + rest/prefactor + O(x**n, x) k, l = rest.leadterm(x) if l.is_Rational and l > 0: pass elif l.is_number and l > 0: l = l.evalf() elif l == 0: k = k.simplify() if k == 0: # if prefactor == w**4 + x**2*w**4 + 2*x*w**4, we need to # factor the w**4 out using collect: return 1/collect(prefactor, x) else: raise NotImplementedError() else: raise NotImplementedError() if cf < 0: cf = S.One/abs(cf) try: dn = Order(1/prefactor, x).getn() if dn and dn < 0: pass else: dn = 0 except NotImplementedError: dn = 0 terms = [1/prefactor] for m in range(1, ceiling((n - dn)/l*cf)): new_term = terms[-1]*(-rest) if new_term.is_Pow: new_term = new_term._eval_expand_multinomial( deep=False) else: new_term = expand_mul(new_term, deep=False) terms.append(new_term) terms.append(O(x**n, x)) return powsimp(Add(*terms), deep=True, combine='exp') else: # negative powers are rewritten to the cases above, for # example: # sin(x)**(-4) = 1/( sin(x)**4) = ... 
# and expand the denominator: nuse, denominator = n, O(1, x) while denominator.is_Order: denominator = (b**(-e))._eval_nseries(x, n=nuse, logx=logx) nuse += 1 if 1/denominator == self: return self # now we have a type 1/f(x), that we know how to expand return (1/denominator)._eval_nseries(x, n=n, logx=logx) if e.has(Symbol): return exp(e*log(b))._eval_nseries(x, n=n, logx=logx) # see if the base is as simple as possible bx = b while bx.is_Pow and bx.exp.is_Rational: bx = bx.base if bx == x: return self # work for b(x)**e where e is not an Integer and does not contain x # and hopefully has no other symbols def e2int(e): """return the integer value (if possible) of e and a flag indicating whether it is bounded or not.""" n = e.limit(x, 0) infinite = n.is_infinite if not infinite: # XXX was int or floor intended? int used to behave like floor # so int(-Rational(1, 2)) returned -1 rather than int's 0 try: n = int(n) except TypeError: #well, the n is something more complicated (like 1+log(2)) try: n = int(n.evalf()) + 1 # XXX why is 1 being added? except TypeError: pass # hope that base allows this to be resolved n = _sympify(n) return n, infinite order = O(x**n, x) ei, infinite = e2int(e) b0 = b.limit(x, 0) if infinite and (b0 is S.One or b0.has(Symbol)): # XXX what order if b0 is S.One: resid = (b - 1) if resid.is_positive: return S.Infinity elif resid.is_negative: return S.Zero raise ValueError('cannot determine sign of %s' % resid) return b0**ei if (b0 is S.Zero or b0.is_infinite): if infinite is not False: return b0**e # XXX what order if not ei.is_number: # if not, how will we proceed? raise ValueError( 'expecting numerical exponent but got %s' % ei) nuse = n - ei if e.is_real and e.is_positive: lt = b.as_leading_term(x) # Try to correct nuse (= m) guess from: # (lt + rest + O(x**m))**e = # lt**e*(1 + rest/lt + O(x**m)/lt)**e = # lt**e + ... + O(x**m)*lt**(e - 1) = ... 
+ O(x**n) try: cf = Order(lt, x).getn() nuse = ceiling(n - cf*(e - 1)) except NotImplementedError: pass bs = b._eval_nseries(x, n=nuse, logx=logx) terms = bs.removeO() if terms.is_Add: bs = terms lt = terms.as_leading_term(x) # bs -> lt + rest -> lt*(1 + (bs/lt - 1)) return ((self.func(lt, e) * self.func((bs/lt).expand(), e).nseries( x, n=nuse, logx=logx)).expand() + order) if bs.is_Add: from sympy import O # So, bs + O() == terms c = Dummy('c') res = [] for arg in bs.args: if arg.is_Order: arg = c*arg.expr res.append(arg) bs = Add(*res) rv = (bs**e).series(x).subs(c, O(1, x)) rv += order return rv rv = bs**e if terms != bs: rv += order return rv # either b0 is bounded but neither 1 nor 0 or e is infinite # b -> b0 + (b-b0) -> b0 * (1 + (b/b0-1)) o2 = order*(b0**-e) z = (b/b0 - 1) o = O(z, x) if o is S.Zero or o2 is S.Zero: infinite = True else: if o.expr.is_number: e2 = log(o2.expr*x)/log(x) else: e2 = log(o2.expr)/log(o.expr) n, infinite = e2int(e2) if infinite: # requested accuracy gives infinite series, # order is probably non-polynomial e.g. O(exp(-1/x), x). r = 1 + z else: l = [] g = None for i in range(n + 2): g = self._taylor_term(i, z, g) g = g.nseries(x, n=n, logx=logx) l.append(g) r = Add(*l) return expand_mul(r*b0**e) + order def _eval_as_leading_term(self, x): from sympy import exp, log if not self.exp.has(x): return self.func(self.base.as_leading_term(x), self.exp) return exp(self.exp * log(self.base)).as_leading_term(x) @cacheit def _taylor_term(self, n, x, *previous_terms): # of (1+x)**e from sympy import binomial return binomial(self.exp, n) * self.func(x, n) def _sage_(self): return self.args[0]._sage_()**self.args[1]._sage_() def as_content_primitive(self, radical=False): """Return the tuple (R, self/R) where R is the positive Rational extracted from self. 
Examples ======== >>> from sympy import sqrt >>> sqrt(4 + 4*sqrt(2)).as_content_primitive() (2, sqrt(1 + sqrt(2))) >>> sqrt(3 + 3*sqrt(2)).as_content_primitive() (1, sqrt(3)*sqrt(1 + sqrt(2))) >>> from sympy import expand_power_base, powsimp, Mul >>> from sympy.abc import x, y >>> ((2*x + 2)**2).as_content_primitive() (4, (x + 1)**2) >>> (4**((1 + y)/2)).as_content_primitive() (2, 4**(y/2)) >>> (3**((1 + y)/2)).as_content_primitive() (1, 3**((y + 1)/2)) >>> (3**((5 + y)/2)).as_content_primitive() (9, 3**((y + 1)/2)) >>> eq = 3**(2 + 2*x) >>> powsimp(eq) == eq True >>> eq.as_content_primitive() (9, 3**(2*x)) >>> powsimp(Mul(*_)) 3**(2*x + 2) >>> eq = (2 + 2*x)**y >>> s = expand_power_base(eq); s.is_Mul, s (False, (2*x + 2)**y) >>> eq.as_content_primitive() (1, (2*(x + 1))**y) >>> s = expand_power_base(_[1]); s.is_Mul, s (True, 2**y*(x + 1)**y) See docstring of Expr.as_content_primitive for more examples. """ b, e = self.as_base_exp() b = _keep_coeff(*b.as_content_primitive(radical=radical)) ce, pe = e.as_content_primitive(radical=radical) if b.is_Rational: #e #= ce*pe #= ce*(h + t) #= ce*h + ce*t #=> self #= b**(ce*h)*b**(ce*t) #= b**(cehp/cehq)*b**(ce*t) #= b**(iceh+r/cehq)*b**(ce*t) #= b**(iceh)*b**(r/cehq)*b**(ce*t) #= b**(iceh)*b**(ce*t + r/cehq) h, t = pe.as_coeff_Add() if h.is_Rational: ceh = ce*h c = self.func(b, ceh) r = S.Zero if not c.is_Rational: iceh, r = divmod(ceh.p, ceh.q) c = self.func(b, iceh) return c, self.func(b, _keep_coeff(ce, t + r/ce/ceh.q)) e = _keep_coeff(ce, pe) # b**e = (h*t)**e = h**e*t**e = c*m*t**e if e.is_Rational and b.is_Mul: h, t = b.as_content_primitive(radical=radical) # h is positive c, m = self.func(h, e).as_coeff_Mul() # so c is positive m, me = m.as_base_exp() if m is S.One or me == e: # probably always true # return the following, not return c, m*Pow(t, e) # which would change Pow into Mul; we let sympy # decide what to do by using the unevaluated Mul, e.g # should it stay as sqrt(2 + 2*sqrt(5)) or become # sqrt(2)*sqrt(1 + 
sqrt(5)) return c, self.func(_keep_coeff(m, t), e) return S.One, self.func(b, e) def is_constant(self, *wrt, **flags): expr = self if flags.get('simplify', True): expr = expr.simplify() b, e = expr.as_base_exp() bz = b.equals(0) if bz: # recalculate with assumptions in case it's unevaluated new = b**e if new != expr: return new.is_constant() econ = e.is_constant(*wrt) bcon = b.is_constant(*wrt) if bcon: if econ: return True bz = b.equals(0) if bz is False: return False elif bcon is None: return None return e.equals(0) def _eval_difference_delta(self, n, step): b, e = self.args if e.has(n) and not b.has(n): new_e = e.subs(n, n + step) return (b**(new_e - e) - 1) * self from .add import Add from .numbers import Integer from .mul import Mul, _keep_coeff from .symbol import Symbol, Dummy, symbols
treeherder/yasnac
refs/heads/master
disk/motodisk.py
1
#!/usr/bin/env python
"""
MotoDisk: a software emulator for the YASNAC FC1 floppy disk drive.
This program allows a YASNAC ERC series robot to have unlimited storage
on a host PC
"""

from time import sleep
import sys
import os
import argparse

import serial

import packets

# Default for the module-level debug flag.  main() overwrites this from the
# command line; defining it here keeps warn() safe to call (e.g. from the
# KeyboardInterrupt handler below) even before main() has assigned it.
DEBUG = False


def log(message):
    """Print the given message to stdout, flush stdout, return the message."""
    sys.stdout.write(message + "\n")
    sys.stdout.flush()
    return message


def warn(message, force=False):
    """Print the given message to stderr, flush stderr, return the message.

    The message is only emitted when debugging is enabled or force is True;
    the message is returned either way so callers can reuse it (e.g. as an
    exception argument).
    """
    if DEBUG or force:
        sys.stderr.write(message + "\n")
        sys.stderr.flush()
    return message


def chunks(iterable, chunksize):
    """Yield successive chunksize-sized chunks from the given iterable.

    From http://stackoverflow.com/questions/312443
    """
    for i in xrange(0, len(iterable), chunksize):
        yield iterable[i:i + chunksize]


def namefix(filename, filedata):
    """Make sure that if a jobname appears in the job file, the jobname
    matches the job's filename.  Log any corrections to stdout.

    WARNING: side effect: this function ensures proper \r\n line endings
    """
    expected_jobname = os.path.splitext(filename)[0]
    expected_entry = "//NAME {}".format(expected_jobname)
    result = []
    for line in filedata.splitlines():
        if line.startswith("//NAME ") and line != expected_entry:
            result.append(expected_entry)
            log('{}: Changing job name from "{}" to "{}"'.format(
                filename, line, expected_entry))
            continue
        result.append(line)
    return "\r\n".join(result) + "\r\n"


class SoftFC1(object):
    """ Emulate the FC1 disk controller """
    com = None              # serial.Serial connection to the robot
    input_packets = None    # generator yielding parsed incoming packets
    filelist = None         # optional whitelist of job files to serve
    overwrite = False       # whether FileWriTe may clobber existing files

    def __init__(self, filelist=None, overwrite=False,
                 baudrate=4800, port='/dev/ttyS0'):
        self.com = serial.Serial(port, baudrate,
                                 parity=serial.PARITY_EVEN,
                                 timeout=None)
        sleep(1)  # wait for the port to be ready (an arbitrary period)
        log("opened serial port")
        self.input_packets = self.input_packet_streamer()  # NOTE: generator
        self.filelist = filelist
        self.overwrite = overwrite

    def raw_read(self):
        """ Return the contents of incoming raw data on the serial port """
        input_buffer = []
        # Block until at least one byte has arrived...
        while not self.com.inWaiting():
            sleep(0.05)
        # ...then drain everything that is currently queued.
        while self.com.inWaiting():
            input_buffer.append(self.com.read(size=self.com.inWaiting()))
            sleep(0.005)
        result = "".join(input_buffer)
        warn("raw_read {} bytes: {}".format(len(result), result.__repr__()))
        return result

    def raw_write(self, message):
        """ Send raw data on the serial port """
        self.com.write(message)
        warn("raw_write {} bytes: {}".format(len(message),
                                             message.__repr__()))

    def write(self, message):
        """ encode and send the given message to the serial port """
        self.raw_write(packets.encode(message))

    def confirmed_write(self, message, limit=10):
        """Send a message, repeating as needed until we get an ack.

        Raises IOError if the ERC sends CANcel, RuntimeError after `limit`
        unacknowledged attempts.
        """
        message_received = False
        while not message_received:
            self.write(message)
            packet = next(self.input_packets)
            if packet == "ACK":
                message_received = True
                break
            elif packet == "CAN":
                raise IOError(warn("ERC sent CANcel during confirmed write"))
            else:
                limit -= 1
                if limit < 1:
                    # BUGFIX: message previously read "Can't to confirm"
                    raise RuntimeError(warn("Can't confirm write of {}".format(
                        message.__repr__())))
        warn("Confirmed write of {}".format(message))
        return True

    def input_packet_streamer(self):
        """ Generator which returns a parsed packet from the buffer,
        getting more data as necessary to complete the packet """
        parse_buffer = []
        while True:
            # 6 bytes is the minimum possible packet; top up before parsing.
            if len(parse_buffer) < 6:
                parse_buffer.extend(self.raw_read())
            try:
                (data, bytes_consumed) = packets.decode("".join(parse_buffer))
                parse_buffer = parse_buffer[bytes_consumed:]
                yield data
            except packets.InvalidPacketHeader:
                # slide out a byte of unusable data
                parse_buffer.pop(0)
            except packets.NeedMoreInput:
                # get some more data from the serial
                parse_buffer.extend(self.raw_read())

    def emulate(self):
        """ Loop, responding to serial requests as needed """
        while True:
            try:
                packet = next(self.input_packets)

                if packet == 'ENQ':
                    warn("Responding to ENQuiry packet")
                    self.write('ACK')
                    continue

                if packet == 'EOT':
                    warn("Received EndOfTransmission packet")
                    continue

                if packet == 'CAN':
                    raise IOError(warn("Received general CANcel packet"))

                if packet == 'ACK':
                    warn("Received unexpected ACKnowledge packet")
                    continue

                if packet == 'LST':
                    warn("Responding to LiST packet")
                    # Either serve the explicit command-line whitelist or
                    # every plausibly-named .JBI file in the current dir.
                    if self.filelist:
                        job_files = ["{:12}".format(filename)
                                     for filename in self.filelist
                                     if os.path.exists(filename)]
                    else:
                        job_files = ["{:12}".format(filename)
                                     for filename in os.listdir('.')
                                     if filename.endswith(".JBI")
                                     and 4 < len(filename) < 17]
                    self.confirmed_write("LST{:04}{}".format(
                        len(job_files), "".join(job_files)))
                    self.write("EOF")
                    continue

                if packet == 'DSZ':
                    warn("Responding to DiskSiZe packet")
                    # A fixed "free space" figure; the real disk is emulated
                    self.confirmed_write("DSZ00729088")
                    self.write("EOF")
                    continue

                if packet.startswith('FRD'):
                    warn("Responding to FileReaD packet")
                    filename = packet[3:].rstrip()
                    if self.filelist and filename not in self.filelist:
                        raise RuntimeError(warn(
                            "The requested filename does not appear on the "
                            "command line", force=True))
                    with open(filename) as inputfh:
                        # autocorrect any filename/jobname discontinuity
                        filedata = namefix(filename, inputfh.read())
                    self.confirmed_write("FSZ{:08}".format(len(filedata)))
                    # send the file in 255 byte blocks, retrying as necessary
                    for chunk in chunks(filedata, 255):
                        self.confirmed_write("FRD" + chunk)
                    self.write("EOF")
                    continue

                if packet.startswith('FWT'):
                    warn("Responding to FileWriTe packet")
                    filename = packet[3:].rstrip()
                    if not self.overwrite and os.path.exists(filename):
                        # we're going to rename TEST.JBI to TEST-1.JBI and
                        # keep incrementing the number until we find one
                        # that doesn't exist
                        original_filename = filename
                        rename_counter = 1
                        while os.path.exists(filename):
                            # BUGFIX: the format arguments must be unpacked
                            # with *; passing the list as a single argument
                            # raised IndexError on field {1}.
                            filename = "{0}-{2}{1}".format(
                                *(list(os.path.splitext(original_filename)) +
                                  [rename_counter]))
                            rename_counter += 1
                        warn("Renaming {} to {}".format(original_filename,
                                                        filename))
                    with open(filename, "w") as outputfh:
                        self.write("ACK")
                        while True:
                            packet = next(self.input_packets)
                            if packet.startswith("FWT"):
                                outputfh.write(packet[3:])
                                self.write("ACK")
                                continue
                            if packet == "EOF":
                                self.write("ACK")
                                break
                            warn("Unexpected packet during write: {}".format(
                                packet))
                    continue

                warn("Unhandled packet: {}".format(packet))

            except IOError:
                # A CANcel (local or remote) resets the conversation.
                log("Resetting on CANcel")
                self.write("ACK")


def main():
    """primary handler for command-line execution. return an exit status
    integer or a bool type (where True indicates successful execution)"""
    global DEBUG
    argp = argparse.ArgumentParser(description=(
        "MotoDisk: a software emulator for the YASNAC FC1 floppy disk "
        "drive"))
    argp.add_argument('-p', '--port', default='/dev/ttyS0', help=(
        "serial port to use"))
    argp.add_argument('-b', '--baud', default=4800, help=(
        "serialport baudrate to use"))
    argp.add_argument('-d', '--debug', action="store_true", help=(
        "enable debugging output"))
    argp.add_argument('-o', '--overwrite', action="store_true", help=(
        "enable existing files to be overwritten by the program"))
    argp.add_argument('file', nargs="*", default=None, help=(
        "optional: if you want only certain file(s) to be available to the "
        "robot, list those files on the command line. For example this "
        "allows you to send just a single file instead of all job (.JBI) "
        "files in the current working directory"))
    args = argp.parse_args()
    DEBUG = args.debug

    disk = SoftFC1(port=args.port, baudrate=args.baud, filelist=args.file,
                   overwrite=args.overwrite)
    disk.emulate()
    return True


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        warn("Exiting due to keyboard interrupt (Ctrl-C)", force=True)
lalanza808/lalanza808.github.io
refs/heads/master
vendor/bundle/ruby/2.3.0/gems/pygments.rb-0.6.3/vendor/pygments-main/tests/test_using_api.py
36
# -*- coding: utf-8 -*- """ Pygments tests for using() ~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import unittest from pygments.lexer import using, bygroups, this, RegexLexer from pygments.token import String, Text, Keyword class TestLexer(RegexLexer): tokens = { 'root': [ (r'#.*', using(this, state='invalid')), (r'(")(.+?)(")', bygroups(String, using(this, state='string'), String)), (r'[^"]+', Text), ], 'string': [ (r'.+', Keyword), ], } class UsingStateTest(unittest.TestCase): def test_basic(self): expected = [(Text, 'a'), (String, '"'), (Keyword, 'bcd'), (String, '"'), (Text, 'e\n')] t = list(TestLexer().get_tokens('a"bcd"e')) self.assertEqual(t, expected) def test_error(self): def gen(): return list(TestLexer().get_tokens('#a')) self.assertRaises(KeyError, gen)
samluescher/django-expenses
refs/heads/master
expenses/admin.py
1
from expenses.models import Expense, ExpenseType, ExpensesGroup, Bill
from expenses.templatetags.moneyformats import money
from django.contrib.admin.views.main import ChangeList
from django.contrib import admin
from django.contrib.admin.util import unquote
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _, ugettext
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.models import Sum
import datetime


def reset_bill(modeladmin, request, queryset):
    """Admin action: detach the selected expenses from their bill."""
    queryset.update(bill=None, billed=False)
reset_bill.short_description = _('Reset bill for selected entries')


class ExpenseAdmin(admin.ModelAdmin):
    """Admin for Expense with per-user filtering and one-click billing."""
    date_hierarchy = 'date'
    list_display = ('date', 'user', 'amount', 'expense_type',
                    'expense_group', 'comment', 'bill')
    list_filter = ('billed', 'date', 'user', 'expense_type',
                   'expense_group', 'bill')
    actions = [reset_bill]

    def get_actions(self, request):
        """Hide the reset_bill action from non-superusers."""
        actions = super(ExpenseAdmin, self).get_actions(request)
        if not request.user.is_superuser:
            del actions['reset_bill']
        return actions

    def save_model(self, request, obj, form, change):
        """Default the expense's user to the currently signed-in user."""
        if not obj.user:
            obj.user = request.user
        return super(ExpenseAdmin, self).save_model(
            request, obj, form, change)

    def queryset(self, request):
        """
        Filter the objects displayed in the change_list to only
        display those for the currently signed in user.
        """
        qs = super(ExpenseAdmin, self).queryset(request)
        if not request.user.is_superuser:
            qs = qs.filter(expense_group__in=request.user.groups.all())
        return qs

    def get_changelist_queryset(self, request):
        """Build the same queryset the changelist view would display."""
        cl = ChangeList(request, self.model, self.list_display,
                        self.list_display_links, self.list_filter,
                        self.date_hierarchy, self.search_fields,
                        self.list_select_related, self.list_per_page,
                        self.list_max_show_all, self.list_editable, self)
        return cl.get_query_set(request)

    def changelist_view(self, request, extra_context=None):
        """Augment the changelist with billing summary/URL context."""
        qs = self.get_changelist_queryset(request)
        bill_id = request.GET.get('bill__id__exact', False)
        bill = None
        if bill_id:
            try:
                bill = Bill.objects.get(pk=bill_id)
            except Bill.DoesNotExist:
                pass
        bill_confirm = request.POST.get('confirm', False)
        if qs.filter(billed=False).count() > 0 and not bill:
            if bill_confirm:
                qs = qs.filter(billed=False)
                bill_url = (reverse('admin:expense_bill') + '?' +
                            request.META['QUERY_STRING'])
            else:
                bill_url = (reverse('admin:expenses_expense_changelist') +
                            '?' + request.META['QUERY_STRING'] +
                            '&billed__exact=0')
        else:
            # BUGFIX: previously assigned the misspelled, never-read name
            # ``confirm_bill``, so ``bill_confirm`` kept the raw POST value
            # even when no billing is possible.
            bill_confirm = bill_url = False
        extra_context = {
            'expense_info': Expense.summarize(request.user, qs, bill),
            'bill_url': bill_url,
            'bill_confirm': bill_confirm,
        }
        return super(ExpenseAdmin, self).changelist_view(
            request, extra_context)

    def has_delete_permission(self, request, obj=None):
        """Only superusers, or the owner of the entry, may delete it."""
        return request.user.is_superuser or (obj and obj.user == request.user)

    def has_change_permission(self, request, obj=None):
        """Only superusers, or the owner of the entry, may change it."""
        return not obj or request.user.is_superuser or (
            obj and obj.user == request.user)

    def change_view(self, request, object_id, *args, **kwargs):
        """Redirect back to the list when editing someone else's entry."""
        obj = self.get_object(request, unquote(object_id))
        if not self.has_change_permission(request, obj):
            messages.info(request, _('You can only change your own entries.'))
            return HttpResponseRedirect(
                reverse('admin:expenses_expense_changelist'))
        else:
            return super(ExpenseAdmin, self).change_view(
                request, object_id, *args, **kwargs)

    def get_urls(self):
        """Add the custom bill/ endpoint in front of the default admin URLs."""
        from django.conf.urls.defaults import patterns, url
        urls = super(ExpenseAdmin, self).get_urls()
        url_patterns = patterns('',
            url(r'^bill/$', self.admin_site.admin_view(self.bill),
                name="expense_bill"),
        )
        url_patterns.extend(urls)
        return url_patterns

    def bill(self, request):
        """Create a Bill and attach every unbilled expense in the queryset."""
        qs = self.get_changelist_queryset(request).filter(billed=False)
        if qs.count() > 0:
            bill = Bill()
            bill.save()
            for item in qs.filter(bill=None):
                item.bill = bill
                item.billed = True
                item.save()
            messages.add_message(request, messages.INFO,
                                 _('Bill %s was saved.') % bill)
        return HttpResponseRedirect(
            reverse('admin:expenses_expense_changelist') + '?' +
            request.META['QUERY_STRING'])


class ExpenseTypeAdmin(admin.ModelAdmin):
    pass


class ExpensesGroupAdmin(admin.ModelAdmin):
    list_display = ('name', 'user_names', 'expenses_sum')

    def user_names(self, obj):
        """Comma-separated display of the group's members."""
        return ', '.join([user.__unicode__() for user in obj.user_set.all()])
    user_names.short_description = _('users')

    def expenses_sum(self, obj):
        """Formatted total of all expenses booked on this group."""
        return money(Expense.objects.filter(expense_group=obj).aggregate(
            Sum('amount'))['amount__sum'])
    expenses_sum.short_description = _('total')


class BillAdmin(admin.ModelAdmin):
    pass


admin.site.register(Expense, ExpenseAdmin)
admin.site.register(ExpenseType, ExpenseTypeAdmin)
admin.site.register(ExpensesGroup, ExpensesGroupAdmin)
admin.site.register(Bill, BillAdmin)
mecwerks/fofix
refs/heads/master
src/views/GuitarScene/instruments/__init__.py
3
from Drum import * from Guitar import * from Vocalist import *
breznak/nupic
refs/heads/master
tests/unit/nupic/algorithms/__init__.py
12133432
linea-it/dri
refs/heads/master
api/interfaces/migrations/__init__.py
12133432
trevor/calendarserver
refs/heads/master
calendarserver/tools/test/test_changeip.py
1
## # Copyright (c) 2005-2014 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## from twistedcaldav.test.util import TestCase from calendarserver.tools.changeip_calendar import updateConfig class ChangeIPTestCase(TestCase): def test_updateConfig(self): plist = { "Untouched": "dont_change_me", "ServerHostName": "", "Scheduling": { "iMIP": { "Receiving": { "Server": "original_hostname", }, "Sending": { "Server": "original_hostname", "Address": "user@original_hostname", }, }, }, } updateConfig( plist, "10.1.1.1", "10.1.1.2", "original_hostname", "new_hostname" ) self.assertEquals( plist, { "Untouched": "dont_change_me", "ServerHostName": "", "Scheduling": { "iMIP": { "Receiving": { "Server": "new_hostname", }, "Sending": { "Server": "new_hostname", "Address": "user@new_hostname", }, }, }, } )
ohsailey/MAD-IS
refs/heads/master
mad_interface_server/views/communicate.py
1
''' Copyright (c) 2014 OpenISDM Project Name: OpenISDM MAD-IS Version: 1.0 File Name: communicate.py Abstract: communicate.py is a module of Interface Server (IS) of Mobile Assistance for Disasters (MAD) in the OpenISDM Virtual Repository project. Establish communication with client side, and server will do some actions according different request. Authors: Bai Shan-Wei, [email protected] License: GPL 3.0 This file is subject to the terms and conditions defined in file 'COPYING.txt', which is part of this source code package. Major Revision History: 2014/6/5: complete version 1.0 ''' from flask import Flask, request, jsonify, render_template from flask import Blueprint, after_this_request, make_response from mad_interface_server.database import db, POS from mad_interface_server import app demand = Blueprint('demand', __name__) import json import uuid import requests import time postInfo = "Null" need_validate = False @demand.teardown_request def teardown_request(exception): global postInfo postData = postInfo if postData != "Null": time.sleep(2) validate_topic_url(postData) postInfo = 'Null' pass else: return 'thx' @demand.route('/fetch/', methods=['POST', 'GET']) def receive_req(): ''' Receive a request from client side and response according different demands ''' from mad_interface_server import information response = information.answer(request.data) return jsonify(response) @demand.route('/send/', methods=['GET', 'POST']) def create_info(): ''' Receive the information from client side and store it. ''' from mad_interface_server import information if request.method == 'POST': data = json.loads(request.data) information.build_info(data) return 'ok' @demand.route('/hub/', methods=['GET', 'HEAD']) def discovery(): ''' If subscribers access '/hub' path by using HTTP GET or HEAD method, IS will response HTTP Link Header and response body to subscribers. 
This route can make a Link Header of response to subscribers Default Link Header can be "Null" ''' pos_id = request.args.get('posId') pos_type = request.args.get('posType') dt = determine_topic_and_hub(pos_id, pos_type) print 'return response' print dt # print >> sys.stderr, "/hub GET HEAD..." resp = make_response(render_template('ok.html'), 200) resp.headers['link'] = '<' + dt['hub_url'] + '>; rel="hub", <' \ + dt['topic_url'] + '>; rel="self"' print resp return resp # # To Do judgement function for response # # result = determineTopic(request.query_string) # @demand.route('/subscribe/', methods=['POST']) def hub(): print 'hello' # # If subscribers access '/hub' path using HTTP POST method, # IS will be a Hub to deal with subscribe/unsubcribe action. # # This request has a Content-Type of application/x-www-form-urlencoded and # the following parameters in its body: # # hub.callback # # hub.mode # # hub.topic # # hub.lease_seconds(Optional) - # The hub-determined number of seconds that the subscription will # stay active before expiring, measured from the time the verification # request was made from the hub to the subscriber. # # hub.secret(Optional) - # A subscriber-provided secret string that will be used to compute an # HMAC digest for authorized content distribution. 
# global postInfo postData = request.form print postData if postData['hub.mode'] and postData['hub.topic'] \ and postData['hub.callback']: if postData['hub.mode'] == 'subscribe': # # solve postData to Global context # # g.postData = postData # is_find_url = info.match_url(postInfo['hub.topic']) postInfo = postData resp = make_response(render_template('Accepted.html'), 202) return resp elif postData['hub.mode'] == 'unsubscribe': # # To Do a function to clear record in list of subscribers # resp = make_response(render_template('Accepted.html'), 202) return resp else: resp = make_response(render_template('Unknown.html'), 406) return resp else: resp = make_response(render_template('Unknown.html'), 406) return resp # # To Do publish for other publisher if we need to be a public hub # # elif postData['hub.mode'] == 'publish': # return 'publish' # def validate_topic_url(postData): # # Subscriptions MAY be validated by the Hubs who may require more details # to accept or refuse a subscription.The Hub MAY also check with the # publisher whether the subscription should be accepted.Hubs MUST preserve # the query string during subscription verification by appending new # parameters to the end of the list using the & (ampersand) character # to join. # # If topic URL is correct from publisher, the hub MUST perform verification # of intent of the subscirber if denied, hub must infrom GET request to # subscriber's callback URL [] # # print >> sys.stderr, 'validate_topic_url' # answer = fromDb(postData['hub.topic']) answer_reason = 'No this topic' print postInfo['hub.topic'] is_find_url = match_url(postInfo['hub.topic']) # if answer.judge: if is_find_url is True: # # Verifie Intent of the Subscribers # This request has the following query string arguments appended: # # hub.mode # hub.topic # hub.challage - A hub-generated, random string that MUST be echoed # by the subscriber to verify the subscription. 
# hub.lease_seconds(Optional) # randomKey = uuid.uuid4() payload = {'hub.mode': postInfo['hub.mode'], 'hub.topic': postInfo['hub.topic'], 'hub.challenge': randomKey} req = requests.get(postInfo['hub.callback'], params=payload) print payload print postInfo['hub.callback'] print str(req.status_code) if ( str(req.status_code)[:1] == '2' and str(req.content) == str(randomKey)): store_subscriber(postInfo['hub.topic'], postInfo['hub.callback']) content_distribution(postInfo['hub.callback']) print 'success' else: print 'fail' # 'verification to have failed.' # storefailedSubscritions(g.postData['hub.callback']) # print >> sys.stderr, 'storefailedSlubscritions: %s' % g.postData[ # 'hub.callback'] else: # # return 'send reason to subscribers' # This request has the following query string arguments appended: # # hub.mode # hub.topic # hub.reason(Optional) -The hub may include a reason for which the # subscription has been denied. # payload = {'hub.mode': postInfo['hub.mode'], 'hub.topic': postInfo['hub.topic'], 'hub.reason': answer_reason} req = requests.get(postInfo['hub.callback'], params=payload) @demand.route('/textView/') def show_text(): ''' Display the topic content with text. ''' return render_template('text_view.html') @demand.route('/imgView/') def show_img(): """ Display the topic content with image. """ print 'ff' return render_template('image_view.html') def determine_topic_and_hub(pos_id, pos_type): """ Decide which topic address and hub address will be assigned the subscriber and return the json object that include their values. 
pos_id : The POS server ID pos_type : The fix type or mobile type of POS server """ reply = { 'hub_url': app.config['WEB_URL'] + '/subscribe/', } reply['topic_url'] = 'Not found' if pos_type == 'fix': for p in db.session.query(POS): if p.id == pos_id: reply['topic_url'] = p.topic_dir elif pos_type == 'mobile': print 'testing' return reply def match_url(topic_url): is_find = False for p in db.session.query(POS): if p.topic_dir == topic_url: is_find = True return is_find def store_subscriber(topic_url, callback_url): for p in db.session.query(POS): if p.topic_dir == topic_url: p.callback_url = callback_url p.is_subscribe = True db.session.commit() print 'have stored the subscription' def content_distribution(sub_url): """ Prepare file that will send the subscriber, then publish to the corresponding subscriber. """ # search the corresponding POS ID according the subscriber url if sub_url is not None: for p in db.session.query(POS): if p.callback_url == sub_url: pos_id = p.id topic_dictionary = { 'png': app.config['TOPIC_DIR'] + pos_id + '/' + pos_id + '.png', 'rdf': app.config['TOPIC_DIR'] + pos_id + '/' + pos_id + '.rdf' } for x in topic_dictionary: files = {'file': open(topic_dictionary[x], 'rb')} r = requests.post(sub_url, files=files) else: print "This POS server have not subscribed"
AII-G2/ToFilms
refs/heads/master
ToFilms/ToFilms/settings.py
1
""" Django settings for ToFilms project. Generated by 'django-admin startproject' using Django 1.10.3. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'pdcn%emar*jz(98mx9x%&5e!hiwch&(6(!ylw@rp*+(_k-77xu' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'aplication', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'ToFilms.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'ToFilms.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 
'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/'
RafaelTorrealba/odoo
refs/heads/8.0
addons/payment_adyen/models/adyen.py
165
# -*- coding: utf-'8' "-*-" import base64 try: import simplejson as json except ImportError: import json from hashlib import sha1 import hmac import logging import urlparse from openerp.addons.payment.models.payment_acquirer import ValidationError from openerp.addons.payment_adyen.controllers.main import AdyenController from openerp.osv import osv, fields from openerp.tools import float_round _logger = logging.getLogger(__name__) class AcquirerAdyen(osv.Model): _inherit = 'payment.acquirer' def _get_adyen_urls(self, cr, uid, environment, context=None): """ Adyen URLs - yhpp: hosted payment page: pay.shtml for single, select.shtml for multiple """ return { 'adyen_form_url': 'https://%s.adyen.com/hpp/pay.shtml' % ('live' if environment == 'prod' else environment), } def _get_providers(self, cr, uid, context=None): providers = super(AcquirerAdyen, self)._get_providers(cr, uid, context=context) providers.append(['adyen', 'Adyen']) return providers _columns = { 'adyen_merchant_account': fields.char('Merchant Account', required_if_provider='adyen'), 'adyen_skin_code': fields.char('Skin Code', required_if_provider='adyen'), 'adyen_skin_hmac_key': fields.char('Skin HMAC Key', required_if_provider='adyen'), } def _adyen_generate_merchant_sig(self, acquirer, inout, values): """ Generate the shasign for incoming or outgoing communications. :param browse acquirer: the payment.acquirer browse record. It should have a shakey in shaky out :param string inout: 'in' (openerp contacting ogone) or 'out' (adyen contacting openerp). 
In this last case only some fields should be contained (see e-Commerce basic) :param dict values: transaction values :return string: shasign """ assert inout in ('in', 'out') assert acquirer.provider == 'adyen' if inout == 'in': keys = "paymentAmount currencyCode shipBeforeDate merchantReference skinCode merchantAccount sessionValidity shopperEmail shopperReference recurringContract allowedMethods blockedMethods shopperStatement merchantReturnData billingAddressType deliveryAddressType offset".split() else: keys = "authResult pspReference merchantReference skinCode merchantReturnData".split() def get_value(key): if values.get(key): return values[key] return '' sign = ''.join('%s' % get_value(k) for k in keys).encode('ascii') key = acquirer.adyen_skin_hmac_key.encode('ascii') return base64.b64encode(hmac.new(key, sign, sha1).digest()) def adyen_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None): base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url') acquirer = self.browse(cr, uid, id, context=context) # tmp import datetime from dateutil import relativedelta tmp_date = datetime.date.today() + relativedelta.relativedelta(days=1) adyen_tx_values = dict(tx_values) adyen_tx_values.update({ 'merchantReference': tx_values['reference'], 'paymentAmount': '%d' % int(float_round(tx_values['amount'], 2) * 100), 'currencyCode': tx_values['currency'] and tx_values['currency'].name or '', 'shipBeforeDate': tmp_date, 'skinCode': acquirer.adyen_skin_code, 'merchantAccount': acquirer.adyen_merchant_account, 'shopperLocale': partner_values['lang'], 'sessionValidity': tmp_date, 'resURL': '%s' % urlparse.urljoin(base_url, AdyenController._return_url), }) if adyen_tx_values.get('return_url'): adyen_tx_values['merchantReturnData'] = json.dumps({'return_url': '%s' % adyen_tx_values.pop('return_url')}) adyen_tx_values['merchantSig'] = self._adyen_generate_merchant_sig(acquirer, 'in', adyen_tx_values) return partner_values, 
adyen_tx_values def adyen_get_form_action_url(self, cr, uid, id, context=None): acquirer = self.browse(cr, uid, id, context=context) return self._get_adyen_urls(cr, uid, acquirer.environment, context=context)['adyen_form_url'] class TxAdyen(osv.Model): _inherit = 'payment.transaction' _columns = { 'adyen_psp_reference': fields.char('Adyen PSP Reference'), } # -------------------------------------------------- # FORM RELATED METHODS # -------------------------------------------------- def _adyen_form_get_tx_from_data(self, cr, uid, data, context=None): reference, pspReference = data.get('merchantReference'), data.get('pspReference') if not reference or not pspReference: error_msg = 'Adyen: received data with missing reference (%s) or missing pspReference (%s)' % (reference, pspReference) _logger.error(error_msg) raise ValidationError(error_msg) # find tx -> @TDENOTE use pspReference ? tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context) if not tx_ids or len(tx_ids) > 1: error_msg = 'Adyen: received data for reference %s' % (reference) if not tx_ids: error_msg += '; no order found' else: error_msg += '; multiple order found' _logger.error(error_msg) raise ValidationError(error_msg) tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context) # verify shasign shasign_check = self.pool['payment.acquirer']._adyen_generate_merchant_sig(tx.acquirer_id, 'out', data) if shasign_check != data.get('merchantSig'): error_msg = 'Adyen: invalid merchantSig, received %s, computed %s' % (data.get('merchantSig'), shasign_check) _logger.warning(error_msg) raise ValidationError(error_msg) return tx def _adyen_form_get_invalid_parameters(self, cr, uid, tx, data, context=None): invalid_parameters = [] # reference at acquirer: pspReference if tx.acquirer_reference and data.get('pspReference') != tx.acquirer_reference: invalid_parameters.append(('pspReference', data.get('pspReference'), tx.acquirer_reference)) # 
seller if data.get('skinCode') != tx.acquirer_id.adyen_skin_code: invalid_parameters.append(('skinCode', data.get('skinCode'), tx.acquirer_id.adyen_skin_code)) # result if not data.get('authResult'): invalid_parameters.append(('authResult', data.get('authResult'), 'something')) return invalid_parameters def _adyen_form_validate(self, cr, uid, tx, data, context=None): status = data.get('authResult', 'PENDING') if status == 'AUTHORISED': tx.write({ 'state': 'done', 'adyen_psp_reference': data.get('pspReference'), # 'date_validate': data.get('payment_date', fields.datetime.now()), # 'paypal_txn_type': data.get('express_checkout') }) return True elif status == 'PENDING': tx.write({ 'state': 'pending', 'adyen_psp_reference': data.get('pspReference'), }) return True else: error = 'Adyen: feedback error' _logger.info(error) tx.write({ 'state': 'error', 'state_message': error }) return False
mindnervestech/mnrp
refs/heads/master
addons/project/company.py
381
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
from openerp.tools.translate import _

# Help text shown next to the company-level project time unit setting.
# NOTE: byte-for-byte identical to the historical tooltip.
_PROJECT_TIME_UOM_HELP = (
    "This will set the unit of measure used in projects and tasks.\n"
    "If you use the timesheet linked to projects (project_timesheet module), don't "
    "forget to setup the right unit of measure in your employees."
)


class res_company(osv.osv):
    """Extend res.company with the unit of measure used for project time."""

    _inherit = 'res.company'

    _columns = {
        # Company-wide UoM (hours, days, ...) applied to project/task durations.
        'project_time_mode_id': fields.many2one(
            'product.uom',
            'Project Time Unit',
            help=_PROJECT_TIME_UOM_HELP,
        ),
    }

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
levilucio/SyVOLT
refs/heads/master
mbeddr2C_MM/transformation_from_mps/Hlayer1rule4.py
1
from core.himesis import Himesis import uuid class Hlayer1rule4(Himesis): def __init__(self): """ Creates the himesis graph representing the DSLTrans rule layer1rule4. """ # Flag this instance as compiled now self.is_compiled = True super(Hlayer1rule4, self).__init__(name='Hlayer1rule4', num_nodes=0, edges=[]) # Set the graph attributes self["mm__"] = ['HimesisMM'] self["name"] = """layer1rule4""" self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer1rule4') # match model. We only support one match model self.add_node() self.vs[0]["mm__"] = """MatchModel""" # apply model node self.add_node() self.vs[1]["mm__"] = """ApplyModel""" # paired with relation between match and apply models self.add_node() self.vs[2]["mm__"] = """paired_with""" self.vs[2]["attr1"] = """layer1rule4""" # match class Operation(layer1rule4class0Operation) node self.add_node() self.vs[3]["mm__"] = """Operation""" self.vs[3]["attr1"] = """+""" # match class OperationParameter(layer1rule4class1OperationParameter) node self.add_node() self.vs[4]["mm__"] = """OperationParameter""" self.vs[4]["attr1"] = """+""" # match class StringType(layer1rule4class2StringType) node self.add_node() self.vs[5]["mm__"] = """StringType""" self.vs[5]["attr1"] = """+""" # apply class FunctionRefType(layer1rule4class3FunctionRefType) node self.add_node() self.vs[6]["mm__"] = """FunctionRefType""" self.vs[6]["attr1"] = """1""" # apply class StringType(layer1rule4class4StringType) node self.add_node() self.vs[7]["mm__"] = """StringType""" self.vs[7]["attr1"] = """1""" # match association Operation--parameters-->OperationParameter node self.add_node() self.vs[8]["attr1"] = """parameters""" self.vs[8]["mm__"] = """directLink_S""" # match association OperationParameter--type-->StringType node self.add_node() self.vs[9]["attr1"] = """type""" self.vs[9]["mm__"] = """directLink_S""" # apply association FunctionRefType--argTypes-->StringType node self.add_node() self.vs[10]["attr1"] = """argTypes""" self.vs[10]["mm__"] = 
"""directLink_T""" # backward association StringType-->StringTypenode self.add_node() self.vs[11]["mm__"] = """backward_link""" # backward association FunctionRefType-->Operationnode self.add_node() self.vs[12]["mm__"] = """backward_link""" # Add the edges self.add_edges([ (0,3), # matchmodel -> match_class Operation(layer1rule4class0Operation) (0,4), # matchmodel -> match_class OperationParameter(layer1rule4class1OperationParameter) (0,5), # matchmodel -> match_class StringType(layer1rule4class2StringType) (1,6), # applymodel -> apply_classFunctionRefType(layer1rule4class3FunctionRefType) (1,7), # applymodel -> apply_classStringType(layer1rule4class4StringType) (3,8), # match classOperation(layer1rule4class0Operation) -> association parameters (8,4), # associationparameters -> match_classOperation(layer1rule4class1OperationParameter) (4,9), # match classOperationParameter(layer1rule4class1OperationParameter) -> association type (9,5), # associationtype -> match_classOperationParameter(layer1rule4class2StringType) (6,10), # apply class FunctionRefType(layer1rule4class3FunctionRefType) -> association argTypes (10,7), # associationargTypes -> apply_classStringType(layer1rule4class4StringType) (7,11), # apply class StringType(layer1rule4class2StringType) -> backward_association (11,5), # backward_associationStringType -> match_class StringType(layer1rule4class2StringType) (6,12), # apply class FunctionRefType(layer1rule4class0Operation) -> backward_association (12,3), # backward_associationOperation -> match_class Operation(layer1rule4class0Operation) (0,2), # matchmodel -> pairedwith (2,1) # pairedwith -> applyModel ]) self["equations"] = []
spirrello/spirrello-pynet-work
refs/heads/master
applied_python/lib/python2.7/site-packages/pip/operations/freeze.py
84
from __future__ import absolute_import

import logging
import re

import pip
from pip.compat import stdlib_pkgs
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources

logger = logging.getLogger(__name__)

# packages to exclude from freeze output
freeze_excludes = stdlib_pkgs + ['setuptools', 'pip', 'distribute']


def freeze(
        requirement=None,
        find_links=None, local_only=None, user_only=None, skip_regex=None,
        find_tags=False,
        default_vcs=None,
        isolated=False):
    """Yield the lines of ``pip freeze`` output, one requirement per line.

    :param requirement: optional path to a requirements file; when given,
        lines from that file are echoed/annotated and matched against the
        installed set, and only the *remaining* installed packages are
        appended under a "## added by pip freeze" header.
    :param find_links: extra ``-f`` URLs to emit and to scan for ``#egg=``
        dependency links.
    :param local_only, user_only: filters forwarded to
        ``get_installed_distributions``.
    :param skip_regex: lines of the requirements file matching this regex
        are passed through untouched.
    """
    find_links = find_links or []
    skip_match = None

    if skip_regex:
        skip_match = re.compile(skip_regex)

    # Collect dependency links both from installed distributions' metadata
    # and from any find_links URL that pins an egg fragment.
    dependency_links = []

    for dist in pkg_resources.working_set:
        if dist.has_metadata('dependency_links.txt'):
            dependency_links.extend(
                dist.get_metadata_lines('dependency_links.txt')
            )
    for link in find_links:
        if '#egg=' in link:
            dependency_links.append(link)
    # Echo every find-links location so the output file is self-contained.
    for link in find_links:
        yield '-f %s' % link
    # Map package name -> FrozenRequirement for everything installed
    # (minus the stdlib/bootstrap packages in freeze_excludes).
    installations = {}
    for dist in get_installed_distributions(local_only=local_only,
                                            skip=freeze_excludes,
                                            user_only=user_only):
        req = pip.FrozenRequirement.from_dist(
            dist,
            dependency_links,
            find_tags=find_tags,
        )
        installations[req.name] = req
    if requirement:
        with open(requirement) as req_file:
            for line in req_file:
                # Pass through blank lines, comments, skip_regex matches and
                # option-style lines (-r/-f/-i/...) unchanged.
                if (not line.strip() or
                        line.strip().startswith('#') or
                        (skip_match and skip_match.search(line)) or
                        line.startswith((
                            '-r', '--requirement',
                            '-Z', '--always-unzip',
                            '-f', '--find-links',
                            '-i', '--index-url',
                            '--extra-index-url'))):
                    yield line.rstrip()
                    continue

                if line.startswith('-e') or line.startswith('--editable'):
                    # Strip the editable flag (and a possible '=' after the
                    # long form) so only the URL/path remains.
                    if line.startswith('-e'):
                        line = line[2:].strip()
                    else:
                        line = line[len('--editable'):].strip().lstrip('=')
                    line_req = InstallRequirement.from_editable(
                        line,
                        default_vcs=default_vcs,
                        isolated=isolated,
                    )
                else:
                    line_req = InstallRequirement.from_line(
                        line,
                        isolated=isolated,
                    )

                if not line_req.name:
                    # Could not determine a project name from the line.
                    logger.info(
                        "Skipping line because it's not clear what it "
                        "would install: %s",
                        line.strip(),
                    )
                    logger.info(
                        " (add #egg=PackageName to the URL to avoid"
                        " this warning)"
                    )
                elif line_req.name not in installations:
                    # Listed in the requirements file but not installed.
                    logger.warning(
                        "Requirement file contains %s, but that package is"
                        " not installed",
                        line.strip(),
                    )
                else:
                    # Emit the frozen (pinned) form and mark it as consumed
                    # so it is not repeated below.
                    yield str(installations[line_req.name]).rstrip()
                    del installations[line_req.name]

        # Whatever is still in `installations` was not mentioned in the
        # requirements file: list it under an explanatory header.
        yield(
            '## The following requirements were added by '
            'pip freeze:'
        )
    for installation in sorted(
            installations.values(), key=lambda x: x.name.lower()):
        yield str(installation).rstrip()
fengzhe29888/gnuradio-old
refs/heads/master
gr-utils/python/modtool/modtool_info.py
13
# # Copyright 2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # """ Returns information about a module """ import os from optparse import OptionGroup from modtool_base import ModTool, ModToolException from util_functions import get_modname class ModToolInfo(ModTool): """ Return information about a given module """ name = 'info' aliases = ('getinfo', 'inf') def __init__(self): ModTool.__init__(self) self._directory = None self._python_readable = False self._suggested_dirs = None def setup_parser(self): """ Initialise the option parser for 'gr_modtool info' """ parser = ModTool.setup_parser(self) parser.usage = '%prog info [options]. \n Call %prog without any options to run it interactively.' 
ogroup = OptionGroup(parser, "Info options") ogroup.add_option("--python-readable", action="store_true", default=None, help="Return the output in a format that's easier to read for Python scripts.") ogroup.add_option("--suggested-dirs", default=None, type="string", help="Suggest typical include dirs if nothing better can be detected.") parser.add_option_group(ogroup) return parser def setup(self, options, args): # Won't call parent's setup(), because that's too chatty self._directory = options.directory self._python_readable = options.python_readable self._suggested_dirs = options.suggested_dirs def run(self): """ Go, go, go! """ mod_info = dict() mod_info['base_dir'] = self._get_base_dir(self._directory) if mod_info['base_dir'] is None: raise ModToolException('{}' if self._python_readable else "No module found.") os.chdir(mod_info['base_dir']) mod_info['modname'] = get_modname() if mod_info['modname'] is None: raise ModToolException('{}' if self._python_readable else "No module found.") if self._info['version'] == '36' and ( os.path.isdir(os.path.join('include', mod_info['modname'])) or os.path.isdir(os.path.join('include', 'gnuradio', mod_info['modname'])) ): self._info['version'] = '37' mod_info['version'] = self._info['version'] if 'is_component' in self._info.keys(): mod_info['is_component'] = True mod_info['incdirs'] = [] mod_incl_dir = os.path.join(mod_info['base_dir'], 'include') if os.path.isdir(os.path.join(mod_incl_dir, mod_info['modname'])): mod_info['incdirs'].append(os.path.join(mod_incl_dir, mod_info['modname'])) else: mod_info['incdirs'].append(mod_incl_dir) build_dir = self._get_build_dir(mod_info) if build_dir is not None: mod_info['build_dir'] = build_dir mod_info['incdirs'] += self._get_include_dirs(mod_info) if self._python_readable: print str(mod_info) else: self._pretty_print(mod_info) def _get_base_dir(self, start_dir): """ Figure out the base dir (where the top-level cmake file is) """ base_dir = os.path.abspath(start_dir) if 
self._check_directory(base_dir): return base_dir else: (up_dir, this_dir) = os.path.split(base_dir) if os.path.split(up_dir)[1] == 'include': up_dir = os.path.split(up_dir)[0] if self._check_directory(up_dir): return up_dir return None def _get_build_dir(self, mod_info): """ Figure out the build dir (i.e. where you run 'cmake'). This checks for a file called CMakeCache.txt, which is created when running cmake. If that hasn't happened, the build dir cannot be detected, unless it's called 'build', which is then assumed to be the build dir. """ base_build_dir = mod_info['base_dir'] if 'is_component' in mod_info.keys(): (base_build_dir, rest_dir) = os.path.split(base_build_dir) has_build_dir = os.path.isdir(os.path.join(base_build_dir , 'build')) if (has_build_dir and os.path.isfile(os.path.join(base_build_dir, 'CMakeCache.txt'))): return os.path.join(base_build_dir, 'build') else: for (dirpath, dirnames, filenames) in os.walk(base_build_dir): if 'CMakeCache.txt' in filenames: return dirpath if has_build_dir: return os.path.join(base_build_dir, 'build') return None def _get_include_dirs(self, mod_info): """ Figure out include dirs for the make process. 
""" inc_dirs = [] path_or_internal = {True: 'INTERNAL', False: 'PATH'}['is_component' in mod_info.keys()] try: cmakecache_fid = open(os.path.join(mod_info['build_dir'], 'CMakeCache.txt')) for line in cmakecache_fid: if line.find('GNURADIO_RUNTIME_INCLUDE_DIRS:%s' % path_or_internal) != -1: inc_dirs += line.replace('GNURADIO_RUNTIME_INCLUDE_DIRS:%s=' % path_or_internal, '').strip().split(';') except IOError: pass if len(inc_dirs) == 0 and self._suggested_dirs is not None: inc_dirs = [os.path.normpath(path) for path in self._suggested_dirs.split(':') if os.path.isdir(path)] return inc_dirs def _pretty_print(self, mod_info): """ Output the module info in human-readable format """ index_names = {'base_dir': 'Base directory', 'modname': 'Module name', 'is_component': 'Is GR component', 'build_dir': 'Build directory', 'incdirs': 'Include directories'} for key in mod_info.keys(): if key == 'version': print " API version: %s" % { '36': 'pre-3.7', '37': 'post-3.7', 'autofoo': 'Autotools (pre-3.5)' }[mod_info['version']] else: print '%19s: %s' % (index_names[key], mod_info[key])
grigoryvp/pyxcf
refs/heads/master
pyxcf/info.py
2
#!/usr/bin/env python
# coding:utf-8 vi:et:ts=2

# pyxcf information.
# Copyright 2013 Grigory Petrov
# See LICENSE for details.

import os

import pkg_resources


NAME_SHORT = "pyxcf"
VER_MAJOR = 0
VER_MINOR = 1

try:
    # Prefer the version recorded in the installed package metadata.
    VER_TXT = pkg_resources.require(NAME_SHORT)[0].version
except pkg_resources.DistributionNotFound:
    # No distribution metadata (e.g. installed via 'setup.py develop'):
    # fall back to the hardcoded major/minor with a zero build number.
    VER_BUILD = 0
    VER_TXT = ".".join(map(str, [VER_MAJOR, VER_MINOR, VER_BUILD]))

DIR_THIS = os.path.dirname(os.path.abspath(__file__))
NAME_FULL = "Python XCF"

# The template below is written with real newlines for readability; they are
# stripped first, then the escaped '\n' markers become the actual line breaks.
DESCR = """
{s_name_short} v. {s_ver_txt}\\n\\n
A simple python lib that can read some of .xcf file data into memory.
""".replace('\n', '').replace('\\n', '\n').strip().format(
    s_name_short=NAME_SHORT, s_ver_txt=VER_TXT)
40423243/2017springcd_hw
refs/heads/gh-pages
data/py/script1.py
22
# Brython in-browser Python editor/runner: wires an Ace editor (or a plain
# <textarea> fallback) to a "console" <textarea>, persists the source in
# localStorage, and executes it on demand.
import sys
import time
import traceback
import javascript

from browser import document as doc, window, alert

# True while we believe the Ace editor is available; flipped to False if
# constructing it fails and we fall back to a plain textarea.
has_ace = True
try:
    editor = window.ace.edit("editor")
    session = editor.getSession()
    session.setMode("ace/mode/python")
    editor.setOptions({
        'enableLiveAutocompletion': True,
        'enableSnippets': True,
        'highlightActiveLine': False,
        'highlightSelectedWord': True
    })
except:
    # Ace is not loaded: substitute a textarea and graft getValue/setValue
    # onto it so the rest of the script can use one editor API.
    from browser import html
    editor = html.TEXTAREA(rows=20, cols=70)
    doc["editor"] <= editor
    def get_value(): return editor.value
    def set_value(x):editor.value = x
    editor.getValue = get_value
    editor.setValue = set_value
    has_ace = False

# Persist the edited source across reloads when the browser supports it.
if hasattr(window, 'localStorage'):
    from browser.local_storage import storage
else:
    storage = None


def reset_src():
    """Load the saved source (or a demo snippet) into the Ace editor."""
    if storage is not None and "py_src" in storage:
        editor.setValue(storage["py_src"])
    else:
        editor.setValue('for i in range(10):\n\tprint(i)')
    editor.scrollToRow(0)
    editor.gotoLine(0)


def reset_src_area():
    """Textarea-fallback variant of reset_src()."""
    if storage and "py_src" in storage:
        editor.value = storage["py_src"]
    else:
        editor.value = 'for i in range(10):\n\tprint(i)'


class cOutput:
    # File-like adapter: writes are appended to a DOM element's value,
    # letting us redirect stdout/stderr into the on-page console.
    def __init__(self, target):
        self.target = doc[target]

    def write(self, data):
        self.target.value += str(data)

#if "console" in doc:
sys.stdout = cOutput("console")
sys.stderr = cOutput("console")


def to_str(xx):
    return str(xx)

info = sys.implementation.version
doc['version'].text = 'Brython %s.%s.%s' % (info.major, info.minor, info.micro)

# Last captured console text, shown again by show_console().
output = ''


def show_console(ev):
    """Restore the captured output into the console element."""
    doc["console"].value = output
    doc["console"].cols = 60
    doc["console"].rows = 10


# load a Python script
def load_script(evt):
    # Cache-busting query parameter forces a fresh fetch of the script.
    _name = evt.target.value + '?foo=%s' % time.time()
    editor.setValue(open(_name).read())


# run a script, in global namespace if in_globals is True
def run(*args):
    """Execute the editor contents; returns 1 on success, 0 on exception."""
    global output
    doc["console"].value = ''
    src = editor.getValue()
    if storage is not None:
        storage["py_src"] = src

    t0 = time.perf_counter()
    try:
        #ns = {'__name__':'__main__'}
        ns = {'__name__':'editor'}
        exec(src, ns)
        state = 1
    except Exception as exc:
        # Traceback goes to sys.stderr, i.e. into the console element.
        traceback.print_exc(file=sys.stderr)
        state = 0
    output = doc["console"].value

    print('<completed in %6.2f ms>' % ((time.perf_counter() - t0) * 1000.0))
    return state

if has_ace:
    reset_src()
else:
    reset_src_area()


def clear_console(ev):
    doc["console"].value = ""

doc['run'].bind('click', run)
doc['show_console'].bind('click', show_console)
doc['clear_console'].bind('click', clear_console)
chendaniely/scipy_proceedings
refs/heads/master
publisher/writer/rstmath.py
13
# This code is from: http://pypi.python.org/pypi/rstex/
#!/usr/bin/python2

from docutils import utils, nodes
from docutils.core import publish_cmdline
from docutils.writers.latex2e import Writer, LaTeXTranslator
from docutils.parsers.rst import roles, Directive, directives


class InlineMath(nodes.Inline, nodes.TextElement):
    """Doctree node for inline math; LaTeX source lives in the 'latex' attr."""
    pass


class PartMath(nodes.Part, nodes.Element):
    """Doctree node for a display-math block ('latex', 'label', 'type' attrs)."""
    pass


class PartLaTeX(nodes.Part, nodes.Element):
    """Doctree node for raw LaTeX ('latex' and 'usepackage' attrs)."""
    pass


def mathEnv(math, label, type):
    """Wrap *math* in a LaTeX display environment.

    :param math: the LaTeX body of the equation.
    :param label: equation label, or falsy for an unnumbered (starred) env.
    :param type: environment name, e.g. "equation", "align", "split".
    :return: the complete LaTeX snippet as a string.
    """
    if label:
        eqn_star = ''
    else:
        eqn_star = '*'

    if type in ("split", "gathered"):
        # amsmath's split/gathered are sub-environments and must be nested
        # inside an (optionally starred) equation environment.
        # BUG FIX: the begin string previously interpolated (type, eqn_star)
        # in that order, emitting e.g. "\begin{equationsplit}\n\begin{*}"
        # instead of "\begin{equation*}\n\begin{split}".  The end string
        # below already used the correct order.
        begin = "\\begin{equation%s}\n\\begin{%s}\n" % (eqn_star, type)
        end = "\\end{%s}\n\\end{equation%s}\n" % (type, eqn_star)
    else:
        begin = "\\begin{%s%s}\n" % (type, eqn_star)
        end = "\\end{%s%s}" % (type, eqn_star)
    if label:
        begin += "\\label{%s}\n" % label
    return begin + math + '\n' + end


def mathRole(role, rawtext, text, lineno, inliner, options={}, content=[]):
    """reST role handler for :math:`...` — produces an InlineMath node."""
    # Restore backslashes so the raw LaTeX survives reST escaping.
    latex = utils.unescape(text, restore_backslashes=True)
    return [InlineMath(latex=latex)], []


class MathDirective(Directive):
    """The ``.. math::`` directive: display math with optional label/type."""

    has_content = True
    required_arguments = 0
    optional_arguments = 2
    final_argument_whitespace = True
    option_spec = {
        'type': directives.unchanged,
        'label': directives.unchanged,
    }

    def run(self):
        latex = '\n'.join(self.content)
        if self.arguments and self.arguments[0]:
            # Directive arguments become the first line(s) of the math body.
            latex = ' '.join(self.arguments) + '\n\n' + latex
        node = PartMath()
        node['latex'] = latex.strip()
        node['label'] = self.options.get('label', None)
        node['type'] = self.options.get('type', "equation")
        ret = [node]
        return ret


class LaTeXDirective(Directive):
    """The ``.. latex::`` directive: pass raw LaTeX through to the writer."""

    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {
        'usepackage': directives.unchanged,
    }

    def run(self):
        latex = '\n'.join(self.content)
        if self.arguments and self.arguments[0]:
            latex = self.arguments[0] + '\n\n' + latex
        node = PartLaTeX()
        node['latex'] = latex
        # Comma-separated list of extra LaTeX packages to load.
        node['usepackage'] = self.options.get("usepackage", "").split(",")
        ret = [node]
        return ret


# Register the role and directives globally with docutils at import time.
roles.register_local_role("math", mathRole)
directives.register_directive("math", MathDirective)
directives.register_directive("latex", LaTeXDirective)
openSUSE/libstorage-bgl-eval
refs/heads/master
integration-tests/misc/light-probe.py
4
#!/usr/bin/python3 # requirements: some disks or nothing from storage import * set_logger(get_logfile_logger()) something = light_probe() print(something)
quarkonics/zstack-woodpecker
refs/heads/master
integrationtest/vm/multihosts/volumes/test_4vm_snapshot_robot.py
2
''' Robot Test only includes Vm operations, Volume operations and Snapshot operations @author: Youyk ''' import zstackwoodpecker.action_select as action_select import zstackwoodpecker.test_util as test_util import zstackwoodpecker.test_state as test_state import zstackwoodpecker.test_lib as test_lib import zstackwoodpecker.header.vm as vm_header import os test_dict = test_state.TestStateDict() def test(): test_util.test_dsc(''' Will doing random test for VIP operations, including VIP create/delete, PF create/attach/detach/remove, EIP create/attach/detach/remove. VM operations will also be tested. If reach max 4 coexisting running vm, testing will success and quit. SG actions, Volume actions and Image actions are removed in this robot test. VM resources: VIP testing needs at least 3 VRs are running. ''') target_running_vm = 4 public_l3 = test_lib.lib_get_l3_by_name(os.environ.get('l3PublicNetworkName')) vm_create_option = test_util.VmOption() #image has to use virtual router image, as it needs to do port checking vm_create_option.set_image_uuid(test_lib.lib_get_image_by_name(img_name=os.environ.get('imageName_net')).uuid) priority_actions = test_state.TestAction.snapshot_actions * 4 utility_vm_create_option = test_util.VmOption() utility_vm_create_option.set_image_uuid(test_lib.lib_get_image_by_name(img_name=os.environ.get('imageName_net')).uuid) l3_uuid = test_lib.lib_get_l3_by_name(os.environ.get('l3VlanNetworkName1')).uuid utility_vm_create_option.set_l3_uuids([l3_uuid]) utility_vm = test_lib.lib_create_vm(utility_vm_create_option) test_dict.add_utility_vm(utility_vm) utility_vm.check() test_util.test_dsc('Random Test Begin. 
Test target: 4 coexisting running VM (not include VR and SG target test VMs.).') robot_test_obj = test_util.Robot_Test_Object() robot_test_obj.set_test_dict(test_dict) robot_test_obj.set_vm_creation_option(vm_create_option) priority_action_obj = action_select.ActionPriority() priority_action_obj.add_priority_action_list(priority_actions) robot_test_obj.set_priority_actions(priority_action_obj) robot_test_obj.set_exclusive_actions_list(\ test_state.TestAction.vip_actions + \ test_state.TestAction.image_actions + \ test_state.TestAction.sg_actions) robot_test_obj.set_public_l3(public_l3) robot_test_obj.set_utility_vm(utility_vm) rounds = 1 while len(test_dict.get_vm_list(vm_header.RUNNING)) < target_running_vm: test_util.test_dsc('New round %s starts: random operation pickup.' % rounds) test_lib.lib_vm_random_operation(robot_test_obj) test_util.test_dsc('===============Round %s finished. Begin status checking.================' % rounds) rounds += 1 test_lib.lib_robot_status_check(test_dict) test_util.test_dsc('Reach test pass exit criterial.') test_lib.lib_robot_cleanup(test_dict) test_util.test_pass('Snapshots Robot Test Success') #Will be called only if exception happens in test(). def error_cleanup(): test_lib.lib_robot_cleanup(test_dict)
stadt-karlsruhe/python-oparl
refs/heads/master
tests/test_oparl.py
1
#!/usr/bin/env python # encoding: utf-8 # Copyright (c) 2016, Stadt Karlsruhe (www.karlsruhe.de) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import (absolute_import, division, print_function, unicode_literals) import warnings import mock import pytest import six import oparl import oparl.objects # Objects provided by the mocked ``_get_json``, keys are the URLs OBJECTS = { 'a-legislativeterm': { 'id': 'a-legislativeterm', 'type': 'https://schema.oparl.org/1.0/LegislativeTerm', }, 'object-with-id-that-differs-from-its-url': { 'id': 'this is not my url', 'type': 'https://schema.oparl.org/1.0/System', }, 'a-location': { 'id': 'a-location', 'type': 'https://schema.oparl.org/1.0/Location', }, } @pytest.fixture(scope='module', autouse=True) def mock_oparl(): ''' Mock parts of the ``oparl`` module. ``oparl._get_json`` is mocked so that it loads JSON data from ``OBJECTS``. ``oparl._is_url`` is mocked so that every string looks like an URL. 
''' def is_url(value): return isinstance(value, six.string_types) with mock.patch('oparl._get_json', new=OBJECTS.__getitem__): with mock.patch('oparl._is_url', new=is_url): yield def test_invalid_date_string_triggers_contentwarning(): with pytest.warns(oparl.ContentWarning) as record: oparl.from_json('''{ "id": "object-with-invalid-date", "type": "https://schema.oparl.org/1.0/Organization", "startDate": "this is not a date" }''') assert len(record) == 1 assert 'invalid date string' in str(record[0].message) def test_invalid_datetime_string_triggers_contentwarning(): with pytest.warns(oparl.ContentWarning) as record: oparl.from_json('''{ "id": "object-with-invalid-datetime", "type": "https://schema.oparl.org/1.0/Organization", "created": "this is not a date-time" }''') assert len(record) == 1 assert 'invalid date-time string' in str(record[0].message) def test_scalar_instead_of_list_triggers_specificationwarning(): with pytest.warns(oparl.SpecificationWarning) as record: obj = oparl.from_json('''{ "id": "object-with-scalar-instead-of-list", "type": "https://schema.oparl.org/1.0/Person", "membership": { "id": "does-not-exist", "type": "https://schema.oparl.org/1.0/Membership" } }''') assert len(record) == 1 assert 'non-list value' in str(record[0].message) membership = obj['membership'] assert isinstance(membership, list) assert len(membership) == 1 assert membership[0]['id'] == 'does-not-exist' def test_reference_instead_of_object_triggers_specificationwarning(): with pytest.warns(oparl.SpecificationWarning) as record: obj = oparl.from_json('''{ "id": "object-with-reference-instead-of-object", "type": "https://schema.oparl.org/1.0/Body", "location": "a-location" }''') assert len(record) == 1 assert 'must contain an object' in str(record[0].message) location = obj['location'] assert isinstance(location, oparl.objects.Location) assert location['id'] == 'a-location' def test_reference_instead_of_object_in_list_triggers_specificationwarning(): with 
pytest.warns(oparl.SpecificationWarning) as record: obj = oparl.from_json('''{ "id": "object-with-reference-instead-of-object-in-list", "type": "https://schema.oparl.org/1.0/Body", "legislativeTerm": ["a-legislativeterm"] }''') assert len(record) == 1 assert 'must contain objects' in str(record[0].message) terms = obj['legislativeTerm'] assert isinstance(terms, list) assert len(terms) == 1 assert isinstance(terms[0], oparl.objects.LegislativeTerm) assert terms[0]['id'] == 'a-legislativeterm' def test_object_instead_of_reference_triggers_specificationwarning(): with pytest.warns(oparl.SpecificationWarning) as record: obj = oparl.from_json('''{ "id": "object-with-object-instead-of-reference", "type": "https://schema.oparl.org/1.0/Body", "system": { "id": "does-not-exist", "type": "https://schema.oparl.org/1.0/System" } }''') assert len(record) == 1 assert 'must contain an object reference' in str(record[0].message) system = obj['system'] assert isinstance(system, oparl.objects.System) assert system['id'] == 'does-not-exist' def test_object_instead_of_reference_in_list_triggers_specificationwarning(): with pytest.warns(oparl.SpecificationWarning) as record: obj = oparl.from_json('''{ "id": "object-with-object-instead-of-reference-in-list", "type": "https://schema.oparl.org/1.0/System", "otherOparlVersions": [{ "id": "does-not-exist", "type": "https://schema.oparl.org/1.0/System" }] }''') assert len(record) == 1 assert 'must contain references' in str(record[0].message) others = obj['otherOparlVersions'] assert isinstance(others, list) assert len(others) == 1 assert isinstance(others[0], oparl.objects.System) assert others[0]['id'] == 'does-not-exist' def test_id_that_differs_from_url_triggers_contentwarning(): obj = oparl._lazy('object-with-id-that-differs-from-its-url', 'https://schema.oparl.org/1.0/System') with pytest.warns(oparl.ContentWarning) as record: obj.load() assert len(record) == 1 assert 'a different ID' in str(record[0].message) assert obj['id'] == 'this 
is not my url' def test_invalid_schema_uri_triggers_specificationwarning(): with pytest.warns(oparl.SpecificationWarning) as record: obj = oparl.from_json('''{ "id": "object-with-invalid-schema-uri", "type": "this-is-not-the-correct-schema-uri/System" }''') assert len(record) == 2 assert 'Invalid schema URI' in str(record[0].message) assert str(record[0].message) == str(record[1].message) assert obj['type'] == 'this-is-not-the-correct-schema-uri/System' def test_missing_id_raises_valueerror(): with pytest.raises(ValueError) as e: oparl.from_json('''{ "type": "https://schema.oparl.org/1.0/System" }''') assert 'does not have an `id` field' in str(e.value) def test_missing_type_raises_valueerror(): with pytest.raises(ValueError) as e: oparl.from_json('''{ "id": "does-not-exist" }''') assert 'does not have a `type` field' in str(e.value) def test_type_mismatch_raises_valueerror(): obj = oparl._lazy('a-location', 'https://schema.oparl.org/1.0/System') with pytest.raises(ValueError) as e: obj.load() assert 'does not match instance type' in str(e.value) def test_invalid_type_uri_raises_valuerror(): with pytest.raises(ValueError) as e: oparl.from_json('''{ "id": "does-not-exist", "type": "invalid" }''') assert 'Invalid type URI' in str(e.value) def test_unknown_type_raises_valueerror(): with pytest.raises(ValueError) as e: oparl.from_json('''{ "id": "does-not-exist", "type": "not/known" }''') assert 'Unknown type' in str(e.value)
kostajaitachi/shogun
refs/heads/develop
examples/undocumented/python_modular/modelselection_parameter_tree_modular.py
5
#!/usr/bin/env python # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # Written (W) 2011-2012 Heiko Strathmann # Copyright (C) 2011 Berlin Institute of Technology and Max-Planck-Society # parameter_list=[[None]] def modelselection_parameter_tree_modular (dummy): from modshogun import ParameterCombination from modshogun import ModelSelectionParameters, R_EXP, R_LINEAR from modshogun import PowerKernel from modshogun import GaussianKernel from modshogun import DistantSegmentsKernel from modshogun import MinkowskiMetric root=ModelSelectionParameters() combinations=root.get_combinations() combinations.get_num_elements() c=ModelSelectionParameters('C'); root.append_child(c) c.build_values(1, 11, R_EXP) power_kernel=PowerKernel() # print all parameter available for modelselection # Dont worry if yours is not included but, write to the mailing list #power_kernel.print_modsel_params() param_power_kernel=ModelSelectionParameters('kernel', power_kernel) root.append_child(param_power_kernel) param_power_kernel_degree=ModelSelectionParameters('degree') param_power_kernel_degree.build_values(1, 1, R_EXP) param_power_kernel.append_child(param_power_kernel_degree) metric1=MinkowskiMetric(10) # print all parameter available for modelselection # Dont worry if yours is not included but, write to the mailing list #metric1.print_modsel_params() param_power_kernel_metric1=ModelSelectionParameters('distance', metric1) param_power_kernel.append_child(param_power_kernel_metric1) param_power_kernel_metric1_k=ModelSelectionParameters('k') param_power_kernel_metric1_k.build_values(1, 12, R_LINEAR) param_power_kernel_metric1.append_child(param_power_kernel_metric1_k) gaussian_kernel=GaussianKernel() # print all parameter available for modelselection # Dont worry if yours is not included but, 
write to the mailing list #gaussian_kernel.print_modsel_params() param_gaussian_kernel=ModelSelectionParameters('kernel', gaussian_kernel) root.append_child(param_gaussian_kernel) param_gaussian_kernel_width=ModelSelectionParameters('width') param_gaussian_kernel_width.build_values(1, 2, R_EXP) param_gaussian_kernel.append_child(param_gaussian_kernel_width) ds_kernel=DistantSegmentsKernel() # print all parameter available for modelselection # Dont worry if yours is not included but, write to the mailing list #ds_kernel.print_modsel_params() param_ds_kernel=ModelSelectionParameters('kernel', ds_kernel) root.append_child(param_ds_kernel) param_ds_kernel_delta=ModelSelectionParameters('delta') param_ds_kernel_delta.build_values(1, 2, R_EXP) param_ds_kernel.append_child(param_ds_kernel_delta) param_ds_kernel_theta=ModelSelectionParameters('theta') param_ds_kernel_theta.build_values(1, 2, R_EXP) param_ds_kernel.append_child(param_ds_kernel_theta) # root.print_tree() combinations=root.get_combinations() # for i in range(combinations.get_num_elements()): # combinations.get_element(i).print_tree() return if __name__=='__main__': print('ModelSelection ParameterTree') modelselection_parameter_tree_modular(*parameter_list[0])
dsandeephegde/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/py/testing/io_/__init__.py
9480
#
overtherain/scriptfile
refs/heads/master
software/googleAppEngine/lib/django_1_3/tests/regressiontests/views/app3/__init__.py
9480
#
kontrafiktion/ansible
refs/heads/devel
contrib/inventory/abiquo.py
110
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' External inventory script for Abiquo ==================================== Shamelessly copied from an existing inventory script. This script generates an inventory that Ansible can understand by making API requests to Abiquo API Requires some python libraries, ensure to have them installed when using this script. This script has been tested in Abiquo 3.0 but it may work also for Abiquo 2.6. Before using this script you may want to modify abiquo.ini config file. This script generates an Ansible hosts file with these host groups: ABQ_xxx: Defines a hosts itself by Abiquo VM name label all: Contains all hosts defined in Abiquo user's enterprise virtualdatecenter: Creates a host group for each virtualdatacenter containing all hosts defined on it virtualappliance: Creates a host group for each virtualappliance containing all hosts defined on it imagetemplate: Creates a host group for each image template containing all hosts using it ''' # (c) 2014, Daniel Beneyto <[email protected]> # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
import os import sys import time import ConfigParser try: import json except ImportError: import simplejson as json from ansible.module_utils.urls import open_url def api_get(link, config): try: if link == None: url = config.get('api','uri') + config.get('api','login_path') headers = {"Accept": config.get('api','login_type')} else: url = link['href'] + '?limit=0' headers = {"Accept": link['type']} result = open_url(url, headers=headers, url_username=config.get('auth','apiuser').replace('\n', ''), url_password=config.get('auth','apipass').replace('\n', '')) return json.loads(result.read()) except: return None def save_cache(data, config): ''' saves item to cache ''' dpath = config.get('cache','cache_dir') try: cache = open('/'.join([dpath,'inventory']), 'w') cache.write(json.dumps(data)) cache.close() except IOError as e: pass # not really sure what to do here def get_cache(cache_item, config): ''' returns cached item ''' dpath = config.get('cache','cache_dir') inv = {} try: cache = open('/'.join([dpath,'inventory']), 'r') inv = cache.read() cache.close() except IOError as e: pass # not really sure what to do here return inv def cache_available(config): ''' checks if we have a 'fresh' cache available for item requested ''' if config.has_option('cache','cache_dir'): dpath = config.get('cache','cache_dir') try: existing = os.stat( '/'.join([dpath,'inventory'])) except: # cache doesn't exist or isn't accessible return False if config.has_option('cache', 'cache_max_age'): maxage = config.get('cache', 'cache_max_age') if ((int(time.time()) - int(existing.st_mtime)) <= int(maxage)): return True return False def generate_inv_from_api(enterprise_entity,config): try: inventory['all'] = {} inventory['all']['children'] = [] inventory['all']['hosts'] = [] inventory['_meta'] = {} inventory['_meta']['hostvars'] = {} enterprise = api_get(enterprise_entity,config) vms_entity = next(link for link in (enterprise['links']) if (link['rel']=='virtualmachines')) vms = 
api_get(vms_entity,config) for vmcollection in vms['collection']: vm_vapp = next(link for link in (vmcollection['links']) if (link['rel']=='virtualappliance'))['title'].replace('[','').replace(']','').replace(' ','_') vm_vdc = next(link for link in (vmcollection['links']) if (link['rel']=='virtualdatacenter'))['title'].replace('[','').replace(']','').replace(' ','_') vm_template = next(link for link in (vmcollection['links']) if (link['rel']=='virtualmachinetemplate'))['title'].replace('[','').replace(']','').replace(' ','_') # From abiquo.ini: Only adding to inventory VMs with public IP if (config.getboolean('defaults', 'public_ip_only')) == True: for link in vmcollection['links']: if (link['type']=='application/vnd.abiquo.publicip+json' and link['rel']=='ip'): vm_nic = link['title'] break else: vm_nic = None # Otherwise, assigning defined network interface IP address else: for link in vmcollection['links']: if (link['rel']==config.get('defaults', 'default_net_interface')): vm_nic = link['title'] break else: vm_nic = None vm_state = True # From abiquo.ini: Only adding to inventory VMs deployed if ((config.getboolean('defaults', 'deployed_only') == True) and (vmcollection['state'] == 'NOT_ALLOCATED')): vm_state = False if not vm_nic == None and vm_state: if not vm_vapp in inventory.keys(): inventory[vm_vapp] = {} inventory[vm_vapp]['children'] = [] inventory[vm_vapp]['hosts'] = [] if not vm_vdc in inventory.keys(): inventory[vm_vdc] = {} inventory[vm_vdc]['hosts'] = [] inventory[vm_vdc]['children'] = [] if not vm_template in inventory.keys(): inventory[vm_template] = {} inventory[vm_template]['children'] = [] inventory[vm_template]['hosts'] = [] if config.getboolean('defaults', 'get_metadata') == True: meta_entity = next(link for link in (vmcollection['links']) if (link['rel']=='metadata')) try: metadata = api_get(meta_entity,config) if (config.getfloat("api","version") >= 3.0): vm_metadata = metadata['metadata'] else: vm_metadata = metadata['metadata']['metadata'] 
inventory['_meta']['hostvars'][vm_nic] = vm_metadata except Exception as e: pass inventory[vm_vapp]['children'].append(vmcollection['name']) inventory[vm_vdc]['children'].append(vmcollection['name']) inventory[vm_template]['children'].append(vmcollection['name']) inventory['all']['children'].append(vmcollection['name']) inventory[vmcollection['name']] = [] inventory[vmcollection['name']].append(vm_nic) return inventory except Exception as e: # Return empty hosts output return { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } } def get_inventory(enterprise, config): ''' Reads the inventory from cache or Abiquo api ''' if cache_available(config): inv = get_cache('inventory', config) else: default_group = os.path.basename(sys.argv[0]).rstrip('.py') # MAKE ABIQUO API CALLS # inv = generate_inv_from_api(enterprise,config) save_cache(inv, config) return json.dumps(inv) if __name__ == '__main__': inventory = {} enterprise = {} # Read config config = ConfigParser.SafeConfigParser() for configfilename in [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'abiquo.ini']: if os.path.exists(configfilename): config.read(configfilename) break try: login = api_get(None,config) enterprise = next(link for link in (login['links']) if (link['rel']=='enterprise')) except Exception as e: enterprise = None if cache_available(config): inventory = get_cache('inventory', config) else: inventory = get_inventory(enterprise, config) # return to ansible sys.stdout.write(str(inventory)) sys.stdout.flush()
gccpacman/pydelicious-newAPI
refs/heads/master
tools/dlcs_feeds.py
23
#!/usr/bin/env python try: from hashlib import md5 except ImportError: from md5 import md5 from itertools import chain import locale import optparse import os from pprint import pprint, pformat import re import sys import pydelicious #ENCODING = locale.getpreferredencoding() __usage__ = """%prog [options] [command] [args...] """ __options__ = [ # (('-e', '--encoding'),{'default':ENCODING, # 'help':"Use custom character encoding [locale: %default]"}), (('-C', '--clear-screen'),{'action':'store_true'}), (('-L', '--list-feeds'),{'action':'store_true','help':'List available feeds for given parameters. '}), (('-f', '--format'),{'default':'rss'}), (('-k', '--key'),{}), (('-l', '--url'),{}), (('-H', '--urlmd5'),{}), (('-t', '--tag'),{'dest':'tags','action':'append'}), (('-u', '--username'),{}), # (('-v', '--verboseness'),{'default':0, # 'help':"TODO: Increase or set DEBUG (defaults to 0 or the DLCS_DEBUG env. var.)"}) ] def parse_argv(options, argv, usage="%prog [args] [options]"): parser = optparse.OptionParser(usage) for opt in options: parser.add_option(*opt[0], **opt[1]) optsv, args = parser.parse_args(argv) if optsv.url and not optsv.urlmd5: optsv.urlmd5 = md5(optsv.url).hexdigest() if optsv.tags: optsv.tag = ' '.join(optsv.tags) else: optsv.tag = None return parser, optsv, args find_patterns = re.compile('%\(([^)]*)\)s').findall def feeds_for_params(**params): kandidates = {} "Feed paths that need more parameters. " matches = [] "Feed paths that match current paramters. 
" params = set([p for p in params if type(params[p]) != type(None)]) for name, path in pydelicious.delicious_v2_feeds.items(): ptrns = set(find_patterns(path)) if ptrns > params: kandidates[name] = params.difference(ptrns) elif ptrns == params: matches.append(name) return matches, kandidates def main(argv): optparser, opts, args = parse_argv(__options__, argv, __usage__) kwds = {} for k in ('format','username','tag','urlmd5','key'): v = getattr(opts, k) if type(v) != type(None): kwds[k] = v if opts.clear_screen: if os.name == 'posix': os.system('clear') else: os.system('cls') matches, candidates = feeds_for_params(**kwds) if opts.list_feeds: print >>sys.stderr,"Feeds for current parameters (%s)" % kwds if matches: print "Exact matches:" for m in matches: print '\t'+m+':', pydelicious.delicious_v2_feeds[m] % kwds if candidates: print "Candidates:" for m in candidates: print '\t'+pydelicious.delicious_v2_feeds[m] sys.exit() path = args and args.pop(0) if not path: if len(matches) == 1: path = matches[0] print >>sys.stderr, "Setting path to %s" % path elif matches: print >>sys.stderr, "Multiple paths for given parameters, see -L" sys.exit() assert path in pydelicious.delicious_v2_feeds return pydelicious.dlcs_feed(path, **kwds) def _main(): try: print sys.exit(main(sys.argv[1:])) except KeyboardInterrupt: print >>sys.stderr, "User interrupt" # Entry point if __name__ == '__main__': _main() # vim:noet:
fldc/CouchPotatoServer
refs/heads/custom
libs/bs4/diagnose.py
431
"""Diagnostic functions, mainly for use when doing tech support.""" import cProfile from StringIO import StringIO from HTMLParser import HTMLParser import bs4 from bs4 import BeautifulSoup, __version__ from bs4.builder import builder_registry import os import pstats import random import tempfile import time import traceback import sys import cProfile def diagnose(data): """Diagnostic suite for isolating common problems.""" print "Diagnostic running on Beautiful Soup %s" % __version__ print "Python version %s" % sys.version basic_parsers = ["html.parser", "html5lib", "lxml"] for name in basic_parsers: for builder in builder_registry.builders: if name in builder.features: break else: basic_parsers.remove(name) print ( "I noticed that %s is not installed. Installing it may help." % name) if 'lxml' in basic_parsers: basic_parsers.append(["lxml", "xml"]) from lxml import etree print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)) if 'html5lib' in basic_parsers: import html5lib print "Found html5lib version %s" % html5lib.__version__ if hasattr(data, 'read'): data = data.read() elif os.path.exists(data): print '"%s" looks like a filename. Reading data from the file.' % data data = open(data).read() elif data.startswith("http:") or data.startswith("https:"): print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup." return print for parser in basic_parsers: print "Trying to parse your markup with %s" % parser success = False try: soup = BeautifulSoup(data, parser) success = True except Exception, e: print "%s could not parse the markup." % parser traceback.print_exc() if success: print "Here's what %s did with the markup:" % parser print soup.prettify() print "-" * 80 def lxml_trace(data, html=True, **kwargs): """Print out the lxml events that occur during parsing. 
This lets you see how lxml parses a document when no Beautiful Soup code is running. """ from lxml import etree for event, element in etree.iterparse(StringIO(data), html=html, **kwargs): print("%s, %4s, %s" % (event, element.tag, element.text)) class AnnouncingParser(HTMLParser): """Announces HTMLParser parse events, without doing anything else.""" def _p(self, s): print(s) def handle_starttag(self, name, attrs): self._p("%s START" % name) def handle_endtag(self, name): self._p("%s END" % name) def handle_data(self, data): self._p("%s DATA" % data) def handle_charref(self, name): self._p("%s CHARREF" % name) def handle_entityref(self, name): self._p("%s ENTITYREF" % name) def handle_comment(self, data): self._p("%s COMMENT" % data) def handle_decl(self, data): self._p("%s DECL" % data) def unknown_decl(self, data): self._p("%s UNKNOWN-DECL" % data) def handle_pi(self, data): self._p("%s PI" % data) def htmlparser_trace(data): """Print out the HTMLParser events that occur during parsing. This lets you see how HTMLParser parses a document when no Beautiful Soup code is running. """ parser = AnnouncingParser() parser.feed(data) _vowels = "aeiou" _consonants = "bcdfghjklmnpqrstvwxyz" def rword(length=5): "Generate a random word-like string." s = '' for i in range(length): if i % 2 == 0: t = _consonants else: t = _vowels s += random.choice(t) return s def rsentence(length=4): "Generate a random sentence-like string." return " ".join(rword(random.randint(4,9)) for i in range(length)) def rdoc(num_elements=1000): """Randomly generate an invalid HTML document.""" tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table'] elements = [] for i in range(num_elements): choice = random.randint(0,3) if choice == 0: # New tag. tag_name = random.choice(tag_names) elements.append("<%s>" % tag_name) elif choice == 1: elements.append(rsentence(random.randint(1,4))) elif choice == 2: # Close a tag. 
tag_name = random.choice(tag_names) elements.append("</%s>" % tag_name) return "<html>" + "\n".join(elements) + "</html>" def benchmark_parsers(num_elements=100000): """Very basic head-to-head performance benchmark.""" print "Comparative parser benchmark on Beautiful Soup %s" % __version__ data = rdoc(num_elements) print "Generated a large invalid HTML document (%d bytes)." % len(data) for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: success = False try: a = time.time() soup = BeautifulSoup(data, parser) b = time.time() success = True except Exception, e: print "%s could not parse the markup." % parser traceback.print_exc() if success: print "BS4+%s parsed the markup in %.2fs." % (parser, b-a) from lxml import etree a = time.time() etree.HTML(data) b = time.time() print "Raw lxml parsed the markup in %.2fs." % (b-a) import html5lib parser = html5lib.HTMLParser() a = time.time() parser.parse(data) b = time.time() print "Raw html5lib parsed the markup in %.2fs." % (b-a) def profile(num_elements=100000, parser="lxml"): filehandle = tempfile.NamedTemporaryFile() filename = filehandle.name data = rdoc(num_elements) vars = dict(bs4=bs4, data=data, parser=parser) cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename) stats = pstats.Stats(filename) # stats.strip_dirs() stats.sort_stats("cumulative") stats.print_stats('_html5lib|bs4', 50) if __name__ == '__main__': diagnose(sys.stdin.read())
mangaki/mangaki
refs/heads/master
mangaki/mangaki/tests/test_posters.py
1
from unittest.mock import patch, NonCallableMock from django.test import TestCase from mangaki.models import Editor, Studio, Work, Category from mangaki.utils.db import get_potential_posters class PostersTest(TestCase): def setUp(self): anime = Category.objects.get(slug='anime') self.kiznaiver = Work.objects.create( title='Kiznaiver', category=anime, ext_poster='bRoKeN_lInK' # That's how I feel when I see a broken poster. ) @patch('mangaki.utils.db.client.search_work') def test_get_potential_posters(self, mocked_search): with self.subTest('When MAL returns no poster'): expected = [{ 'current': True, 'url': self.kiznaiver.ext_poster }] mocked_search.return_value = NonCallableMock(poster=None) # Let the magic occur. posters = get_potential_posters(self.kiznaiver) # In this case, `get_potential_posters` cannot fix the current poster. self.assertCountEqual(posters, expected) with self.subTest('When MAL returns a poster'): expected = [{ 'current': True, 'url': self.kiznaiver.ext_poster }, { 'current': False, 'url': 'kiznaiver_mal_poster_url' }] mocked_search.return_value = NonCallableMock(poster=expected[1]['url']) posters = get_potential_posters(self.kiznaiver) # In this case, `get_potential_posters` should return a list of two posters, i.e. old external, MAL's one. self.assertCountEqual(posters, expected)
bieschke/nuffle
refs/heads/master
lib/python/formencode/schema.py
1
from interfaces import * from api import * import declarative __all__ = ['Schema'] class Schema(FancyValidator): """ A schema validates a dictionary of values, applying different validators (be key) to the different values. If allow_extra_fields=True, keys without validators will be allowed; otherwise they will raise Invalid. If filter_extra_fields is set to true, then extra fields are not passed back in the results. Validators are associated with keys either with a class syntax, or as keyword arguments (class syntax is usually easier). Something like:: class MySchema(Schema): name = Validators.PlainText() phone = Validators.PhoneNumber() These will not be available as actual instance variables, but will be collected in a dictionary. To remove a validator in a subclass that is present in a superclass, set it to None, like:: class MySubSchema(MySchema): name = None """ # These validators will be applied before this schema: pre_validators = [] # These validators will be applied after this schema: chained_validators = [] # If true, then it is not an error when keys that aren't # associated with a validator are present: allow_extra_fields = False # If true, then keys that aren't associated with a validator # are removed: filter_extra_fields = False # If this is given, then any keys that aren't available but # are expected will be replaced with this value (and then # validated!) 
This does not override a present .if_missing # attribute on validators: if_key_missing = NoDefault compound = True fields = {} order = [] messages = { 'notExpected': 'The input field %(name)s was not expected.', 'missingValue': "Missing value", } __mutableattributes__ = ('fields', 'chained_validators', 'pre_validators') def __classinit__(cls, new_attrs): FancyValidator.__classinit__(cls, new_attrs) # Don't bother doing anything if this is the most parent # Schema class (which is the only class with just # FancyValidator as a superclass): if cls.__bases__ == (FancyValidator,): return cls # Scan through the class variables we've defined *just* # for this subclass, looking for validators (both classes # and instances): for key, value in new_attrs.items(): if key in ('pre_validators', 'chained_validators', 'view'): continue if is_validator(value): cls.fields[key] = value delattr(cls, key) # This last case means we're overwriting a validator # from a superclass: elif cls.fields.has_key(key): del cls.fields[key] for name, value in cls.fields.items(): cls.add_field(name, value) def __initargs__(self, new_attrs): for key, value in new_attrs.items(): if key in ('pre_validators', 'chained_validators', 'view'): continue if is_validator(value): self.fields[key] = value delattr(self, key) # This last case means we're overwriting a validator # from a superclass: elif self.fields.has_key(key): del self.fields[key] for name, value in self.fields.items(): self.add_field(name, value) def _to_python(self, value_dict, state): if not value_dict and self.if_empty is not NoDefault: return self.if_empty for validator in self.pre_validators: value_dict = validator.to_python(value_dict, state) new = {} errors = {} unused = self.fields.keys() if state is not None: previous_key = getattr(state, 'key', None) previous_full_dict = getattr(state, 'full_dict', None) state.full_dict = value_dict try: for name, value in value_dict.items(): try: unused.remove(name) except ValueError: if not 
self.allow_extra_fields: raise Invalid( self.message('notExpected', state, name=repr(name)), value_dict, state) else: if not self.filter_extra_fields: new[name] = value continue validator = self.fields[name] try: new[name] = validator.to_python(value, state) except Invalid, e: errors[name] = e for name in unused: validator = self.fields[name] try: if_missing = validator.if_missing except AttributeError: if_missing = NoDefault if if_missing is NoDefault: if self.if_key_missing is NoDefault: errors[name] = Invalid( self.message('missingValue', state), None, state) else: try: new[name] = validator.to_python(self.if_key_missing, state) except Invalid, e: errors[name] = e else: new[name] = validator.if_missing if errors: for validator in self.chained_validators: if (not hasattr(validator, 'validate_partial') or not getattr(validator, 'validate_partial_form', False)): continue try: validator.validate_partial(value_dict, state) except Invalid, e: sub_errors = e.unpack_errors() if not isinstance(sub_errors, dict): # Can't do anything here continue merge_dicts(errors, sub_errors) if errors: raise Invalid( format_compound_error(errors), value_dict, state, error_dict=errors) for validator in self.chained_validators: new = validator.to_python(new, state) return new finally: if state is not None: state.key = previous_key state.full_dict = previous_full_dict def _from_python(self, value_dict, state): chained = self.chained_validators[:] chained.reverse() finished = [] for validator in chained: __traceback_info__ = 'for_python chained_validator %s (finished %s)' % (validator, ', '.join(map(repr, finished)) or 'none') finished.append(validator) value_dict = validator.from_python(value_dict, state) new = {} errors = {} unused = self.fields.keys() if state is not None: previous_key = getattr(state, 'key', None) previous_full_dict = getattr(state, 'full_dict', None) state.full_dict = value_dict try: for name, value in value_dict.items(): __traceback_info__ = 'for_python in %s' % name 
try: unused.remove(name) except ValueError: if not self.allow_extra_fields: raise Invalid( self.message('notExpected', state, name=repr(name)), value_dict, state) if not self.filter_extra_fields: new[name] = value else: try: new[name] = self.fields[name].from_python(value, state) except Invalid, e: errors[name] = e del __traceback_info__ for name in unused: validator = self.fields[name] try: new[name] = validator.from_python(None, state) except Invalid, e: errors[name] = e if errors: raise Invalid( format_compound_error(errors), value_dict, state, error_dict=errors) pre = self.pre_validators[:] pre.reverse() for validator in pre: __traceback_info__ = 'for_python pre_validator %s' % validator new = validator.from_python(new, state) return new finally: if state is not None: state.key = previous_key state.full_dict = previous_full_dict def add_chained_validator(self, cls, validator): if self is not None: if self.chained_validators is cls.chained_validators: self.chained_validators = cls.chained_validators[:] self.chained_validators.append(validator) else: cls.chained_validators.append(validator) add_chained_validator = declarative.classinstancemethod( add_chained_validator) def add_field(self, cls, name, validator): if self is not None: if self.fields is cls.fields: self.fields = cls.fields.copy() self.fields[name] = validator else: cls.fields[name] = validator add_field = declarative.classinstancemethod(add_field) def add_pre_validator(self, cls, validator): if self is not None: if self.pre_validators is cls.pre_validators: self.pre_validators = cls.pre_validators[:] self.pre_validators.append(validator) else: cls.pre_validators.append(validator) add_pre_validator = declarative.classinstancemethod(add_pre_validator) def format_compound_error(v, indent=0): if isinstance(v, Exception): return str(v) elif isinstance(v, dict): l = v.items() l.sort() return ('%s\n' % (' '*indent)).join( ["%s: %s" % (k, format_compound_error(value, indent=len(k)+2)) for k, value in l if 
value is not None]) elif isinstance(v, list): return ('%s\n' % (' '*indent)).join( ['%s' % (format_compound_error(value, indent=indent)) for value in v if value is not None]) elif isinstance(v, str): return v else: assert 0, "I didn't expect something like %s" % repr(v) def merge_dicts(d1, d2): for key in d2: if key in d1: d1[key] = merge_values(d1[key], d2[key]) else: d1[key] = d2[key] return d1 def merge_values(v1, v2): if (isinstance(v1, (str, unicode)) and isinstance(v2, (str, unicode))): return v1 + '\n' + v2 elif (isinstance(v1, (list, tuple)) and isinstance(v2, (list, tuple))): return merge_lists(v1, v2) elif isinstance(v1, dict) and isinstance(v2, dict): return merge_dicts(v1, v2) else: # @@: Should we just ignore errors? Seems we do... return v1 def merge_lists(l1, l2): if len(l1) < len(l2): l1 = l1 + [None]*(len(l2)-len(l1)) elif len(l2) < len(l1): l2 = l2 + [None]*(len(l1)-len(l2)) result = [] for l1item, l2item in zip(l1, l2): item = None if l1item is None: item = l2item elif l2item is None: item = l1item else: item = merge_values(l1item, l2item) result.append(item) return result
ilathid/ilathidEngine
refs/heads/master
vplayer/old/m_movie.py
1
# 3 options: # 1: video player is dormant, and renders a frame and returns it on function call # 2: video player is threaded, and a frame can be retrieved at any time via function call # 3: video player is threaded, and a callback is made to external object with rendered frames # # For our purposes, 2 looks to be the best (in my opinion), since our engine is "active" (always running at some framerate). If we ever change this, then I would suggest option 3: the video player calls an external function to notify that a frame is ready. # group=None, target=None, name=None, args=(), kwargs={} import threading import time import pymedia import pymedia.muxer as muxer import pymedia.audio.acodec as acodec import pymedia.video.vcodec as vcodec import pymedia.audio.sound as sound # import ao import pygame # PlaybackBuffer is a buffer for processed audio around the sound module. I use it because the pymedia snd module will block if its internal buffer is full, which is undesireable for the main video playback. I also can't make use of pymedia.snd.getSpace() because it is broken in (at least) linux, and doesn't seem to give all that reasonable of data. # The result is that the snd module only needs a snd.play(data) function, which is good because it means something like libao could just as easily be used. class PlaybackBuffer: eob = 0.0 aBuffer = [] def __init__(self, snd): self.snd = snd self.t = threading.Thread(None, target=self.process) self.aBuffer = [] self.eob = time.time() self._blk = threading.Semaphore(1) self._stop = threading.Event() # Stop Event. Stops once the buffer is empty self._fstop = threading.Event() # Force Stop. 
Stops immediately self._notEmpty = threading.Event() def begin(self): self.t.start() # Stop after buffer empties def stop(self): self._stop.set() # Stop even if there is audio on the buffer def fstop(self): self._fstop.set() def getLeft(self): return self.eob - time.time() # Called from outside the 'process' thread to add sound data to the buffer. def play(self, data, sndlen): if self._stop.isSet() or self._fstop.isSet(): return False # add to buffer self._blk.acquire() self.aBuffer.append(data) if len(self.aBuffer) == 1: # print "1 sound" self._notEmpty.set() self._blk.release() # Adjust buffer length variable if self.eob < time.time(): self.eob = time.time() + sndlen else: self.eob = self.eob + sndlen # threaded audio processor, waits for audio on the buffer and sends it to the snd module. # the snd module can block all it wants in this case. When the snd module un-blocks, more # sound can be fed to it (ie immediately) def process(self): # loop until stop while not self._fstop.isSet(): self._notEmpty.wait(.5) # .5 second wait, in case of fstop event if self._notEmpty.isSet(): if self._stop.isSet(): self._fstop.set() else: self._blk.acquire() data = self.aBuffer.pop(0) if len(self.aBuffer) == 0: self._notEmpty.clear() self._blk.release() # Process the data. May block, but that is okay self.snd.play( data ) # This is the internal movie player module. I kept this seperate to simplify things, and in case movie frames are not always read from a file. class MovieInternal: vfTime = 0.0 # Video Frame Period (1/frame rate). Needs to be adjusted on the fly. eaq = 0.05 # Audio Queue Time: how many seconds ahead can we plan (queue) audio? eag = 0.01 # Audio Gap Tolerance: for small gaps, just run sound together. Don't wait .001 seconds (or something) for the right time to play the next audio segment tstart = 0.0 # time at which video playback started. 
frame = 0 # for calculating vtime vtime_start = 0.0 aBuffer = [] # Buffer (pointers) to raw audio frames, in order vBuffer = [] # Buffer (pointers) to raw video frames, in order (?) (what about IPBB?) adecoder = None vdecoder = None callback = None # Callback Class, Implements onVideoReady( vfr ), where data is vfr.data # Get current playback time def ctime(self): return time.time() - self.tstart # Get pts of current video frame (where PTS data is not available def vtime(self, vfr): # Get expected vtime using frame rate vtime = self.frame * self.vfTime + self.vtime_start # use estimate # correct for PTS data, using averaging in case of bogus values (??) vtime2 = vfr[1] if vtime2 > 0: vtime = (vtime + vtime2)/2.0 return vtime def adata2time(self, data): return float(len(data.data))/(2*data.channels*data.sample_rate) def aBufferFull(self): return len(self.aBuffer) >= 100 def vBufferFull(self): return len(self.vBuffer) >= 100 def parse(self, data): # Parse raw mpeg file data pstream = self.demux.parse( data ) for data in pstream: if data[0] == self.video_index: self.vBuffer.append((data[1], data[ 3 ] / 90000.0)) if data[0] == self.audio_index: self.aBuffer.append((data[1], data[ 3 ] / 90000.0)) def playback_buffers(self): # play movie data # Returns time before action is needed ctime = self.ctime() t1 = self.processAudio(ctime) if t1 == 0: return 0 # If no audio was handled, try a video frame t2 = self.processVideo(ctime) if t2 == 0.0: return 0.0 # Otherwise, return the shorter time return min(t1, t2) def processAudio(self, ctime): if len(self.aBuffer) == 0: return 1.0 # time of the current raw sound atime = self.aBuffer[0][1] # How much audio is on the buffer? qtime = self.snd.getLeft() # Should deal with audio on aBuffer? # 1. is the next audio segment supposed to be played in the past? # 2. is the next audio segment supposed to be played within eaq of the present, # and would the next audio segment be played within eag of the end of the # last sound? 
if (ctime > atime) or (qtime > 0 and atime < ctime + self.eaq and atime < ctime + qtime + self.eag): # print "AUDIO" # Need to process audio ardata = self.aBuffer[0] adata = self.adecoder.decode( ardata[0] ) # print len(adata.data) # If there is room on the buffer # print "free" self.aBuffer.pop(0) sndlen = self.adata2time(adata) # Drop if it the start of the next sound is closer than the end of the current # sound. (but using 3/4) if ctime + qtime > atime + 3.0*sndlen / 4: print ctime, qtime, atime, sndlen print " A Delete Too Late" else: # sndarray = numpy.fromstring(adata.data) ## sndarray = numpy.transpose(numpy.vstack((sndarray, sndarray))) ##sound = pygame.sndarray.make_sound(sndarray) # sound.play() # t1 = time.time() ##self.snd.play(sound) # print "t2", time.time()-t1 self.snd.play( adata.data, sndlen ) del(ardata) del(adata) return 0.0 # when do we need action? return qtime def processVideo(self, ctime): if len(self.vBuffer) == 0: # Just deal with audio return 1.0 vtime = self.vtime(self.vBuffer[0]) if vtime < ctime: # Need to process video # Delete only one at a time: remember, audio has presedence vrdata = self.vBuffer.pop(0) vdata = self.vdecoder.decode( vrdata[0] ) if vdata != None: # correct vfTime, using an average if vdata.rate > 1000: vfTime2 = 1000.0 / vdata.rate else: vfTime2 = 1.0 / vdata.rate self.vfTime = (self.vfTime + vfTime2) / 2.0 # if PTS, use for vtime calc if vrdata[1]>0: self.vtime_start = vtime # vrdata[1] self.frame = 1 else: self.frame = self.frame + 1 # If we are on time, show the frame if (ctime - vtime) <= self.vfTime*2: self.callback.onVideoReady( vdata ) else: print " V Delete Late" del vdata del vrdata return 0.0 # When do we need action? 
return vtime - ctime class MovieFile(MovieInternal): filename = "" mfile = None # file object for movie file video_index = -1 # id for raw video frames audio_index = -1 # id for raw audio frames READ = 50000 # # bytes, 50000 should take about 0.005 to 0.01 seconds to read and sort demux = None # Demuxer def __init__(self, filename): self.filename = filename def play(self, vol=0xaaaa, pos=0): # first two are flags for the thread self.event_stop = False self.event_pause = False # this is to block the thread untill it is un-paused self.snd = None t = threading.Thread(None, target=self.playback, kwargs={'pos':pos, 'vol':vol}) t.start() def stop(self): self.event_stop = True def playing(self): return self.event_stop def pause(self): self.event_pause = not self.event_pause # if self.event_pause: # self.snd.setVolume(0) # else: # vol = (self.vol & 0x003f) << 8 # self.snd.setVolume(vol) def pause_fade(self): self.event_pause = not self.event_pause if self.event_pause: for i in range(self.vol, 0, -1): voli = (i & 0x003f) << 8 # self.snd.setVolume(voli) # print voli time.sleep(.005) else: vol = (self.vol & 0x003f) << 8 # self.snd.setVolume(vol) def setVolume(self, vol): # vol is from 1 to 64. No left-right control ( :<( ) self.vol = vol vol = (vol & 0x003f) << 8; # grab 7 bits, shift by 8. So bits 15-8 are set or not. # if self.snd != None: # self.snd.setVolume(vol) def playback(self, vol=0, pos=0): # open the file self.mfile = open( self.filename, 'rb' ) # create a demuxer using filename extension self.demux = muxer.Demuxer(self.filename.split( '.' )[ -1 ].lower()) tempDemux = muxer.Demuxer(self.filename.split( '.' 
)[ -1 ].lower()) # read some of the file fdata = self.mfile.read( 300000 ) pstream = tempDemux.parse( fdata ) # initialize decoders # find the audio stream for streami in range(len(tempDemux.streams)): stream = tempDemux.streams[streami] print tempDemux.streams if stream['type'] == muxer.CODEC_TYPE_VIDEO: try: # Set the initial sound delay to 0 for now # It defines initial offset from video in the beginning of the stream # self.resetVideo() # seekADelta= 0 # Setting up the HW video codec self.vdecoder = pymedia.video.ext_codecs.Decoder( stream ) print "GOT HW CODEC" except: try: # Fall back to SW video codec self.vdecoder= vcodec.Decoder( stream ) print "GOT SW CODEC" except: traceback.print_exc() print "FAILED TO INIT VIDEO CODEC" self.video_index = streami break for streami in range(len(tempDemux.streams)): stream = tempDemux.streams[streami] if stream['type'] == muxer.CODEC_TYPE_AUDIO: self.adecoder = acodec.Decoder( stream ) self.audio_index = streami break print "Video index: " + str(self.video_index) print "Audio index: " + str(self.audio_index) # decode a frame to get bitrate, etc for vdata in pstream: if vdata[0] != self.video_index: continue vfr = self.vdecoder.decode( vdata[1] ) if vfr == None: continue # WHY? 
break self.vdecoder.reset() if self.audio_index != -1: for vdata in pstream: if vdata[0] != self.audio_index: continue afr = self.adecoder.decode( vdata[1] ) break self.adecoder.reset() self.channels = afr.channels self.sample_rate = afr.sample_rate # print 'Opening sound', self.sample_rate, self.channels, sound.AFMT_S16_LE, 0 sndModule = sound.Output( self.sample_rate, self.channels, sound.AFMT_S16_NE ) self.snd = PlaybackBuffer(sndModule) self.snd.begin() # pygame.mixer.init(self.sample_rate, -16, self.channels, 4096) # 4096 # pygame.mixer.set_num_channels(2) # self.snd = pygame.mixer.Channel(0) # self.snd = ao.AudioDevice( # 0, # bits=16, # rate=self.sample_rate, # channels=self.channels, # byte_format=1) print "Sample rate: " + str(self.sample_rate) print "Channels: " + str(self.channels) # self.fullspace = self.snd.getSpace() self.fullspace = 0 print "FULLSPACE", self.fullspace self.setVolume(vol) # print self.snd.getVolume() # Set up output video method # self.snd = sound.Output( sdecoded.sample_rate, sdecoded.channels, sound.AFMT_S16_NE ) pygame.init() pygame.display.set_mode( vfr.size, 0 ) self.overlay = pygame.Overlay( pygame.YV12_OVERLAY, vfr.size ) # set overlay loc? # Will need to adjust for aspect # if vfr.aspect_ratio> .0: # self.pictureSize= ( vfr.size[ 1 ]* vfr.aspect_ratio, vfr.size[ 1 ] ) # else: # self.pictureSize= vfr.size print "vfr info: " + str(vfr) print dir(vfr) print vfr.rate # frames/second. Each vfr is a frame. 
print vfr.bitrate print vfr.aspect_ratio if vfr.rate > 1000: self.vfTime = 1000.0 / vfr.rate else: self.vfTime = 1.0 / vfr.rate self.tstart = time.time() - pos self.callback = self # Now I can trash the temporary muxer, and do things properly del(tempDemux) self.parse(fdata) file_ended = False while not self.event_stop: # Process audio/video, or read or sleep if len(self.aBuffer) == 0 or len(self.vBuffer) == 0: if not self.read(): file_ended = True if len(self.aBuffer) == 0: self.event_stop = True continue stime = self.playback_buffers() # "freetime" if stime > 0: if not self.vBufferFull() and not self.aBufferFull(): # print "READ" if not self.read(): file_ended = True else: # print " Sleep", stime # Sleep until a new frame is needed time.sleep(stime/2.0) if len(self.aBuffer) == 0: self.snd.stop() else: self.snd.fstop() self.event_stop = True print len(self.aBuffer) def read(self): # read and parse new data fdata = self.mfile.read(self.READ) if len(fdata) > 0: self.parse(fdata) return True else: return False # Display a video frame def onVideoReady(self, vfr): if vfr.data != None: self.overlay.display( vfr.data ) # External movie player class. 
To be replaced to fit in AoI class m_movie(m_movie_internal): filename = "" mfile = None # file object for movie file video_index = -1 # id for raw video frames audio_index = -1 # id for raw audio frames READ = 50000 # # bytes, 50000 should take about 0.005 to 0.01 seconds to read and sort demux = None # Demuxer def __init__(self, filename): self.filename = filename def play(self, vol=0xaaaa, pos=0): # first two are flags for the thread self.event_stop = False self.event_pause = False # this is to block the thread untill it is un-paused self.snd = None self.Event_pauseEnd = threading.Event() t = threading.Thread(None, target=self.playback, kwargs={'pos':pos, 'vol':vol}) t.start() def stop(self): self.event_stop = True def pause(self): self.event_pause = not self.event_pause # if self.event_pause: # self.snd.setVolume(0) # else: # vol = (self.vol & 0x003f) << 8 # self.snd.setVolume(vol) def pause_fade(self): self.event_pause = not self.event_pause if self.event_pause: for i in range(self.vol, 0, -1): voli = (i & 0x003f) << 8 # self.snd.setVolume(voli) # print voli time.sleep(.005) else: vol = (self.vol & 0x003f) << 8 # self.snd.setVolume(vol) def setVolume(self, vol): # vol is from 1 to 64. No left-right control ( :<( ) self.vol = vol vol = (vol & 0x003f) << 8; # grab 7 bits, shift by 8. So bits 15-8 are set or not. # if self.snd != None: # self.snd.setVolume(vol) def playback(self, vol=0, pos=0): # open the file self.mfile = open( self.filename, 'rb' ) # create a demuxer using filename extension self.demux = muxer.Demuxer(self.filename.split( '.' )[ -1 ].lower()) tempDemux = muxer.Demuxer(self.filename.split( '.' 
)[ -1 ].lower()) # read some of the file fdata = self.mfile.read( 300000 ) pstream = tempDemux.parse( fdata ) # initialize decoders # find the audio stream for streami in range(len(tempDemux.streams)): stream = tempDemux.streams[streami] print tempDemux.streams if stream['type'] == muxer.CODEC_TYPE_VIDEO: try: # Set the initial sound delay to 0 for now # It defines initial offset from video in the beginning of the stream # self.resetVideo() # seekADelta= 0 # Setting up the HW video codec self.vdecoder = pymedia.video.ext_codecs.Decoder( stream ) print "GOT HW CODEC" except: try: # Fall back to SW video codec self.vdecoder= vcodec.Decoder( stream ) print "GOT SW CODEC" except: traceback.print_exc() print "FAILED TO INIT VIDEO CODEC" self.video_index = streami break for streami in range(len(tempDemux.streams)): stream = tempDemux.streams[streami] if stream['type'] == muxer.CODEC_TYPE_AUDIO: self.adecoder = acodec.Decoder( stream ) self.audio_index = streami break print "Video index: " + str(self.video_index) print "Audio index: " + str(self.audio_index) # decode a frame to get bitrate, etc for vdata in pstream: if vdata[0] != self.video_index: continue vfr = self.vdecoder.decode( vdata[1] ) if vfr == None: continue # WHY? 
break self.vdecoder.reset() if self.audio_index != -1: for vdata in pstream: if vdata[0] != self.audio_index: continue afr = self.adecoder.decode( vdata[1] ) break self.adecoder.reset() self.channels = afr.channels self.sample_rate = afr.sample_rate # print 'Opening sound', self.sample_rate, self.channels, sound.AFMT_S16_LE, 0 sndModule = sound.Output( self.sample_rate, self.channels, sound.AFMT_S16_NE ) self.snd = PlaybackBuffer(sndModule) self.snd.begin() # pygame.mixer.init(self.sample_rate, -16, self.channels, 4096) # 4096 # pygame.mixer.set_num_channels(2) # self.snd = pygame.mixer.Channel(0) # self.snd = ao.AudioDevice( # 0, # bits=16, # rate=self.sample_rate, # channels=self.channels, # byte_format=1) print "Sample rate: " + str(self.sample_rate) print "Channels: " + str(self.channels) # self.fullspace = self.snd.getSpace() self.fullspace = 0 print "FULLSPACE", self.fullspace self.setVolume(vol) # print self.snd.getVolume() # Set up output video method # self.snd = sound.Output( sdecoded.sample_rate, sdecoded.channels, sound.AFMT_S16_NE ) pygame.init() pygame.display.set_mode( vfr.size, 0 ) self.overlay = pygame.Overlay( pygame.YV12_OVERLAY, vfr.size ) # set overlay loc? # Will need to adjust for aspect # if vfr.aspect_ratio> .0: # self.pictureSize= ( vfr.size[ 1 ]* vfr.aspect_ratio, vfr.size[ 1 ] ) # else: # self.pictureSize= vfr.size print "vfr info: " + str(vfr) print dir(vfr) print vfr.rate # frames/second. Each vfr is a frame. 
print vfr.bitrate print vfr.aspect_ratio if vfr.rate > 1000: self.vfTime = 1000.0 / vfr.rate else: self.vfTime = 1.0 / vfr.rate self.tstart = time.time() - pos self.callback = self # Now I can trash the temporary muxer, and do things properly del(tempDemux) self.parse(fdata) file_ended = False while not self.event_stop: # Process audio/video, or read or sleep if len(self.aBuffer) == 0 or len(self.vBuffer) == 0: if not self.read(): file_ended = True if len(self.aBuffer) == 0: self.event_stop = True continue stime = self.playback_buffers() # "freetime" if stime > 0: if not self.vBufferFull() and not self.aBufferFull(): # print "READ" if not self.read(): file_ended = True else: # print " Sleep", stime # Sleep until a new frame is needed time.sleep(stime/2.0) if len(self.aBuffer) == 0: self.snd.stop() else: self.snd.fstop() print len(self.aBuffer) def read(self): # read and parse new data fdata = self.mfile.read(self.READ) if len(fdata) > 0: self.parse(fdata) return True else: return False # Display a video frame def onVideoReady(self, vfr): if vfr.data != None: self.overlay.display( vfr.data )
lbeltrame/bcbio-nextgen
refs/heads/master
bcbio/cwl/tool.py
4
"""Run bcbio generated CWL with a supported tool. Handles wrapping and integrating with multiple tools making it easier to run bcbio in a standard way in many environments. """ from __future__ import print_function import glob import json import os import shutil import subprocess import sys from bcbio import utils from bcbio.cwl import hpc from bcbio.distributed import objectstore def _get_main_and_json(directory): """Retrieve the main CWL and sample JSON files from a bcbio generated directory. """ directory = os.path.normpath(os.path.abspath(directory)) checker_main = os.path.normpath(os.path.join(directory, os.path.pardir, "checker-workflow-wrapping-tool.cwl")) if checker_main and os.path.exists(checker_main): main_cwl = [checker_main] else: main_cwl = glob.glob(os.path.join(directory, "main-*.cwl")) main_cwl = [x for x in main_cwl if not x.find("-pack") >= 0] assert len(main_cwl) == 1, "Did not find main CWL in %s" % directory main_json = glob.glob(os.path.join(directory, "main-*-samples.json")) assert len(main_json) == 1, "Did not find main json in %s" % directory project_name = os.path.basename(directory).split("-workflow")[0] return main_cwl[0], main_json[0], project_name def _run_tool(cmd, use_container=True, work_dir=None, log_file=None): """Run with injection of bcbio path. Place at end for runs without containers to avoid overriding other bcbio installations. """ if isinstance(cmd, (list, tuple)): cmd = " ".join([str(x) for x in cmd]) cmd = utils.local_path_export(at_start=use_container) + cmd if log_file: cmd += " 2>&1 | tee -a %s" % log_file try: print("Running: %s" % cmd) subprocess.check_call(cmd, shell=True) finally: if use_container and work_dir: _chown_workdir(work_dir) def _pack_cwl(unpacked_cwl): """Pack CWL into a single document for submission. 
""" out_file = "%s-pack%s" % os.path.splitext(unpacked_cwl) cmd = "cwltool --pack {unpacked_cwl} > {out_file}" _run_tool(cmd.format(**locals())) return out_file def _chown_workdir(work_dir): """Ensure work directory files owned by original user. Docker runs can leave root owned files making cleanup difficult. Skips this if it fails, avoiding errors where we run remotely and don't have docker locally. """ cmd = ("""docker run --rm -v %s:%s quay.io/bcbio/bcbio-base /bin/bash -c 'chown -R %s %s'""" % (work_dir, work_dir, os.getuid(), work_dir)) try: subprocess.check_call(cmd, shell=True) except subprocess.CalledProcessError: pass def _remove_bcbiovm_path(): """Avoid referencing minimal bcbio_nextgen in bcbio_vm installation. """ cur_path = os.path.dirname(os.path.realpath(sys.executable)) paths = os.environ["PATH"].split(":") if cur_path in paths: paths.remove(cur_path) os.environ["PATH"] = ":".join(paths) def _run_cwltool(args): """Run with cwltool -- reference implementation. """ main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cwltool_work")) tmp_dir = utils.safe_makedir(os.path.join(work_dir, "tmpcwl")) log_file = os.path.join(work_dir, "%s-cwltool.log" % project_name) os.environ["TMPDIR"] = tmp_dir flags = ["--tmpdir-prefix", tmp_dir, "--tmp-outdir-prefix", tmp_dir] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container", "--preserve-environment", "PATH", "--preserve-environment", "HOME"] cmd = ["cwltool"] + flags + args.toolargs + ["--", main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file=log_file) def _run_arvados(args): """Run CWL on Arvados. 
""" assert not args.no_container, "Arvados runs require containers" assert "ARVADOS_API_TOKEN" in os.environ and "ARVADOS_API_HOST" in os.environ, \ "Need to set ARVADOS_API_TOKEN and ARVADOS_API_HOST in environment to run" main_file, json_file, project_name = _get_main_and_json(args.directory) flags = ["--enable-reuse", "--api", "containers", "--submit", "--no-wait"] cmd = ["arvados-cwl-runner"] + flags + args.toolargs + [main_file, json_file] _run_tool(cmd) def _run_toil(args): """Run CWL with Toil. """ main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "toil_work")) tmp_dir = utils.safe_makedir(os.path.join(work_dir, "tmpdir")) os.environ["TMPDIR"] = tmp_dir log_file = os.path.join(work_dir, "%s-toil.log" % project_name) jobstore = os.path.join(work_dir, "cwltoil_jobstore") flags = ["--jobStore", jobstore, "--logFile", log_file, "--workDir", tmp_dir, "--linkImports"] if os.path.exists(jobstore): flags += ["--restart"] # caching causes issues for batch systems if "--batchSystem" in args.toolargs: flags += ["--disableCaching"] flags += args.toolargs if args.no_container: _remove_bcbiovm_path() flags += ["--no-container", "--preserve-environment", "PATH", "HOME"] cmd = ["cwltoil"] + flags + ["--", main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir) for tmpdir in (glob.glob(os.path.join(work_dir, "out_tmpdir*")) + glob.glob(os.path.join(work_dir, "tmp*"))): if os.path.isdir(tmpdir): shutil.rmtree(tmpdir) def _run_bunny(args): """Run CWL with rabix bunny. 
""" main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "bunny_work")) flags = ["-b", work_dir] log_file = os.path.join(work_dir, "%s-bunny.log" % project_name) if os.path.exists(work_dir): caches = [os.path.join(work_dir, d) for d in os.listdir(work_dir) if os.path.isdir(os.path.join(work_dir, d))] if caches: flags += ["--cache-dir", max(caches, key=os.path.getmtime)] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container"] cmd = ["rabix"] + flags + [main_file, json_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file) def _run_wes(args): """Run CWL using a Workflow Execution Service (WES) endpoint """ main_file, json_file, project_name = _get_main_and_json(args.directory) main_file = _pack_cwl(main_file) if args.host and "stratus" in args.host: _run_wes_stratus(args, main_file, json_file) else: opts = ["--no-wait"] if args.host: opts += ["--host", args.host] if args.auth: opts += ["--auth", args.auth] cmd = ["wes-client"] + opts + [main_file, json_file] _run_tool(cmd) def _run_wes_stratus(args, main_file, json_file): """Run WES on Illumina stratus endpoint server, which wes-client doesn't support. https://stratus-docs.readme.io/docs/quick-start-4 """ import requests base_url = args.host if not base_url.startswith("http"): base_url = "https://%s" % base_url with open(main_file) as in_handle: r = requests.post("%s/v1/workflows" % base_url, headers={"Content-Type": "application/json", "Authorization": "Bearer %s" % args.auth}, data=in_handle.read()) print(r.status_code) print(r.text) def _estimate_runner_memory(json_file): """Estimate Java memory requirements based on number of samples. A rough approach to selecting correct allocated memory for Cromwell. 
""" with open(json_file) as in_handle: sinfo = json.load(in_handle) num_parallel = 1 for key in ["config__algorithm__variantcaller", "description"]: item_counts = [] n = 0 for val in (sinfo.get(key) or []): n += 1 if val: if isinstance(val, (list, tuple)): item_counts.append(len(val)) else: item_counts.append(1) print(key, n, item_counts) if n and item_counts: num_parallel = n * max(item_counts) break if num_parallel < 25: return "3g" if num_parallel < 150: return "6g" elif num_parallel < 500: return "12g" else: return "24g" def _run_cromwell(args): """Run CWL with Cromwell. """ main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cromwell_work")) final_dir = utils.safe_makedir(os.path.join(work_dir, "final")) if args.no_container: _remove_bcbiovm_path() log_file = os.path.join(work_dir, "%s-cromwell.log" % project_name) metadata_file = os.path.join(work_dir, "%s-metadata.json" % project_name) option_file = os.path.join(work_dir, "%s-options.json" % project_name) cromwell_opts = {"final_workflow_outputs_dir": final_dir, "default_runtime_attributes": {"bootDiskSizeGb": 20}} with open(option_file, "w") as out_handle: json.dump(cromwell_opts, out_handle) cmd = ["cromwell", "-Xms1g", "-Xmx%s" % _estimate_runner_memory(json_file), "run", "--type", "CWL", "-Dconfig.file=%s" % hpc.create_cromwell_config(args, work_dir, json_file)] cmd += hpc.args_to_cromwell_cl(args) cmd += ["--metadata-output", metadata_file, "--options", option_file, "--inputs", json_file, main_file] with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file) if metadata_file and utils.file_exists(metadata_file): with open(metadata_file) as in_handle: metadata = json.load(in_handle) if metadata["status"] == "Failed": _cromwell_debug(metadata) sys.exit(1) else: _cromwell_move_outputs(metadata, final_dir) def _cromwell_debug(metadata): """Format Cromwell failures to make debugging easier. 
""" def get_failed_calls(cur, key=None): if key is None: key = [] out = [] if isinstance(cur, dict) and "failures" in cur and "callRoot" in cur: out.append((key, cur)) elif isinstance(cur, dict): for k, v in cur.items(): out.extend(get_failed_calls(v, key + [k])) elif isinstance(cur, (list, tuple)): for i, v in enumerate(cur): out.extend(get_failed_calls(v, key + [i])) return out print("Failed bcbio Cromwell run") print("-------------------------") for fail_k, fail_call in get_failed_calls(metadata["calls"]): root_dir = os.path.join("cromwell_work", os.path.relpath(fail_call["callRoot"])) print("Failure in step: %s" % ".".join([str(x) for x in fail_k])) print(" bcbio log file : %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-debug.log")) print(" bcbio commands file: %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-commands.log")) print(" Cromwell directory : %s" % root_dir) print() def _cromwell_move_outputs(metadata, final_dir): """Move Cromwell outputs to the final upload directory. 
""" sample_key = [k for k in metadata["outputs"].keys() if k.endswith(("rgnames__sample", "rgnames__sample_out"))][0] project_dir = utils.safe_makedir(os.path.join(final_dir, "project")) samples = metadata["outputs"][sample_key] def _copy_with_secondary(f, dirname): if len(f["secondaryFiles"]) > 1: dirname = utils.safe_makedir(os.path.join(dirname, os.path.basename(os.path.dirname(f["location"])))) if not objectstore.is_remote(f["location"]): finalf = os.path.join(dirname, os.path.basename(f["location"])) if not utils.file_uptodate(finalf, f["location"]): shutil.copy(f["location"], dirname) [_copy_with_secondary(sf, dirname) for sf in f["secondaryFiles"]] def _write_to_dir(val, dirname): if isinstance(val, (list, tuple)): [_write_to_dir(v, dirname) for v in val] else: _copy_with_secondary(val, dirname) for k, vals in metadata["outputs"].items(): if k != sample_key: if k.endswith(("summary__multiqc")): vs = [v for v in vals if v] assert len(vs) == 1 _write_to_dir(vs[0], project_dir) elif len(vals) == len(samples): for s, v in zip(samples, vals): if v: _write_to_dir(v, utils.safe_makedir(os.path.join(final_dir, s))) elif len(vals) == 1: _write_to_dir(vals[0], project_dir) elif len(vals) > 0: raise ValueError("Unexpected sample and outputs: %s %s %s" % (k, samples, vals)) def _run_sbgenomics(args): """Run CWL on SevenBridges platform and Cancer Genomics Cloud. """ assert not args.no_container, "Seven Bridges runs require containers" main_file, json_file, project_name = _get_main_and_json(args.directory) flags = [] cmd = ["sbg-cwl-runner"] + flags + args.toolargs + [main_file, json_file] _run_tool(cmd) def _run_funnel(args): """Run funnel TES server with rabix bunny for CWL. 
""" host = "localhost" port = "8088" main_file, json_file, project_name = _get_main_and_json(args.directory) work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "funnel_work")) log_file = os.path.join(work_dir, "%s-funnel.log" % project_name) # Create bunny configuration directory with TES backend orig_config_dir = os.path.join(os.path.dirname(os.path.realpath(utils.which("rabix"))), "config") work_config_dir = utils.safe_makedir(os.path.join(work_dir, "rabix_config")) for fname in os.listdir(orig_config_dir): if fname == "core.properties": with open(os.path.join(orig_config_dir, fname)) as in_handle: with open(os.path.join(work_config_dir, fname), "w") as out_handle: for line in in_handle: if line.startswith("backend.embedded.types"): line = "backend.embedded.types=TES\n" out_handle.write(line) else: shutil.copy(os.path.join(orig_config_dir, fname), os.path.join(work_config_dir, fname)) flags = ["-c", work_config_dir, "-tes-url=http://%s:%s" % (host, port), "-tes-storage=%s" % work_dir] if args.no_container: _remove_bcbiovm_path() flags += ["--no-container"] cmd = ["rabix"] + flags + [main_file, json_file] funnelp = subprocess.Popen(["funnel", "server", "run", "--Server.HostName", host, "--Server.HTTPPort", port, "--LocalStorage.AllowedDirs", work_dir, "--Worker.WorkDir", os.path.join(work_dir, "funnel-work")]) try: with utils.chdir(work_dir): _run_tool(cmd, not args.no_container, work_dir, log_file) finally: funnelp.kill() _TOOLS = {"cwltool": _run_cwltool, "cromwell": _run_cromwell, "arvados": _run_arvados, "sbg": _run_sbgenomics, "toil": _run_toil, "bunny": _run_bunny, "funnel": _run_funnel, "wes": _run_wes} def run(args): _TOOLS[args.tool](args)
vishdha/erpnext
refs/heads/develop
erpnext/maintenance/doctype/__init__.py
12133432
thomasgilgenast/spqr-nonrel
refs/heads/master
django/conf/locale/fr/__init__.py
12133432
imtapps/django-imt-fork
refs/heads/IMT
tests/regressiontests/i18n/other/locale/de/formats.py
12133432
vivyly/fancastic_17
refs/heads/master
fancastic_17/common/migrations/__init__.py
12133432
barseghyanartur/oauthlib
refs/heads/master
tests/oauth2/rfc6749/grant_types/test_client_credentials.py
24
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from ....unittest import TestCase import json import mock from oauthlib.common import Request from oauthlib.oauth2.rfc6749.grant_types import ClientCredentialsGrant from oauthlib.oauth2.rfc6749.tokens import BearerToken class ClientCredentialsGrantTest(TestCase): def setUp(self): mock_client = mock.MagicMock() mock_client.user.return_value = 'mocked user' self.request = Request('http://a.b/path') self.request.grant_type = 'client_credentials' self.request.client = mock_client self.request.scopes = ('mocked', 'scopes') self.mock_validator = mock.MagicMock() self.auth = ClientCredentialsGrant( request_validator=self.mock_validator) def test_create_token_response(self): bearer = BearerToken(self.mock_validator) headers, body, status_code = self.auth.create_token_response( self.request, bearer) token = json.loads(body) self.assertIn('access_token', token) self.assertIn('token_type', token) self.assertIn('expires_in', token) self.assertIn('Content-Type', headers) self.assertEqual(headers['Content-Type'], 'application/json') def test_error_response(self): bearer = BearerToken(self.mock_validator) self.mock_validator.authenticate_client.return_value = False headers, body, status_code = self.auth.create_token_response( self.request, bearer) error_msg = json.loads(body) self.assertIn('error', error_msg) self.assertEqual(error_msg['error'], 'invalid_client') self.assertIn('Content-Type', headers) self.assertEqual(headers['Content-Type'], 'application/json') def test_validate_token_response(self): # wrong grant type, scope pass
firebitsbr/infernal-twin
refs/heads/master
build/pip/build/lib.linux-i686-2.7/pip/_vendor/distlib/_backport/misc.py
1428
# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """Backports for individual classes and functions.""" import os import sys __all__ = ['cache_from_source', 'callable', 'fsencode'] try: from imp import cache_from_source except ImportError: def cache_from_source(py_file, debug=__debug__): ext = debug and 'c' or 'o' return py_file + ext try: callable = callable except NameError: from collections import Callable def callable(obj): return isinstance(obj, Callable) try: fsencode = os.fsencode except AttributeError: def fsencode(filename): if isinstance(filename, bytes): return filename elif isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
Jumpscale/go-raml
refs/heads/master
docs/tutorial/python/sanic/types/User.py
4
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml. """ Auto-generated class for User """ from six import string_types from . import client_support class User(object): """ auto-generated. don't touch. """ @staticmethod def create(**kwargs): """ :type name: string_types :type username: string_types :rtype: User """ return User(**kwargs) def __init__(self, json=None, **kwargs): if json is None and not kwargs: raise ValueError('No data or kwargs present') class_name = 'User' data = json or kwargs # set attributes data_types = [string_types] self.name = client_support.set_property('name', data, data_types, False, [], False, True, class_name) data_types = [string_types] self.username = client_support.set_property('username', data, data_types, False, [], False, True, class_name) def __str__(self): return self.as_json(indent=4) def as_json(self, indent=0): return client_support.to_json(self, indent=indent) def as_dict(self): return client_support.to_dict(self)
pyramania/scipy
refs/heads/master
scipy/_lib/decorator.py
41
# ######################### LICENSE ############################ # # Copyright (c) 2005-2015, Michele Simionato # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # Redistributions in bytecode form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. """ Decorator module, see http://pypi.python.org/pypi/decorator for the documentation. 
""" from __future__ import print_function import re import sys import inspect import operator import itertools import collections __version__ = '4.0.5' if sys.version >= '3': from inspect import getfullargspec def get_init(cls): return cls.__init__ else: class getfullargspec(object): "A quick and dirty replacement for getfullargspec for Python 2.X" def __init__(self, f): self.args, self.varargs, self.varkw, self.defaults = \ inspect.getargspec(f) self.kwonlyargs = [] self.kwonlydefaults = None def __iter__(self): yield self.args yield self.varargs yield self.varkw yield self.defaults getargspec = inspect.getargspec def get_init(cls): return cls.__init__.__func__ # getargspec has been deprecated in Python 3.5 ArgSpec = collections.namedtuple( 'ArgSpec', 'args varargs varkw defaults') def getargspec(f): """A replacement for inspect.getargspec""" spec = getfullargspec(f) return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults) DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(') # basic functionality class FunctionMaker(object): """ An object with the ability to create functions with a given signature. It has attributes name, doc, module, signature, defaults, dict and methods update and make. 
""" # Atomic get-and-increment provided by the GIL _compile_count = itertools.count() def __init__(self, func=None, name=None, signature=None, defaults=None, doc=None, module=None, funcdict=None): self.shortsignature = signature if func: # func can be a class or a callable, but not an instance method self.name = func.__name__ if self.name == '<lambda>': # small hack for lambda functions self.name = '_lambda_' self.doc = func.__doc__ self.module = func.__module__ if inspect.isfunction(func): argspec = getfullargspec(func) self.annotations = getattr(func, '__annotations__', {}) for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults'): setattr(self, a, getattr(argspec, a)) for i, arg in enumerate(self.args): setattr(self, 'arg%d' % i, arg) if sys.version < '3': # easy way self.shortsignature = self.signature = ( inspect.formatargspec( formatvalue=lambda val: "", *argspec)[1:-1]) else: # Python 3 way allargs = list(self.args) allshortargs = list(self.args) if self.varargs: allargs.append('*' + self.varargs) allshortargs.append('*' + self.varargs) elif self.kwonlyargs: allargs.append('*') # single star syntax for a in self.kwonlyargs: allargs.append('%s=None' % a) allshortargs.append('%s=%s' % (a, a)) if self.varkw: allargs.append('**' + self.varkw) allshortargs.append('**' + self.varkw) self.signature = ', '.join(allargs) self.shortsignature = ', '.join(allshortargs) self.dict = func.__dict__.copy() # func=None happens when decorating a caller if name: self.name = name if signature is not None: self.signature = signature if defaults: self.defaults = defaults if doc: self.doc = doc if module: self.module = module if funcdict: self.dict = funcdict # check existence required attributes assert hasattr(self, 'name') if not hasattr(self, 'signature'): raise TypeError('You are decorating a non function: %s' % func) def update(self, func, **kw): "Update the signature of func with the data in self" func.__name__ = self.name func.__doc__ = getattr(self, 
'doc', None) func.__dict__ = getattr(self, 'dict', {}) func.__defaults__ = getattr(self, 'defaults', ()) func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None) func.__annotations__ = getattr(self, 'annotations', None) try: frame = sys._getframe(3) except AttributeError: # for IronPython and similar implementations callermodule = '?' else: callermodule = frame.f_globals.get('__name__', '?') func.__module__ = getattr(self, 'module', callermodule) func.__dict__.update(kw) def make(self, src_templ, evaldict=None, addsource=False, **attrs): "Make a new function from a given template and update the signature" src = src_templ % vars(self) # expand name and signature evaldict = evaldict or {} mo = DEF.match(src) if mo is None: raise SyntaxError('not a valid function template\n%s' % src) name = mo.group(1) # extract the function name names = set([name] + [arg.strip(' *') for arg in self.shortsignature.split(',')]) for n in names: if n in ('_func_', '_call_'): raise NameError('%s is overridden in\n%s' % (n, src)) if not src.endswith('\n'): # add a newline just for safety src += '\n' # this is needed in old versions of Python # Ensure each generated function has a unique filename for profilers # (such as cProfile) that depend on the tuple of (<filename>, # <definition line>, <function name>) being unique. filename = '<decorator-gen-%d>' % (next(self._compile_count),) try: code = compile(src, filename, 'single') exec(code, evaldict) except: print('Error in generated code:', file=sys.stderr) print(src, file=sys.stderr) raise func = evaldict[name] if addsource: attrs['__source__'] = src self.update(func, **attrs) return func @classmethod def create(cls, obj, body, evaldict, defaults=None, doc=None, module=None, addsource=True, **attrs): """ Create a function from the strings name, signature and body. evaldict is the evaluation dictionary. If addsource is true an attribute __source__ is added to the result. The attributes attrs are added, if any. 
""" if isinstance(obj, str): # "name(signature)" name, rest = obj.strip().split('(', 1) signature = rest[:-1] # strip a right parens func = None else: # a function name = None signature = None func = obj self = cls(func, name, signature, defaults, doc, module) ibody = '\n'.join(' ' + line for line in body.splitlines()) return self.make('def %(name)s(%(signature)s):\n' + ibody, evaldict, addsource, **attrs) def decorate(func, caller): """ decorate(func, caller) decorates a function using a caller. """ evaldict = func.__globals__.copy() evaldict['_call_'] = caller evaldict['_func_'] = func fun = FunctionMaker.create( func, "return _call_(_func_, %(shortsignature)s)", evaldict, __wrapped__=func) if hasattr(func, '__qualname__'): fun.__qualname__ = func.__qualname__ return fun def decorator(caller, _func=None): """decorator(caller) converts a caller function into a decorator""" if _func is not None: # return a decorated function # this is obsolete behavior; you should use decorate instead return decorate(_func, caller) # else return a decorator function if inspect.isclass(caller): name = caller.__name__.lower() callerfunc = get_init(caller) doc = 'decorator(%s) converts functions/generators into ' \ 'factories of %s objects' % (caller.__name__, caller.__name__) elif inspect.isfunction(caller): if caller.__name__ == '<lambda>': name = '_lambda_' else: name = caller.__name__ callerfunc = caller doc = caller.__doc__ else: # assume caller is an object with a __call__ method name = caller.__class__.__name__.lower() callerfunc = caller.__call__.__func__ doc = caller.__call__.__doc__ evaldict = callerfunc.__globals__.copy() evaldict['_call_'] = caller evaldict['_decorate_'] = decorate return FunctionMaker.create( '%s(func)' % name, 'return _decorate_(func, _call_)', evaldict, doc=doc, module=caller.__module__, __wrapped__=caller) # ####################### contextmanager ####################### # try: # Python >= 3.2 from contextlib import _GeneratorContextManager except 
ImportError: # Python >= 2.5 from contextlib import GeneratorContextManager as _GeneratorContextManager class ContextManager(_GeneratorContextManager): def __call__(self, func): """Context manager decorator""" return FunctionMaker.create( func, "with _self_: return _func_(%(shortsignature)s)", dict(_self_=self, _func_=func), __wrapped__=func) init = getfullargspec(_GeneratorContextManager.__init__) n_args = len(init.args) if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7 def __init__(self, g, *a, **k): return _GeneratorContextManager.__init__(self, g(*a, **k)) ContextManager.__init__ = __init__ elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4 pass elif n_args == 4: # (self, gen, args, kwds) Python 3.5 def __init__(self, g, *a, **k): return _GeneratorContextManager.__init__(self, g, a, k) ContextManager.__init__ = __init__ contextmanager = decorator(ContextManager) # ############################ dispatch_on ############################ # def append(a, vancestors): """ Append ``a`` to the list of the virtual ancestors, unless it is already included. """ add = True for j, va in enumerate(vancestors): if issubclass(va, a): add = False break if issubclass(a, va): vancestors[j] = a add = False if add: vancestors.append(a) # inspired from simplegeneric by P.J. Eby and functools.singledispatch def dispatch_on(*dispatch_args): """ Factory of decorators turning a function into a generic function dispatching on the given arguments. 
""" assert dispatch_args, 'No dispatch args passed' dispatch_str = '(%s,)' % ', '.join(dispatch_args) def check(arguments, wrong=operator.ne, msg=''): """Make sure one passes the expected number of arguments""" if wrong(len(arguments), len(dispatch_args)): raise TypeError('Expected %d arguments, got %d%s' % (len(dispatch_args), len(arguments), msg)) def gen_func_dec(func): """Decorator turning a function into a generic function""" # first check the dispatch arguments argset = set(getfullargspec(func).args) if not set(dispatch_args) <= argset: raise NameError('Unknown dispatch arguments %s' % dispatch_str) typemap = {} def vancestors(*types): """ Get a list of sets of virtual ancestors for the given types """ check(types) ras = [[] for _ in range(len(dispatch_args))] for types_ in typemap: for t, type_, ra in zip(types, types_, ras): if issubclass(t, type_) and type_ not in t.__mro__: append(type_, ra) return [set(ra) for ra in ras] def ancestors(*types): """ Get a list of virtual MROs, one for each type """ check(types) lists = [] for t, vas in zip(types, vancestors(*types)): n_vas = len(vas) if n_vas > 1: raise RuntimeError( 'Ambiguous dispatch for %s: %s' % (t, vas)) elif n_vas == 1: va, = vas mro = type('t', (t, va), {}).__mro__[1:] else: mro = t.__mro__ lists.append(mro[:-1]) # discard t and object return lists def register(*types): """ Decorator to register an implementation for the given types """ check(types) def dec(f): check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__) typemap[types] = f return f return dec def dispatch_info(*types): """ An utility to introspect the dispatch algorithm """ check(types) lst = [] for anc in itertools.product(*ancestors(*types)): lst.append(tuple(a.__name__ for a in anc)) return lst def _dispatch(dispatch_args, *args, **kw): types = tuple(type(arg) for arg in dispatch_args) try: # fast path f = typemap[types] except KeyError: pass else: return f(*args, **kw) combinations = itertools.product(*ancestors(*types)) 
next(combinations) # the first one has been already tried for types_ in combinations: f = typemap.get(types_) if f is not None: return f(*args, **kw) # else call the default implementation return func(*args, **kw) return FunctionMaker.create( func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str, dict(_f_=_dispatch), register=register, default=func, typemap=typemap, vancestors=vancestors, ancestors=ancestors, dispatch_info=dispatch_info, __wrapped__=func) gen_func_dec.__name__ = 'dispatch_on' + dispatch_str return gen_func_dec
simone-f/compare-to-osm
refs/heads/master
rendering/generate_tiles.py
1
#!/usr/bin/env python from math import pi,cos,sin,log,exp,atan from subprocess import call import sys, os from Queue import Queue import threading # try: # import mapnik2 as mapnik # except: import mapnik DEG_TO_RAD = pi/180 RAD_TO_DEG = 180/pi # Default number of rendering threads to spawn, should be roughly equal to number of CPU cores available NUM_THREADS = 4 def minmax (a,b,c): a = max(a,b) a = min(a,c) return a class GoogleProjection: def __init__(self,levels=18): self.Bc = [] self.Cc = [] self.zc = [] self.Ac = [] c = 256 for d in range(0,levels): e = c/2; self.Bc.append(c/360.0) self.Cc.append(c/(2 * pi)) self.zc.append((e,e)) self.Ac.append(c) c *= 2 def fromLLtoPixel(self,ll,zoom): d = self.zc[zoom] e = round(d[0] + ll[0] * self.Bc[zoom]) f = minmax(sin(DEG_TO_RAD * ll[1]),-0.9999,0.9999) g = round(d[1] + 0.5*log((1+f)/(1-f))*-self.Cc[zoom]) return (e,g) def fromPixelToLL(self,px,zoom): e = self.zc[zoom] f = (px[0] - e[0])/self.Bc[zoom] g = (px[1] - e[1])/-self.Cc[zoom] h = RAD_TO_DEG * ( 2 * atan(exp(g)) - 0.5 * pi) return (f,h) class RenderThread: def __init__(self, tile_dir, mapfile, q, printLock, maxZoom): self.tile_dir = tile_dir self.q = q self.m = mapnik.Map(256, 256) self.printLock = printLock # Load style XML mapnik.load_map(self.m, mapfile, True) # Obtain <Map> projection self.prj = mapnik.Projection(self.m.srs) # Projects between tile pixel co-ordinates and LatLong (EPSG:4326) self.tileproj = GoogleProjection(maxZoom+1) def render_tile(self, tile_uri, x, y, z): # Calculate pixel positions of bottom-left & top-right p0 = (x * 256, (y + 1) * 256) p1 = ((x + 1) * 256, y * 256) # Convert to LatLong (EPSG:4326) l0 = self.tileproj.fromPixelToLL(p0, z); l1 = self.tileproj.fromPixelToLL(p1, z); # Convert to map projection (e.g. 
mercator co-ords EPSG:900913) c0 = self.prj.forward(mapnik.Coord(l0[0],l0[1])) c1 = self.prj.forward(mapnik.Coord(l1[0],l1[1])) # Bounding box for the tile if hasattr(mapnik,'mapnik_version') and mapnik.mapnik_version() >= 800: bbox = mapnik.Box2d(c0.x,c0.y, c1.x,c1.y) else: bbox = mapnik.Envelope(c0.x,c0.y, c1.x,c1.y) render_size = 256 self.m.resize(render_size, render_size) self.m.zoom_to_box(bbox) if(self.m.buffer_size < 128): self.m.buffer_size = 128 # Render image with default Agg renderer im = mapnik.Image(render_size, render_size) mapnik.render(self.m, im) if len(im.tostring('png256')) != 116: im.save(tile_uri, 'png256:z=1') def loop(self): while True: #Fetch a tile from the queue and render it r = self.q.get() if (r == None): self.q.task_done() break else: (name, tile_uri, x, y, z) = r exists= "" if os.path.isfile(tile_uri): exists= "exists" else: self.render_tile(tile_uri, x, y, z) """bytes=os.stat(tile_uri)[6] empty= '' if bytes == 103: empty = " Empty Tile """ self.printLock.acquire() #print name, ":", z, x, y, exists#, empty self.printLock.release() self.q.task_done() def render_tiles(bbox, mapfile, tile_dir, minZoom=1,maxZoom=18, name="unknown", num_threads=NUM_THREADS, tms_scheme=False): print "render_tiles(",bbox, mapfile, tile_dir, minZoom,maxZoom, name,")\n..." 
# Launch rendering threads queue = Queue(32) printLock = threading.Lock() renderers = {} for i in range(num_threads): renderer = RenderThread(tile_dir, mapfile, queue, printLock, maxZoom) render_thread = threading.Thread(target=renderer.loop) render_thread.start() #print "Started render thread %s" % render_thread.getName() renderers[i] = render_thread if not os.path.isdir(tile_dir): os.mkdir(tile_dir) gprj = GoogleProjection(maxZoom+1) ll0 = (bbox[0],bbox[3]) ll1 = (bbox[2],bbox[1]) for z in range(minZoom,maxZoom + 1): px0 = gprj.fromLLtoPixel(ll0,z) px1 = gprj.fromLLtoPixel(ll1,z) # check if we have directories in place zoom = "%s" % z if not os.path.isdir(tile_dir + zoom): os.mkdir(tile_dir + zoom) for x in range(int(px0[0]/256.0),int(px1[0]/256.0)+1): # Validate x co-ordinate if (x < 0) or (x >= 2**z): continue # check if we have directories in place str_x = "%s" % x if not os.path.isdir(tile_dir + zoom + '/' + str_x): os.mkdir(tile_dir + zoom + '/' + str_x) for y in range(int(px0[1]/256.0),int(px1[1]/256.0)+1): # Validate x co-ordinate if (y < 0) or (y >= 2**z): continue # flip y to match OSGEO TMS spec if tms_scheme: str_y = "%s" % ((2**z-1) - y) else: str_y = "%s" % y tile_uri = tile_dir + zoom + '/' + str_x + '/' + str_y + '.png' # Submit tile to be rendered into the queue t = (name, tile_uri, x, y, z) try: queue.put(t) except KeyboardInterrupt: raise SystemExit("Ctrl-c detected, exiting...") # Signal render threads to exit by sending empty request to queue for i in range(num_threads): queue.put(None) # wait for pending rendering jobs to complete queue.join() for i in range(num_threads): renderers[i].join() if __name__ == "__main__": #------------------------------------------------------------------------- # # Change the following for different bounding boxes and zoom levels # # Start with an overview # World bbox = (-180.0,-90.0, 180.0,90.0) render_tiles(bbox, mapfile, tile_dir, 0, 5, "World") minZoom = 10 maxZoom = 16 bbox = (-2, 50.0,1.0,52.0) 
render_tiles(bbox, mapfile, tile_dir, minZoom, maxZoom) # Muenchen bbox = (11.4,48.07, 11.7,48.22) render_tiles(bbox, mapfile, tile_dir, 1, 12 , "Muenchen") # Muenchen+ bbox = (11.3,48.01, 12.15,48.44) render_tiles(bbox, mapfile, tile_dir, 7, 12 , "Muenchen+") # Muenchen++ bbox = (10.92,47.7, 12.24,48.61) render_tiles(bbox, mapfile, tile_dir, 7, 12 , "Muenchen++") # Nuernberg bbox=(10.903198,49.560441,49.633534,11.038085) render_tiles(bbox, mapfile, tile_dir, 10, 16, "Nuernberg") # Karlsruhe bbox=(8.179113,48.933617,8.489252,49.081707) render_tiles(bbox, mapfile, tile_dir, 10, 16, "Karlsruhe") # Karlsruhe+ bbox = (8.3,48.95,8.5,49.05) render_tiles(bbox, mapfile, tile_dir, 1, 16, "Karlsruhe+") # Augsburg bbox = (8.3,48.95,8.5,49.05) render_tiles(bbox, mapfile, tile_dir, 1, 16, "Augsburg") # Augsburg+ bbox=(10.773251,48.369594,10.883834,48.438577) render_tiles(bbox, mapfile, tile_dir, 10, 14, "Augsburg+") # Europe+ bbox = (1.0,10.0, 20.6,50.0) render_tiles(bbox, mapfile, tile_dir, 1, 11 , "Europe+")
jiazichenzhan/Server_Manage_Plugin
refs/heads/master
ironic-plugin-pike/ironic/tests/unit/db/test_chassis.py
11
# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating Chassis via the DB API""" from oslo_utils import uuidutils import six from ironic.common import exception from ironic.tests.unit.db import base from ironic.tests.unit.db import utils class DbChassisTestCase(base.DbTestCase): def setUp(self): super(DbChassisTestCase, self).setUp() self.chassis = utils.create_test_chassis() def test_get_chassis_list(self): uuids = [self.chassis.uuid] for i in range(1, 6): ch = utils.create_test_chassis(uuid=uuidutils.generate_uuid()) uuids.append(six.text_type(ch.uuid)) res = self.dbapi.get_chassis_list() res_uuids = [r.uuid for r in res] six.assertCountEqual(self, uuids, res_uuids) def test_get_chassis_by_id(self): chassis = self.dbapi.get_chassis_by_id(self.chassis.id) self.assertEqual(self.chassis.uuid, chassis.uuid) def test_get_chassis_by_uuid(self): chassis = self.dbapi.get_chassis_by_uuid(self.chassis.uuid) self.assertEqual(self.chassis.id, chassis.id) def test_get_chassis_that_does_not_exist(self): self.assertRaises(exception.ChassisNotFound, self.dbapi.get_chassis_by_id, 666) def test_update_chassis(self): res = self.dbapi.update_chassis(self.chassis.id, {'description': 'hello'}) self.assertEqual('hello', res.description) def test_update_chassis_that_does_not_exist(self): self.assertRaises(exception.ChassisNotFound, self.dbapi.update_chassis, 666, {'description': ''}) def 
test_update_chassis_uuid(self): self.assertRaises(exception.InvalidParameterValue, self.dbapi.update_chassis, self.chassis.id, {'uuid': 'hello'}) def test_destroy_chassis(self): self.dbapi.destroy_chassis(self.chassis.id) self.assertRaises(exception.ChassisNotFound, self.dbapi.get_chassis_by_id, self.chassis.id) def test_destroy_chassis_that_does_not_exist(self): self.assertRaises(exception.ChassisNotFound, self.dbapi.destroy_chassis, 666) def test_destroy_chassis_with_nodes(self): utils.create_test_node(chassis_id=self.chassis.id) self.assertRaises(exception.ChassisNotEmpty, self.dbapi.destroy_chassis, self.chassis.id) def test_create_chassis_already_exists(self): self.assertRaises(exception.ChassisAlreadyExists, utils.create_test_chassis, uuid=self.chassis.uuid)
henry-ajere/rad2py
refs/heads/master
psp2py/languages/it.py
23
# coding: utf8 { '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" è un\'espressione opzionale come "campo1=\'nuovo valore\'". Non si può fare "update" o "delete" dei risultati di un JOIN ', '%Y-%m-%d': '%d/%m/%Y', '%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S', '%s rows deleted': '%s righe ("record") cancellate', '%s rows updated': '%s righe ("record") modificate', 'Available databases and tables': 'Database e tabelle disponibili', 'Cannot be empty': 'Non può essere vuoto', 'Check to delete': 'Seleziona per cancellare', 'Client IP': 'Client IP', 'Controller': 'Controller', 'Copyright': 'Copyright', 'Current request': 'Richiesta (request) corrente', 'Current response': 'Risposta (response) corrente', 'Current session': 'Sessione (session) corrente', 'DB Model': 'Modello di DB', 'Database': 'Database', 'Delete:': 'Cancella:', 'Description': 'Descrizione', 'E-mail': 'E-mail', 'Edit': 'Modifica', 'Edit This App': 'Modifica questa applicazione', 'Edit current record': 'Modifica record corrente', 'First name': 'Nome', 'Group ID': 'ID Gruppo', 'Hello World': 'Salve Mondo', 'Hello World in a flash!': 'Salve Mondo in un flash!', 'Import/Export': 'Importa/Esporta', 'Index': 'Indice', 'Internal State': 'Stato interno', 'Invalid Query': 'Richiesta (query) non valida', 'Invalid email': 'Email non valida', 'Last name': 'Cognome', 'Layout': 'Layout', 'Main Menu': 'Menu principale', 'Menu Model': 'Menu Modelli', 'Name': 'Nome', 'New Record': 'Nuovo elemento (record)', 'No databases in this application': 'Nessun database presente in questa applicazione', 'Origin': 'Origine', 'Password': 'Password', 'Powered by': 'Powered by', 'Query:': 'Richiesta (query):', 'Record ID': 'Record ID', 'Registration key': 'Chiave di Registazione', 'Reset Password key': 'Resetta chiave Password ', 'Role': 'Ruolo', 'Rows in table': 'Righe nella tabella', 'Rows selected': 'Righe selezionate', 'Stylesheet': 'Foglio di stile 
(stylesheet)', 'Sure you want to delete this object?': 'Vuoi veramente cancellare questo oggetto?', 'Table name': 'Nome tabella', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La richiesta (query) è una condizione come ad esempio "db.tabella1.campo1==\'valore\'". Una condizione come "db.tabella1.campo1==db.tabella2.campo2" produce un "JOIN" SQL.', 'The output of the file is a dictionary that was rendered by the view': 'L\'output del file è un "dictionary" che è stato visualizzato dalla vista', 'This is a copy of the scaffolding application': "Questa è una copia dell'applicazione di base (scaffold)", 'Timestamp': 'Ora (timestamp)', 'Update:': 'Aggiorna:', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Per costruire richieste (query) più complesse si usano (...)&(...) come "e" (AND), (...)|(...) come "o" (OR), e ~(...) come negazione (NOT).', 'User ID': 'ID Utente', 'View': 'Vista', 'Welcome %s': 'Benvenuto %s', 'Welcome to web2py': 'Benvenuto su web2py', 'Which called the function': 'che ha chiamato la funzione', 'You are successfully running web2py': 'Stai eseguendo web2py con successo', 'You can modify this application and adapt it to your needs': 'Puoi modificare questa applicazione adattandola alle tue necessità', 'You visited the url': "Hai visitato l'URL", 'appadmin is disabled because insecure channel': 'Amministrazione (appadmin) disabilitata: comunicazione non sicura', 'cache': 'cache', 'change password': 'Cambia password', 'Online examples': 'Vedere gli esempi', 'Administrative interface': "Interfaccia amministrativa", 'customize me!': 'Personalizzami!', 'data uploaded': 'dati caricati', 'database': 'database', 'database %s select': 'database %s select', 'db': 'db', 'design': 'progetta', 'Documentation': 'Documentazione', 'done!': 'fatto!', 'edit profile': 'modifica profilo', 'export as csv file': 'esporta come 
file CSV', 'hello world': 'salve mondo', 'insert new': 'inserisci nuovo', 'insert new %s': 'inserisci nuovo %s', 'invalid request': 'richiesta non valida', 'located in the file': 'presente nel file', 'login': 'accesso', 'logout': 'uscita', 'lost password?': 'dimenticato la password?', 'new record inserted': 'nuovo record inserito', 'next 100 rows': 'prossime 100 righe', 'not authorized': 'non autorizzato', 'or import from csv file': 'oppure importa da file CSV', 'previous 100 rows': '100 righe precedenti', 'record': 'record', 'record does not exist': 'il record non esiste', 'record id': 'record id', 'register': 'registrazione', 'selected': 'selezionato', 'state': 'stato', 'table': 'tabella', 'unable to parse csv file': 'non riesco a decodificare questo file CSV', }
AlexMooney/python-pptx
refs/heads/master
pptx/dml/color.py
9
# encoding: utf-8 """ DrawingML objects related to color, ColorFormat being the most prominent. """ from __future__ import absolute_import, print_function, unicode_literals from ..enum.dml import MSO_COLOR_TYPE, MSO_THEME_COLOR from ..oxml.dml.color import ( CT_HslColor, CT_PresetColor, CT_SchemeColor, CT_ScRgbColor, CT_SRgbColor, CT_SystemColor ) class ColorFormat(object): """ Provides access to color settings such as RGB color, theme color, and luminance adjustments. """ def __init__(self, eg_colorChoice_parent, color): super(ColorFormat, self).__init__() self._xFill = eg_colorChoice_parent self._color = color @property def brightness(self): """ Read/write float value between -1.0 and 1.0 indicating the brightness adjustment for this color, e.g. -0.25 is 25% darker and 0.4 is 40% lighter. 0 means no brightness adjustment. """ return self._color.brightness @brightness.setter def brightness(self, value): self._validate_brightness_value(value) self._color.brightness = value @classmethod def from_colorchoice_parent(cls, eg_colorChoice_parent): xClr = eg_colorChoice_parent.eg_colorChoice color = _Color(xClr) color_format = cls(eg_colorChoice_parent, color) return color_format @property def rgb(self): """ |RGBColor| value of this color, or None if no RGB color is explicitly defined for this font. Setting this value to an |RGBColor| instance causes its type to change to MSO_COLOR_TYPE.RGB. If the color was a theme color with a brightness adjustment, the brightness adjustment is removed when changing it to an RGB color. 
""" return self._color.rgb @rgb.setter def rgb(self, rgb): if not isinstance(rgb, RGBColor): raise ValueError('assigned value must be type RGBColor') # change to rgb color format if not already if not isinstance(self._color, _SRgbColor): srgbClr = self._xFill.get_or_change_to_srgbClr() self._color = _SRgbColor(srgbClr) # call _SRgbColor instance to do the setting self._color.rgb = rgb @property def theme_color(self): """ Theme color value of this color, one of those defined in the MSO_THEME_COLOR enumeration, e.g. MSO_THEME_COLOR.ACCENT_1. Raises AttributeError on access if the color is not type ``MSO_COLOR_TYPE.SCHEME``. Assigning a value in ``MSO_THEME_COLOR`` causes the color's type to change to ``MSO_COLOR_TYPE.SCHEME``. """ return self._color.theme_color @theme_color.setter def theme_color(self, mso_theme_color_idx): # change to theme color format if not already if not isinstance(self._color, _SchemeColor): schemeClr = self._xFill.get_or_change_to_schemeClr() self._color = _SchemeColor(schemeClr) self._color.theme_color = mso_theme_color_idx @property def type(self): """ Read-only. A value from :ref:`MsoColorType`, either RGB or SCHEME, corresponding to the way this color is defined, or None if no color is defined at the level of this font. """ return self._color.color_type def _validate_brightness_value(self, value): if value < -1.0 or value > 1.0: raise ValueError('brightness must be number in range -1.0 to 1.0') if isinstance(self._color, _NoneColor): msg = ( "can't set brightness when color.type is None. Set color.rgb" " or .theme_color first." ) raise ValueError(msg) class _Color(object): """ Object factory for color object of the appropriate type, also the base class for all color type classes such as SRgbColor. 
""" def __new__(cls, xClr): color_cls = { type(None): _NoneColor, CT_HslColor: _HslColor, CT_PresetColor: _PrstColor, CT_SchemeColor: _SchemeColor, CT_ScRgbColor: _ScRgbColor, CT_SRgbColor: _SRgbColor, CT_SystemColor: _SysColor, }[type(xClr)] return super(_Color, cls).__new__(color_cls) def __init__(self, xClr): super(_Color, self).__init__() self._xClr = xClr @property def brightness(self): lumMod, lumOff = self._xClr.lumMod, self._xClr.lumOff # a tint is lighter, a shade is darker # only tints have lumOff child if lumOff is not None: brightness = lumOff.val return brightness # which leaves shades, if lumMod is present if lumMod is not None: brightness = lumMod.val - 1.0 return brightness # there's no brightness adjustment if no lum{Mod|Off} elements return 0 @brightness.setter def brightness(self, value): if value > 0: self._tint(value) elif value < 0: self._shade(value) else: self._xClr.clear_lum() @property def color_type(self): # pragma: no cover tmpl = ".color_type property must be implemented on %s" raise NotImplementedError(tmpl % self.__class__.__name__) @property def rgb(self): """ Raises TypeError on access unless overridden by subclass. """ tmpl = "no .rgb property on color type '%s'" raise AttributeError(tmpl % self.__class__.__name__) @property def theme_color(self): """ Raises TypeError on access unless overridden by subclass. """ return MSO_THEME_COLOR.NOT_THEME_COLOR def _shade(self, value): lumMod_val = 1.0 - abs(value) color_elm = self._xClr.clear_lum() color_elm.add_lumMod(lumMod_val) def _tint(self, value): lumOff_val = value lumMod_val = 1.0 - lumOff_val color_elm = self._xClr.clear_lum() color_elm.add_lumMod(lumMod_val) color_elm.add_lumOff(lumOff_val) class _HslColor(_Color): @property def color_type(self): return MSO_COLOR_TYPE.HSL class _NoneColor(_Color): @property def color_type(self): return None @property def theme_color(self): """ Raise TypeError on attempt to access .theme_color when no color choice is present. 
""" tmpl = "no .theme_color property on color type '%s'" raise AttributeError(tmpl % self.__class__.__name__) class _PrstColor(_Color): @property def color_type(self): return MSO_COLOR_TYPE.PRESET class _SchemeColor(_Color): def __init__(self, schemeClr): super(_SchemeColor, self).__init__(schemeClr) self._schemeClr = schemeClr @property def color_type(self): return MSO_COLOR_TYPE.SCHEME @property def theme_color(self): """ Theme color value of this color, one of those defined in the MSO_THEME_COLOR enumeration, e.g. MSO_THEME_COLOR.ACCENT_1. None if no theme color is explicitly defined for this font. Setting this to a value in MSO_THEME_COLOR causes the color's type to change to ``MSO_COLOR_TYPE.SCHEME``. """ return self._schemeClr.val @theme_color.setter def theme_color(self, mso_theme_color_idx): self._schemeClr.val = mso_theme_color_idx class _ScRgbColor(_Color): @property def color_type(self): return MSO_COLOR_TYPE.SCRGB class _SRgbColor(_Color): def __init__(self, srgbClr): super(_SRgbColor, self).__init__(srgbClr) self._srgbClr = srgbClr @property def color_type(self): return MSO_COLOR_TYPE.RGB @property def rgb(self): """ |RGBColor| value of this color, corresponding to the value in the required ``val`` attribute of the ``<a:srgbColr>`` element. """ return RGBColor.from_string(self._srgbClr.val) @rgb.setter def rgb(self, rgb): self._srgbClr.val = str(rgb) class _SysColor(_Color): @property def color_type(self): return MSO_COLOR_TYPE.SYSTEM class RGBColor(tuple): """ Immutable value object defining a particular RGB color. 
""" def __new__(cls, r, g, b): msg = 'RGBColor() takes three integer values 0-255' for val in (r, g, b): if not isinstance(val, int) or val < 0 or val > 255: raise ValueError(msg) return super(RGBColor, cls).__new__(cls, (r, g, b)) def __str__(self): """ Return a hex string rgb value, like '3C2F80' """ return '%02X%02X%02X' % self @classmethod def from_string(cls, rgb_hex_str): """ Return a new instance from an RGB color hex string like ``'3C2F80'``. """ r = int(rgb_hex_str[:2], 16) g = int(rgb_hex_str[2:4], 16) b = int(rgb_hex_str[4:], 16) return cls(r, g, b)
Novasoft-India/OperERP-AM-Motors
refs/heads/master
openerp/addons/mail/res_partner.py
52
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.tools.translate import _
from openerp.osv import fields, osv


class res_partner_mail(osv.Model):
    """ Update partner to add a field about notification preferences """
    _name = "res.partner"
    _inherit = ['res.partner', 'mail.thread']
    # Messages posted on a partner are displayed flat, without the usual
    # parent/ancestor message header of mail.thread.
    _mail_flat_thread = False

    _columns = {
        # Per-partner policy controlling which Inbox notifications are
        # forwarded to the partner by email.
        'notification_email_send': fields.selection([
            ('none', 'Never'),
            ('email', 'Incoming Emails only'),
            ('comment', 'Incoming Emails and Discussions'),
            ('all', 'All Messages (discussions, emails, followed system notifications)'),
            ], 'Receive Messages by Email', required=True,
            help="Policy to receive emails for new messages pushed to your personal Inbox:\n"
                 "- Never: no emails are sent\n"
                 "- Incoming Emails only: for messages received by the system via email\n"
                 "- Incoming Emails and Discussions: for incoming emails along with internal discussions\n"
                 "- All Messages: for every notification you receive in your Inbox"),
    }

    _defaults = {
        'notification_email_send': lambda *args: 'comment'
    }

    def message_get_suggested_recipients(self, cr, uid, ids, context=None):
        # Start from the recipients suggested by mail.thread, then add each
        # partner himself as a suggestion (reason shown in the UI is
        # 'Partner Profile').
        recipients = super(res_partner_mail, self).message_get_suggested_recipients(cr, uid, ids, context=context)
        for partner in self.browse(cr, uid, ids, context=context):
            self._message_add_suggested_recipient(cr, uid, recipients, partner, partner=partner, reason=_('Partner Profile'))
        return recipients

    def message_post(self, cr, uid, thread_id, **kwargs):
        """ Override related to res.partner. In case of email message, set it
            as private:

            - add the target partner in the message partner_ids
            - set thread_id as None, because this will trigger the 'private'
              aspect of the message (model=False, res_id=False)
        """
        if isinstance(thread_id, (list, tuple)):
            thread_id = thread_id[0]
        if kwargs.get('type') == 'email':
            partner_ids = kwargs.get('partner_ids', [])
            # partner_ids holds ORM link commands such as (4, id); only
            # append a new link command when no existing command already
            # targets this partner.
            if thread_id not in [command[1] for command in partner_ids]:
                partner_ids.append((4, thread_id))
            kwargs['partner_ids'] = partner_ids
            # Posting with thread_id=False detaches the message from the
            # partner record, which is what makes it 'private'.
            thread_id = False
        return super(res_partner_mail, self).message_post(cr, uid, thread_id, **kwargs)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
rallylee/gem5
refs/heads/master
ext/ply/test/yacc_unused.py
174
# ----------------------------------------------------------------------------- # yacc_unused.py # # A grammar with an unused rule # ----------------------------------------------------------------------------- import sys if ".." not in sys.path: sys.path.insert(0,"..") import ply.yacc as yacc from calclex import tokens # Parsing rules precedence = ( ('left','PLUS','MINUS'), ('left','TIMES','DIVIDE'), ('right','UMINUS'), ) # dictionary of names names = { } def p_statement_assign(t): 'statement : NAME EQUALS expression' names[t[1]] = t[3] def p_statement_expr(t): 'statement : expression' print(t[1]) def p_expression_binop(t): '''expression : expression PLUS expression | expression MINUS expression | expression TIMES expression | expression DIVIDE expression''' if t[2] == '+' : t[0] = t[1] + t[3] elif t[2] == '-': t[0] = t[1] - t[3] elif t[2] == '*': t[0] = t[1] * t[3] elif t[2] == '/': t[0] = t[1] / t[3] def p_expression_uminus(t): 'expression : MINUS expression %prec UMINUS' t[0] = -t[2] def p_expression_group(t): 'expression : LPAREN expression RPAREN' t[0] = t[2] def p_expression_number(t): 'expression : NUMBER' t[0] = t[1] def p_expression_name(t): 'expression : NAME' try: t[0] = names[t[1]] except LookupError: print("Undefined name '%s'" % t[1]) t[0] = 0 def p_expr_list(t): 'exprlist : exprlist COMMA expression' pass def p_expr_list_2(t): 'exprlist : expression' pass def p_error(t): print("Syntax error at '%s'" % t.value) yacc.yacc()
vincentfretin/robotframework-selenium2library
refs/heads/master
src/Selenium2Library/locators/tableelementfinder.py
1
from .elementfinder import ElementFinder


class TableElementFinder(object):
    """Locates WebElements inside HTML tables.

    A base table locator is combined with a suffix that narrows the
    search to a region of the table (header, footer, a row or a column);
    the actual element lookup is delegated to an ElementFinder.
    """

    def __init__(self, element_finder=None):
        # Accept an injected finder (useful for testing); otherwise build
        # the default one.
        if not element_finder:
            element_finder = ElementFinder()
        self._element_finder = element_finder
        # Suffix templates keyed by (locator strategy, table region).
        # '%s' placeholders receive a 1-based row/column index. The
        # 'jquery' entries mirror 'sizzle' because both locator prefixes
        # resolve to the 'sizzle' strategy in _parse_table_locator.
        self._locator_suffixes = {
            ('css', 'default'): [''],
            ('css', 'content'): [''],
            ('css', 'header'): [' th'],
            ('css', 'footer'): [' tfoot td'],
            ('css', 'row'): [' tr:nth-child(%s)'],
            ('css', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],

            ('jquery', 'default'): [''],
            ('jquery', 'content'): [''],
            ('jquery', 'header'): [' th'],
            ('jquery', 'footer'): [' tfoot td'],
            ('jquery', 'row'): [' tr:nth-child(%s)'],
            ('jquery', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],

            ('sizzle', 'default'): [''],
            ('sizzle', 'content'): [''],
            ('sizzle', 'header'): [' th'],
            ('sizzle', 'footer'): [' tfoot td'],
            ('sizzle', 'row'): [' tr:nth-child(%s)'],
            ('sizzle', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],

            ('xpath', 'default'): [''],
            ('xpath', 'content'): ['//*'],
            ('xpath', 'header'): ['//th'],
            ('xpath', 'footer'): ['//tfoot//td'],
            ('xpath', 'row'): ['//tr[%s]//*'],
            ('xpath', 'col'): ['//tr//*[self::td or self::th][%s]']
        }

    def find(self, browser, table_locator):
        """Return the table element itself, or None if not found."""
        locators = self._parse_table_locator(table_locator, 'default')
        return self._search_in_locators(browser, locators, None)

    def find_by_content(self, browser, table_locator, content):
        """Return the first table descendant whose text contains *content*."""
        locators = self._parse_table_locator(table_locator, 'content')
        return self._search_in_locators(browser, locators, content)

    def find_by_header(self, browser, table_locator, content):
        """Return the first header cell whose text contains *content*."""
        locators = self._parse_table_locator(table_locator, 'header')
        return self._search_in_locators(browser, locators, content)

    def find_by_footer(self, browser, table_locator, content):
        """Return the first footer cell whose text contains *content*."""
        locators = self._parse_table_locator(table_locator, 'footer')
        return self._search_in_locators(browser, locators, content)

    def find_by_row(self, browser, table_locator, col, content):
        """Return the first cell of row *col* whose text contains *content*.

        Note: 'col' is in fact the 1-based row index; the parameter name
        is kept as-is for backward compatibility with existing callers.
        """
        locators = self._parse_table_locator(table_locator, 'row')
        locators = [locator % str(col) for locator in locators]
        return self._search_in_locators(browser, locators, content)

    def find_by_col(self, browser, table_locator, col, content):
        """Return the first cell of column *col* whose text contains *content*."""
        locators = self._parse_table_locator(table_locator, 'col')
        locators = [locator % str(col) for locator in locators]
        return self._search_in_locators(browser, locators, content)

    def _parse_table_locator(self, table_locator, location_method):
        """Resolve *table_locator* into a list of concrete locator strings.

        A bare value is treated as a table id and turned into a CSS
        locator; explicit 'xpath=', 'jquery='/'sizzle=' and 'css='
        prefixes are honoured.
        """
        if table_locator.startswith('xpath='):
            table_locator_type = 'xpath'
        elif table_locator.startswith('jquery=') or table_locator.startswith('sizzle='):
            table_locator_type = 'sizzle'
        else:
            if not table_locator.startswith('css='):
                table_locator = "css=table#%s" % table_locator
            table_locator_type = 'css'
        locator_suffixes = self._locator_suffixes[(table_locator_type, location_method)]
        # Return a real list instead of the previous lazy map() object: on
        # Python 3 a map iterator is single-use, so any caller that
        # iterates the result twice would silently see an empty sequence.
        return [table_locator + suffix for suffix in locator_suffixes]

    def _search_in_locators(self, browser, locators, content):
        """Try each locator in turn and return the first matching element.

        With content=None the first element found wins; otherwise the
        element's text must contain *content*. Returns None when nothing
        matches.
        """
        for locator in locators:
            elements = self._element_finder.find(browser, locator)
            for element in elements:
                if content is None:
                    return element
                element_text = element.text
                if element_text and content in element_text:
                    return element
        return None
portnov/sverchok
refs/heads/master
nodes/vector/vector_polar_out.py
3
# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

from math import sin, cos, atan, atan2, degrees, sqrt, acos

import bpy
from bpy.props import FloatProperty, EnumProperty

from sverchok.node_tree import SverchCustomTreeNode, StringsSocket
from sverchok.data_structure import updateNode, match_long_repeat


def cylindrical(v, mode):
    """Return cylindrical coordinates (rho, phi, z) of cartesian vector v.

    phi is in radians unless mode == "degrees".
    """
    x,y,z = v
    rho = sqrt(x*x + y*y)
    phi = atan2(y,x)
    if mode == "degrees":
        phi = degrees(phi)
    return rho, phi, z


def spherical(v, mode):
    """Return spherical coordinates (rho, phi, theta) of cartesian vector v.

    Angles are in radians unless mode == "degrees".  The zero vector maps
    to (0, 0, 0) so acos(z/rho) never divides by zero.
    """
    x,y,z = v
    rho = sqrt(x*x + y*y + z*z)
    if rho == 0.0:
        return 0.0, 0.0, 0.0
    theta = acos(z/rho)
    phi = atan2(y,x)
    if mode == "degrees":
        phi = degrees(phi)
        theta = degrees(theta)
    return rho, phi, theta


class VectorPolarOutNode(bpy.types.Node, SverchCustomTreeNode):
    '''Get cylindrical or spherical coordinates from vectors'''
    bl_idname = 'VectorPolarOutNode'
    bl_label = 'Vector polar output'
    bl_icon = 'OUTLINER_OB_EMPTY'

    # EnumProperty items: (identifier, UI label, tooltip, index).
    coord_modes = [
        ("z", "Cylinder", "Use cylindrical coordinates", 1),
        ("theta", "Sphere", "Use spherical coordinates", 2),
    ]

    def coordinate_changed(self, context):
        # Show only the third-coordinate socket matching the selected
        # system: "z" for cylindrical, "theta" for spherical.
        self.outputs["z"].hide = self.coordinates == "theta"
        self.outputs["theta"].hide = self.coordinates == "z"
        updateNode(self, context)

    coordinates = EnumProperty(items=coord_modes, default='z',
                               update=coordinate_changed)

    # Maps the selected coordinate system to its conversion function.
    func_dict = {'z': cylindrical, 'theta': spherical}

    angle_modes = [
        ("radians", "Radian", "Use angles in radians", 1),
        ("degrees", "Degree", "Use angles in degrees", 2)
    ]
    angles_mode = EnumProperty(items=angle_modes, default="radians",
                               update=updateNode)

    def sv_init(self, context):
        # One vector input; rho/phi are always visible, z/theta are
        # toggled by coordinate_changed.  theta starts hidden because the
        # default coordinate system is "z" (cylindrical).
        self.inputs.new("VerticesSocket", "Vectors")
        self.width = 100
        self.outputs.new('StringsSocket', "rho")
        self.outputs.new('StringsSocket', "phi")
        self.outputs.new('StringsSocket', "z")
        self.outputs.new('StringsSocket', "theta").hide = True

    def draw_buttons(self, context, layout):
        layout.prop(self, "coordinates", expand=True)
        layout.prop(self, "angles_mode", expand=True)

    def process(self):
        # Skip all work when no relevant output socket is connected.
        if not (self.outputs['rho'].is_linked or self.outputs['phi'].is_linked or
                self.outputs[self.coordinates].is_linked):
            return
        vss = self.inputs['Vectors'].sv_get()
        result_rhos = []
        result_phis = []
        result_zs = []
        for vs in vss:
            rs = []
            ps = []
            zs = []
            for v in vs:
                # Third component is z (cylindrical) or theta (spherical),
                # depending on the selected conversion function.
                rho, phi, z = self.func_dict[self.coordinates](v, self.angles_mode)
                rs.append(rho)
                ps.append(phi)
                zs.append(z)
            result_rhos.append(rs)
            result_phis.append(ps)
            result_zs.append(zs)
        if self.outputs['rho'].is_linked:
            self.outputs['rho'].sv_set(result_rhos)
        if self.outputs['phi'].is_linked:
            self.outputs['phi'].sv_set(result_phis)
        if self.outputs[self.coordinates].is_linked:
            self.outputs[self.coordinates].sv_set(result_zs)


def register():
    bpy.utils.register_class(VectorPolarOutNode)


def unregister():
    bpy.utils.unregister_class(VectorPolarOutNode)
erueloi/django-allauth
refs/heads/master
allauth/socialaccount/providers/vk/tests.py
71
from __future__ import absolute_import

from allauth.socialaccount.tests import create_oauth2_tests
from allauth.socialaccount.providers import registry
from allauth.tests import MockedResponse

from .provider import VKProvider


# create_oauth2_tests() builds a base TestCase implementing the generic
# OAuth2 login flow; this subclass only supplies the VK-specific canned
# server responses consumed by that flow.
class VKTests(create_oauth2_tests(registry.by_id(VKProvider.id))):

    def get_mocked_response(self, verified_email=True):
        # Canned body of VK's profile (users.get style) API response.
        return MockedResponse(200, """
{"response": [{"last_name": "Penners", "university_name": "", "photo":
"http://vk.com/images/camera_c.gif", "sex": 2, "photo_medium":
"http://vk.com/images/camera_b.gif", "relation": "0", "timezone": 1,
"photo_big": "http://vk.com/images/camera_a.gif", "uid": 219004864,
"universities": [], "city": "1430", "first_name": "Raymond",
"faculty_name": "", "online": 1, "counters": {"videos": 0,
"online_friends": 0, "notes": 0, "audios": 0, "photos": 0, "followers":
0, "groups": 0, "user_videos": 0, "albums": 0, "friends": 0},
"home_phone": "", "faculty": 0, "nickname": "", "screen_name":
"id219004864", "has_mobile": 1, "country": "139", "university": 0,
"graduation": 0, "activity": "", "last_seen": {"time": 1377805189}}]}
""")

    def get_login_response_json(self, with_refresh_token=True):
        # Canned access-token endpoint response; VK returns the user id
        # alongside the access token.
        return '{"user_id": 219004864, "access_token":"testac"}'
hungerburg/exist
refs/heads/develop
bin/deprecated/query.py
18
#!/usr/bin/python
# eXist xml document repository and xpath implementation
# Copyright (C) 2001, Wolfgang M. Meier ([email protected])
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# NOTE(review): legacy Python 2 code (print statements, raw_input,
# backtick repr, httplib.HTTP).  Documented as-is, not modernised.
import httplib, getopt, sys, readline
from string import split, replace, atoi, rfind
import re, time

class eXistClient:
    # Command-line client for an eXist server spoken to over plain HTTP:
    # queries are POSTed as <exist:request> XML, documents are stored with
    # PUT and fetched/removed via dedicated request elements.

    # Defaults; overridden by command-line options or interactive commands.
    host = '127.0.0.1:8088'   # server address as host:port
    requestFile = ''
    xslStyle = ''
    display = 1               # 1 = show full hits, 0 = summary only
    start = 1
    howmany = 15              # maximum number of hits returned per query
    outfile = ''              # when set, responses are written to this file
    indent = 'true'           # ask the server to pretty-print results

    def __init__(self, args):
        # Parse command-line options; -b (benchmark) and -p (parse/store)
        # perform one action and return immediately.
        optlist, args = getopt.getopt(args[1:], 'hqis:b:p:')
        quiet = 0
        for i in optlist:
            if i[0] == '-h':
                self.printUsage()
                sys.exit(0)
            elif i[0] == '-s':
                self.host = i[1]
            elif i[0] == '-b':
                self.benchmark(i[1])
                return
            elif i[0] == '-q':
                # NOTE(review): sets self.quiet, but the banner check below
                # reads the *local* variable 'quiet' -- so -q never actually
                # suppresses the banner.  Looks like a latent bug.
                self.quiet = 1
            elif i[0] == '-i':
                self.indent = 'false'
            elif i[0] == '-p':
                self.parse(i[1], args)
                return
        if not quiet:
            self.printBanner()
        # Without a request-file argument, drop into the interactive shell;
        # otherwise send the file's contents as a single request.
        if len(args) < 1:
            self.interactive()
            return
        else:
            try:
                freq = open(args[0], 'r')
            except IOError:
                print 'Unable to open file ', args[0]
                sys.exit(0)
            else:
                req = freq.read()
                freq.close()
            self.doQuery(req)

    def interactive(self):
        # Read-eval loop: dispatch on the first word of each input line.
        print '\npress h or ? for help on available commands.'
        while 1:
            s = raw_input('exist> ')
            line = split(s, ' ', 1)
            if line[0] in ('find', 'f'):
                r = self.queryRequest(line[1], 1, self.howmany)
                print r
                resp = self.doQuery(r)
                if self.outfile:
                    o = open(self.outfile, 'w')
                    o.write(resp)
                    print 'output written to ', self.outfile
                else:
                    print '\nserver responded:\n'
                    print resp
            elif line[0] in ('get', 'g'):
                # Optional second argument is a node id within the document.
                args = split(line[1])
                if len(args) > 1:
                    self.getRequest(args[0], args[1])
                else:
                    self.getRequest(args[0])
            elif line[0] in ('url', 'u'):
                self.host = line[1]
                print 'set host address = %s' % self.host
            elif line[0] in ('display', 'd'):
                self.setDisplay(split(line[1]))
            elif line[0] in ('output', 'o'):
                self.outfile = line[1]
                print 'set output file = %s' % self.outfile
            elif line[0] in ('bench', 'b'):
                self.benchmark(line[1])
            elif line[0] in ('remove', 'r'):
                self.remove(line[1])
            elif line[0] in ('parse', 'p'):
                # Optional second argument is the target collection.
                args = split(line[1], ' ', 1)
                if len(args) > 1:
                    self.parse(args[0], [ args[1] ])
                else:
                    self.parse(args[0], [])
            elif line[0] in ('help', '?', 'h'):
                self.getHelp()
            elif line[0] in ('quit', 'q'):
                break
            else:
                print 'unknown command: ' + `line[0]`

    def setDisplay(self, line):
        # 'display' command: reset to full display, then apply each argument.
        self.display = 1
        for i in line:
            self.setArg(i)

    def setArg(self, arg):
        # Arguments are either a display mode or a numeric hit limit.
        if arg in ('summary', 's'):
            self.display = 0
            print 'summarize = %i' % self.display
        elif arg in ('all', 'a'):
            self.display = 1
            print 'summarize = %i' % self.display
        else:
            self.howmany = atoi(arg)
            print 'howmany = %s' % self.howmany

    def getRequest(self, document, gid = ""):
        # NOTE(review): when a node id is given, gid is overwritten with the
        # *literal* string 'id="%s"' -- the id value itself is never
        # substituted in.  Probably meant: gid = 'id="%s"' % gid.
        if gid != "":
            gid = 'id="%s"'
        temp = """
<exist:request xmlns:exist="http://exist.sourceforge.net/NS/exist">
<exist:display indent="%s"/>
<exist:get document="%s" %s/>
</exist:request>
"""
        req = temp % (self.indent, document, gid)
        print req
        resp = self.doQuery(req)
        if self.outfile:
            o = open(self.outfile, 'w')
            o.write(resp)
            print 'output written to ', self.outfile
        else:
            print '\nserver responded:\n'
            print resp

    def queryRequest(self, query, start, howmany):
        # Build the <exist:request> XML for a query; the display/summarize
        # element name follows the current display mode.
        temp = """
<exist:request xmlns:exist="http://exist.sourceforge.net/NS/exist">
<exist:query>%s</exist:query>
<exist:%s indent="%s" howmany="%i" start="%i"/>
</exist:request>
"""
        if self.display:
            disp = "display"
        else:
            disp = "summarize"
        return temp % ( self.escape(query), disp, self.indent, howmany, start)

    def remove(self, doc):
        # Ask the server to delete a stored document.
        temp = """
<exist:request xmlns:exist="http://exist.sourceforge.net/NS/exist">
<exist:remove document="%s"/>
</exist:request>
"""
        req = temp % ( doc )
        print req
        resp = self.doQuery(req)
        print resp

    def escape(self, str):
        # Minimal XML escaping (&, <, >) for embedding the query text in
        # the request body.
        n = ''
        for c in str:
            if c == '&':
                n = n + '&amp;'
            elif c == '<':
                n = n + '&lt;'
            elif c == '>':
                n = n + '&gt;'
            else:
                n = n + c
        return n

    def parse(self, file, args):
        # Store a local XML file on the server via HTTP PUT.  The document
        # name is the file's basename, optionally prefixed with a
        # collection passed in args[0].
        p = rfind(file, '/')
        if p > -1:
            doc = file[p+1:]
        else:
            doc = file
        if(len(args) > 0):
            doc = args[0] + "/" + doc
        f = open(file, 'r')
        print "reading file %s ..." % file
        xml = f.read()
        f.close()
        print "ok.\nsending %s to server ..." % doc
        con = httplib.HTTP(self.host)
        con.putrequest('PUT', doc)
        con.putheader('Accept', 'text/xml')
        clen = len(xml)
        con.putheader('Content-Length', `clen`)
        con.endheaders()
        con.send(xml)
        errcode, errmsg, headers = con.getreply()
        if errcode != 200:
            print 'an error occurred: %s' % errmsg
        else:
            print "ok."

    def doQuery(self, request):
        # POST the request XML to the server root and return the raw
        # response body, or None on a non-200 reply.
        con = httplib.HTTP(self.host)
        con.putrequest('POST', '/')
        con.putheader('Accept', 'text/xml')
        clen = len(request)
        con.putheader('Content-Length', `clen`)
        con.endheaders()
        print 'Sending request ...\n'
        con.send(request)
        errcode, errmsg, headers = con.getreply()
        if errcode != 200:
            print 'an error occurred: %s' % errmsg
            return
        f = con.getfile()
        data = f.read()
        f.close()
        return data

    def benchmark(self, benchfile):
        # Run every query listed (one per line) in benchfile and print the
        # per-query timing reported by the server.
        # NOTE(review): 'o' (benchmark.out) is opened but never written or
        # closed, and qt/rt/start are computed but unused -- remnants of
        # the disabled retrieveTime measurement below.
        bench = open(benchfile, 'r')
        o = open('benchmark.out', 'w')
        queries = bench.readlines()
        print '%-10s | %-10s | %-50s' % ("query", "retrieve", "query string")
        print '=' * 75
        qt = 0.0
        rt = 0.0
        i = 1
        for qu in queries:
            start = time.clock()
            req = self.queryRequest(qu, 1, 20)
            data = self.doQuery(req)
            # The server reports its own timing in the response attributes.
            queryTime = re.search('queryTime="([0-9]+)"', data).group(1)
            #retrTime = re.search('retrieveTime="([0-9]+)"', data).group(1)
            retrTime = 0
            print '%-10s | %-10s ==> %-47s' % (queryTime, retrTime, qu[0:50])
            i = i + 1
        bench.close()

    def getHelp(self):
        # Help text for the interactive shell.
        print """
Available commands:
h|help                    print this help message
g|get docName             retrieve document docName from the database
r|remove docName          remove document docName from the database
p|parse file [collection] parse and store file to the repository
f|find expr               create query request with expr as query argument
d|display [ [a|all] | [s|summary] ] [howmany]
                          all    : return the actual content of matching nodes
                          summary: just return a short summary of hits per document
                          howmany: howmany nodes should be returned at maximum
o|output file             write server response to file
u|url host:port           set server address to host:port
b|bench file              execute queries contained in file and print statistics
"""

    def printUsage(self):
        print """
Usage: query.py [-h] [-s server] [-b benchmark-file] request-file
    -h Display this message
    -s Server address (e.g. localhost:8088)
    -b Benchmark: Execute queries from benchmark-file and print statistics
    -i Switch off indentation of results
"""

    def printBanner(self):
        print """
eXist version 0.5, Copyright (C) 2001 Wolfgang M. Meier
eXist comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions; for details read the license file.
"""

# Script entry point: the client runs straight from the import/exec of
# this file, driven by the command-line arguments.
c = eXistClient(sys.argv)
carnell69/kuma
refs/heads/master
vendor/packages/logilab/astng/test/regrtest_data/package/__init__.py
25
"""package's __init__ file"""

# Importing the subpackage here makes it available as an attribute of
# this package as soon as the package itself is imported.
import subpackage
MosheBerman/brisket-mashup
refs/heads/master
source/libraries/httplib2-0.8/python3/httplib2/__init__.py
43
"""
httplib2

A caching http interface that supports ETags and gzip
to conserve bandwidth.

Requires Python 3.0 or later

Changelog:
2009-05-28, Pilgrim: ported to Python 3
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.

"""

__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
                    "James Antill",
                    "Xavier Verges Farrero",
                    "Jonathan Feinberg",
                    "Blair Zajac",
                    "Sam Ruby",
                    "Louis Nyffenegger",
                    "Mark Pilgrim"]
__license__ = "MIT"
__version__ = "0.8"

import re
import sys
import email
import email.utils
import email.message
import email.feedparser
import io
import gzip
import zlib
import http.client
import urllib.parse
import base64
import os
import copy
import calendar
import time
import random
import errno
from hashlib import sha1 as _sha, md5 as _md5
import hmac
from gettext import gettext as _
import socket
import ssl

# Module-level alias so the TLS socket-wrapping call can be swapped out
# (e.g. monkey-patched in tests).
_ssl_wrap_socket = ssl.wrap_socket

# SOCKS proxy support is optional: socks stays None when the third-party
# module is not installed.
try:
    import socks
except ImportError:
    socks = None

from .iri2uri import iri2uri

def has_timeout(timeout):
    # True when an explicit timeout value is set.  On Pythons that expose
    # socket._GLOBAL_DEFAULT_TIMEOUT, that sentinel also counts as
    # "no explicit timeout".
    if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
        return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
    return (timeout is not None)

# Public API of the module.
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
           'RedirectMissingLocation', 'RedirectLimit',
           'FailedToDecompressContent',
           'UnimplementedDigestAuthOptionError',
           'UnimplementedHmacDigestAuthOptionError',
           'debuglevel', 'RETRIES']

# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0

# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2

# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception):
    """Root of the httplib2 exception hierarchy."""
    pass

# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error): def __init__(self, desc, response, content): self.response = response self.content = content HttpLib2Error.__init__(self, desc) class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass class RedirectLimit(HttpLib2ErrorWithResponse): pass class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass class MalformedHeader(HttpLib2Error): pass class RelativeURIError(HttpLib2Error): pass class ServerNotFoundError(HttpLib2Error): pass class CertificateValidationUnsupportedInPython31(HttpLib2Error): pass # Open Items: # ----------- # Proxy support # Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) # Pluggable cache storage (supports storing the cache in # flat files by default. We need a plug-in architecture # that can support Berkeley DB and Squid) # == Known Issues == # Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. # Does not handle Cache-Control: max-stale # Does not use Age: headers when calculating cache freshness. # The number of redirections to follow before giving up. # Note that only GET redirects are automatically followed. # Will also honor 301 requests by saving that info and never # requesting that URI again. DEFAULT_MAX_REDIRECTS = 5 # Which headers are hop-by-hop headers by default HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade'] # Default CA certificates file bundled with httplib2. 
CA_CERTS = os.path.join( os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt") def _get_end2end_headers(response): hopbyhop = list(HOP_BY_HOP) hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')]) return [header for header in list(response.keys()) if header not in hopbyhop] URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") def parse_uri(uri): """Parses a URI using the regex given in Appendix B of RFC 3986. (scheme, authority, path, query, fragment) = parse_uri(uri) """ groups = URI.match(uri).groups() return (groups[1], groups[3], groups[4], groups[6], groups[8]) def urlnorm(uri): (scheme, authority, path, query, fragment) = parse_uri(uri) if not scheme or not authority: raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) authority = authority.lower() scheme = scheme.lower() if not path: path = "/" # Could do syntax based normalization of the URI before # computing the digest. See Section 6.2.2 of Std 66. request_uri = query and "?".join([path, query]) or path scheme = scheme.lower() defrag_uri = scheme + "://" + authority + request_uri return scheme, authority, request_uri, defrag_uri # Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) re_url_scheme = re.compile(br'^\w+://') re_url_scheme_s = re.compile(r'^\w+://') re_slash = re.compile(br'[?/:|]+') def safename(filename): """Return a filename suitable for the cache. Strips dangerous and common characters to create a filename we can use to store the cache in. 
""" try: if re_url_scheme_s.match(filename): if isinstance(filename,bytes): filename = filename.decode('utf-8') filename = filename.encode('idna') else: filename = filename.encode('idna') except UnicodeError: pass if isinstance(filename,str): filename=filename.encode('utf-8') filemd5 = _md5(filename).hexdigest().encode('utf-8') filename = re_url_scheme.sub(b"", filename) filename = re_slash.sub(b",", filename) # limit length of filename if len(filename)>200: filename=filename[:200] return b",".join((filename, filemd5)).decode('utf-8') NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+') def _normalize_headers(headers): return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.items()]) def _parse_cache_control(headers): retval = {} if 'cache-control' in headers: parts = headers['cache-control'].split(',') parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")] parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")] retval = dict(parts_with_args + parts_wo_args) return retval # Whether to use a strict mode to parse WWW-Authenticate headers # Might lead to bad results in case of ill-formed header value, # so disabled by default, falling back to relaxed parsing. # Set to true to turn on, usefull for testing servers. USE_WWW_AUTH_STRICT_PARSING = 0 # In regex below: # [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP # "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space # Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: # \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"? 
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$") WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$") UNQUOTE_PAIRS = re.compile(r'\\(.)') def _parse_www_authenticate(headers, headername='www-authenticate'): """Returns a dictionary of dictionaries, one dict per auth_scheme.""" retval = {} if headername in headers: try: authenticate = headers[headername].strip() www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED while authenticate: # Break off the scheme at the beginning of the line if headername == 'authentication-info': (auth_scheme, the_rest) = ('digest', authenticate) else: (auth_scheme, the_rest) = authenticate.split(" ", 1) # Now loop over all the key value pairs that come after the scheme, # being careful not to roll into the next scheme match = www_auth.search(the_rest) auth_params = {} while match: if match and len(match.groups()) == 3: (key, value, the_rest) = match.groups() auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')]) match = www_auth.search(the_rest) retval[auth_scheme.lower()] = auth_params authenticate = the_rest.strip() except ValueError: raise MalformedHeader("WWW-Authenticate") return retval def _entry_disposition(response_headers, request_headers): """Determine freshness from the Date, Expires and Cache-Control headers. We don't handle the following: 1. Cache-Control: max-stale 2. Age: headers are not used in the calculations. Not that this algorithm is simpler than you might think because we are operating as a private (non-shared) cache. This lets us ignore 's-maxage'. We can also ignore 'proxy-invalidate' since we aren't a proxy. 
We will never return a stale document as fresh as a design decision, and thus the non-implementation of 'max-stale'. This also lets us safely ignore 'must-revalidate' since we operate as if every server has sent 'must-revalidate'. Since we are private we get to ignore both 'public' and 'private' parameters. We also ignore 'no-transform' since we don't do any transformations. The 'no-store' parameter is handled at a higher level. So the only Cache-Control parameters we look at are: no-cache only-if-cached max-age min-fresh """ retval = "STALE" cc = _parse_cache_control(request_headers) cc_response = _parse_cache_control(response_headers) if 'pragma' in request_headers and request_headers['pragma'].lower().find('no-cache') != -1: retval = "TRANSPARENT" if 'cache-control' not in request_headers: request_headers['cache-control'] = 'no-cache' elif 'no-cache' in cc: retval = "TRANSPARENT" elif 'no-cache' in cc_response: retval = "STALE" elif 'only-if-cached' in cc: retval = "FRESH" elif 'date' in response_headers: date = calendar.timegm(email.utils.parsedate_tz(response_headers['date'])) now = time.time() current_age = max(0, now - date) if 'max-age' in cc_response: try: freshness_lifetime = int(cc_response['max-age']) except ValueError: freshness_lifetime = 0 elif 'expires' in response_headers: expires = email.utils.parsedate_tz(response_headers['expires']) if None == expires: freshness_lifetime = 0 else: freshness_lifetime = max(0, calendar.timegm(expires) - date) else: freshness_lifetime = 0 if 'max-age' in cc: try: freshness_lifetime = int(cc['max-age']) except ValueError: freshness_lifetime = 0 if 'min-fresh' in cc: try: min_fresh = int(cc['min-fresh']) except ValueError: min_fresh = 0 current_age += min_fresh if freshness_lifetime > current_age: retval = "FRESH" return retval def _decompressContent(response, new_content): content = new_content try: encoding = response.get('content-encoding', None) if encoding in ['gzip', 'deflate']: if encoding == 'gzip': content = 
gzip.GzipFile(fileobj=io.BytesIO(new_content)).read() if encoding == 'deflate': content = zlib.decompress(content) response['content-length'] = str(len(content)) # Record the historical presence of the encoding in a way the won't interfere. response['-content-encoding'] = response['content-encoding'] del response['content-encoding'] except IOError: content = "" raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content) return content def _bind_write_headers(msg): from email.header import Header def _write_headers(self): # Self refers to the Generator object for h, v in msg.items(): print('%s:' % h, end=' ', file=self._fp) if isinstance(v, Header): print(v.encode(maxlinelen=self._maxheaderlen), file=self._fp) else: # Header's got lots of smarts, so use it. header = Header(v, maxlinelen=self._maxheaderlen, charset='utf-8', header_name=h) print(header.encode(), file=self._fp) # A blank line always separates headers from body print(file=self._fp) return _write_headers def _updateCache(request_headers, response_headers, content, cache, cachekey): if cachekey: cc = _parse_cache_control(request_headers) cc_response = _parse_cache_control(response_headers) if 'no-store' in cc or 'no-store' in cc_response: cache.delete(cachekey) else: info = email.message.Message() for key, value in response_headers.items(): if key not in ['status','content-encoding','transfer-encoding']: info[key] = value # Add annotations to the cache to indicate what headers # are variant for this request. 
vary = response_headers.get('vary', None) if vary: vary_headers = vary.lower().replace(' ', '').split(',') for header in vary_headers: key = '-varied-%s' % header try: info[key] = request_headers[header] except KeyError: pass status = response_headers.status if status == 304: status = 200 status_header = 'status: %d\r\n' % status try: header_str = info.as_string() except UnicodeEncodeError: setattr(info, '_write_headers', _bind_write_headers(info)) header_str = info.as_string() header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str) text = b"".join([status_header.encode('utf-8'), header_str.encode('utf-8'), content]) cache.set(cachekey, text) def _cnonce(): dig = _md5(("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).encode('utf-8')).hexdigest() return dig[:16] def _wsse_username_token(cnonce, iso_now, password): return base64.b64encode(_sha(("%s%s%s" % (cnonce, iso_now, password)).encode('utf-8')).digest()).strip() # For credentials we need two things, first # a pool of credential to try (not necesarily tied to BAsic, Digest, etc.) # Then we also need a list of URIs that have already demanded authentication # That list is tricky since sub-URIs can take the same auth, or the # auth scheme may change as you descend the tree. # So we also need each Auth instance to be able to tell us # how close to the 'top' it is. class Authentication(object): def __init__(self, credentials, host, request_uri, headers, response, content, http): (scheme, authority, path, query, fragment) = parse_uri(request_uri) self.path = path self.host = host self.credentials = credentials self.http = http def depth(self, request_uri): (scheme, authority, path, query, fragment) = parse_uri(request_uri) return request_uri[len(self.path):].count("/") def inscope(self, host, request_uri): # XXX Should we normalize the request_uri? 
(scheme, authority, path, query, fragment) = parse_uri(request_uri) return (host == self.host) and path.startswith(self.path) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header. Over-rise this in sub-classes.""" pass def response(self, response, content): """Gives us a chance to update with new nonces or such returned from the last authorized response. Over-rise this in sub-classes if necessary. Return TRUE is the request is to be retried, for example Digest may return stale=true. """ return False def __eq__(self, auth): return False def __ne__(self, auth): return True def __lt__(self, auth): return True def __gt__(self, auth): return False def __le__(self, auth): return True def __ge__(self, auth): return False def __bool__(self): return True class BasicAuthentication(Authentication): def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'Basic ' + base64.b64encode(("%s:%s" % self.credentials).encode('utf-8')).strip().decode('utf-8') class DigestAuthentication(Authentication): """Only do qop='auth' and MD5, since that is all Apache currently implements""" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') self.challenge = challenge['digest'] qop = self.challenge.get('qop', 'auth') self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None if self.challenge['qop'] is None: raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." 
% qop)) self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper() if self.challenge['algorithm'] != 'MD5': raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm'])) self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]]) self.challenge['nc'] = 1 def request(self, method, request_uri, headers, content, cnonce = None): """Modify the request headers""" H = lambda x: _md5(x.encode('utf-8')).hexdigest() KD = lambda s, d: H("%s:%s" % (s, d)) A2 = "".join([method, ":", request_uri]) self.challenge['cnonce'] = cnonce or _cnonce() request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % ( self.challenge['nonce'], '%08x' % self.challenge['nc'], self.challenge['cnonce'], self.challenge['qop'], H(A2))) headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % ( self.credentials[0], self.challenge['realm'], self.challenge['nonce'], request_uri, self.challenge['algorithm'], request_digest, self.challenge['qop'], self.challenge['nc'], self.challenge['cnonce']) if self.challenge.get('opaque'): headers['authorization'] += ', opaque="%s"' % self.challenge['opaque'] self.challenge['nc'] += 1 def response(self, response, content): if 'authentication-info' not in response: challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {}) if 'true' == challenge.get('stale'): self.challenge['nonce'] = challenge['nonce'] self.challenge['nc'] = 1 return True else: updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {}) if 'nextnonce' in updated_challenge: self.challenge['nonce'] = updated_challenge['nextnonce'] self.challenge['nc'] = 1 return False class HmacDigestAuthentication(Authentication): """Adapted from Robert Sayre's code and DigestAuthentication above.""" __author__ = "Thomas Broyer ([email protected])" def __init__(self, 
credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') self.challenge = challenge['hmacdigest'] # TODO: self.challenge['domain'] self.challenge['reason'] = self.challenge.get('reason', 'unauthorized') if self.challenge['reason'] not in ['unauthorized', 'integrity']: self.challenge['reason'] = 'unauthorized' self.challenge['salt'] = self.challenge.get('salt', '') if not self.challenge.get('snonce'): raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty.")) self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1') if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']: raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm'])) self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1') if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']: raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." 
% self.challenge['pw-algorithm'])) if self.challenge['algorithm'] == 'HMAC-MD5': self.hashmod = _md5 else: self.hashmod = _sha if self.challenge['pw-algorithm'] == 'MD5': self.pwhashmod = _md5 else: self.pwhashmod = _sha self.key = "".join([self.credentials[0], ":", self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(), ":", self.challenge['realm']]) self.key = self.pwhashmod.new(self.key).hexdigest().lower() def request(self, method, request_uri, headers, content): """Modify the request headers""" keys = _get_end2end_headers(headers) keylist = "".join(["%s " % k for k in keys]) headers_val = "".join([headers[k] for k in keys]) created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime()) cnonce = _cnonce() request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val) request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower() headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % ( self.credentials[0], self.challenge['realm'], self.challenge['snonce'], cnonce, request_uri, created, request_digest, keylist) def response(self, response, content): challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {}) if challenge.get('reason') in ['integrity', 'stale']: return True return False class WsseAuthentication(Authentication): """This is thinly tested and should not be relied upon. At this time there isn't any third party server to test against. 
Blogger and TypePad implemented this algorithm at one point but Blogger has since switched to Basic over HTTPS and TypePad has implemented it wrong, by never issuing a 401 challenge but instead requiring your client to telepathically know that their endpoint is expecting WSSE profile="UsernameToken".""" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'WSSE profile="UsernameToken"' iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) cnonce = _cnonce() password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1]) headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % ( self.credentials[0], password_digest, cnonce, iso_now) class GoogleLoginAuthentication(Authentication): def __init__(self, credentials, host, request_uri, headers, response, content, http): from urllib.parse import urlencode Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') service = challenge['googlelogin'].get('service', 'xapi') # Bloggger actually returns the service in the challenge # For the rest we guess based on the URI if service == 'xapi' and request_uri.find("calendar") > 0: service = "cl" # No point in guessing Base or Spreadsheet #elif request_uri.find("spreadsheets") > 0: # service = "wise" auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent']) resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'}) lines = content.split('\n') d = 
dict([tuple(line.split("=", 1)) for line in lines if line]) if resp.status == 403: self.Auth = "" else: self.Auth = d['Auth'] def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'GoogleLogin Auth=' + self.Auth AUTH_SCHEME_CLASSES = { "basic": BasicAuthentication, "wsse": WsseAuthentication, "digest": DigestAuthentication, "hmacdigest": HmacDigestAuthentication, "googlelogin": GoogleLoginAuthentication } AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] class FileCache(object): """Uses a local directory as a store for cached files. Not really safe to use if multiple threads or processes are going to be running on the same cache. """ def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior self.cache = cache self.safe = safe if not os.path.exists(cache): os.makedirs(self.cache) def get(self, key): retval = None cacheFullPath = os.path.join(self.cache, self.safe(key)) try: f = open(cacheFullPath, "rb") retval = f.read() f.close() except IOError: pass return retval def set(self, key, value): cacheFullPath = os.path.join(self.cache, self.safe(key)) f = open(cacheFullPath, "wb") f.write(value) f.close() def delete(self, key): cacheFullPath = os.path.join(self.cache, self.safe(key)) if os.path.exists(cacheFullPath): os.remove(cacheFullPath) class Credentials(object): def __init__(self): self.credentials = [] def add(self, name, password, domain=""): self.credentials.append((domain.lower(), name, password)) def clear(self): self.credentials = [] def iter(self, domain): for (cdomain, name, password) in self.credentials: if cdomain == "" or domain == cdomain: yield (name, password) class KeyCerts(Credentials): """Identical to Credentials except that name/password are mapped to key/cert.""" pass class ProxyInfo(object): """Collect information required to use a proxy.""" def __init__(self, 
proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None): """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX constants. For example: p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000) """ self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass def astuple(self): return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass) def isgood(self): return socks and (self.proxy_host != None) and (self.proxy_port != None) def proxy_info_from_environment(method='http'): """ Read proxy info from the environment variables. """ if method not in ('http', 'https'): return env_var = method + '_proxy' url = os.environ.get(env_var, os.environ.get(env_var.upper())) if not url: return return proxy_info_from_url(url, method) def proxy_info_from_url(url, method='http'): """ Construct a ProxyInfo from a URL (such as http_proxy env var) """ url = urllib.parse.urlparse(url) username = None password = None port = None if '@' in url[1]: ident, host_port = url[1].split('@', 1) if ':' in ident: username, password = ident.split(':', 1) else: password = ident else: host_port = url[1] if ':' in host_port: host, port = host_port.split(':', 1) else: host = host_port if port: port = int(port) else: port = dict(https=443, http=80)[method] proxy_type = 3 # socks.PROXY_TYPE_HTTP return ProxyInfo( proxy_type = proxy_type, proxy_host = host, proxy_port = port, proxy_user = username or None, proxy_pass = password or None, ) class HTTPConnectionWithTimeout(http.client.HTTPConnection): """HTTPConnection subclass that supports timeouts HTTPConnection subclass that supports timeouts All timeouts are in seconds. If None is passed for timeout then Python's default timeout for sockets will be used. 
See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout """ def __init__(self, host, port=None, timeout=None, proxy_info=None): http.client.HTTPConnection.__init__(self, host, port=port, timeout=timeout) self.proxy_info = proxy_info class HTTPSConnectionWithTimeout(http.client.HTTPSConnection): """ This class allows communication via SSL. All timeouts are in seconds. If None is passed for timeout then Python's default timeout for sockets will be used. See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout """ def __init__(self, host, port=None, key_file=None, cert_file=None, timeout=None, proxy_info=None, ca_certs=None, disable_ssl_certificate_validation=False): self.proxy_info = proxy_info context = None if ca_certs is None: ca_certs = CA_CERTS if (cert_file or ca_certs) and not disable_ssl_certificate_validation: if not hasattr(ssl, 'SSLContext'): raise CertificateValidationUnsupportedInPython31() context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_REQUIRED if cert_file: context.load_cert_chain(cert_file, key_file) if ca_certs: context.load_verify_locations(ca_certs) http.client.HTTPSConnection.__init__( self, host, port=port, key_file=key_file, cert_file=cert_file, timeout=timeout, context=context, check_hostname=True) SCHEME_TO_CONNECTION = { 'http': HTTPConnectionWithTimeout, 'https': HTTPSConnectionWithTimeout, } class Http(object): """An HTTP client that handles: - all methods - caching - ETags - compression, - HTTPS - Basic - Digest - WSSE and more. """ def __init__(self, cache=None, timeout=None, proxy_info=proxy_info_from_environment, ca_certs=None, disable_ssl_certificate_validation=False): """If 'cache' is a string then it is used as a directory name for a disk cache. Otherwise it must be an object that supports the same interface as FileCache. All timeouts are in seconds. 
If None is passed for timeout then Python's default timeout for sockets will be used. See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout `proxy_info` may be: - a callable that takes the http scheme ('http' or 'https') and returns a ProxyInfo instance per request. By default, uses proxy_info_from_environment. - a ProxyInfo instance (static proxy config). - None (proxy disabled). ca_certs is the path of a file containing root CA certificates for SSL server certificate validation. By default, a CA cert file bundled with httplib2 is used. If disable_ssl_certificate_validation is true, SSL cert validation will not be performed. """ self.proxy_info = proxy_info self.ca_certs = ca_certs self.disable_ssl_certificate_validation = \ disable_ssl_certificate_validation # Map domain name to an httplib connection self.connections = {} # The location of the cache, for now a directory # where cached responses are held. if cache and isinstance(cache, str): self.cache = FileCache(cache) else: self.cache = cache # Name/password self.credentials = Credentials() # Key/cert self.certificates = KeyCerts() # authorization objects self.authorizations = [] # If set to False then no redirects are followed, even safe ones. self.follow_redirects = True # Which HTTP methods do we apply optimistic concurrency to, i.e. # which methods get an "if-match:" etag header added to them. self.optimistic_concurrency_methods = ["PUT", "PATCH"] # If 'follow_redirects' is True, and this is set to True then # all redirecs are followed, including unsafe ones. self.follow_all_redirects = False self.ignore_etag = False self.force_exception_to_status_code = False self.timeout = timeout # Keep Authorization: headers on a redirect. 
self.forward_authorization_headers = False def __getstate__(self): state_dict = copy.copy(self.__dict__) # In case request is augmented by some foreign object such as # credentials which handle auth if 'request' in state_dict: del state_dict['request'] if 'connections' in state_dict: del state_dict['connections'] return state_dict def __setstate__(self, state): self.__dict__.update(state) self.connections = {} def _auth_from_challenge(self, host, request_uri, headers, response, content): """A generator that creates Authorization objects that can be applied to requests. """ challenges = _parse_www_authenticate(response, 'www-authenticate') for cred in self.credentials.iter(host): for scheme in AUTH_SCHEME_ORDER: if scheme in challenges: yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) def add_credentials(self, name, password, domain=""): """Add a name and password that will be used any time a request requires authentication.""" self.credentials.add(name, password, domain) def add_certificate(self, key, cert, domain): """Add a key and cert that will be used any time a request requires authentication.""" self.certificates.add(key, cert, domain) def clear_credentials(self): """Remove all the names and passwords that are used for authentication""" self.credentials.clear() self.authorizations = [] def _conn_request(self, conn, request_uri, method, body, headers): for i in range(RETRIES): try: if conn.sock is None: conn.connect() conn.request(method, request_uri, body, headers) except socket.timeout: conn.close() raise except socket.gaierror: conn.close() raise ServerNotFoundError("Unable to find the server at %s" % conn.host) except socket.error as e: errno_ = (e.args[0].errno if isinstance(e.args[0], socket.error) else e.errno) if errno_ == errno.ECONNREFUSED: # Connection refused raise except http.client.HTTPException: if conn.sock is None: if i < RETRIES-1: conn.close() conn.connect() continue else: conn.close() raise if i < 
RETRIES-1: conn.close() conn.connect() continue # Just because the server closed the connection doesn't apparently mean # that the server didn't send a response. pass try: response = conn.getresponse() except socket.timeout: raise except (socket.error, http.client.HTTPException): conn.close() if i == 0: conn.close() conn.connect() continue else: raise else: content = b"" if method == "HEAD": conn.close() else: content = response.read() response = Response(response) if method != "HEAD": content = _decompressContent(response, content) break return (response, content) def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey): """Do the actual request using the connection object and also follow one level of redirects if necessary""" auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)] auth = auths and sorted(auths)[0][1] or None if auth: auth.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers) if auth: if auth.response(response, body): auth.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers ) response._stale_digest = 1 if response.status == 401: for authorization in self._auth_from_challenge(host, request_uri, headers, response, content): authorization.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers, ) if response.status != 401: self.authorizations.append(authorization) authorization.response(response, body) break if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303): if self.follow_redirects and response.status in [300, 301, 302, 303, 307]: # Pick out the location header and basically start from the beginning # remembering first to strip the ETag header and decrement our 'depth' if redirections: if 'location' 
not in response and response.status != 300: raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content) # Fix-up relative redirects (which violate an RFC 2616 MUST) if 'location' in response: location = response['location'] (scheme, authority, path, query, fragment) = parse_uri(location) if authority == None: response['location'] = urllib.parse.urljoin(absolute_uri, location) if response.status == 301 and method in ["GET", "HEAD"]: response['-x-permanent-redirect-url'] = response['location'] if 'content-location' not in response: response['content-location'] = absolute_uri _updateCache(headers, response, content, self.cache, cachekey) if 'if-none-match' in headers: del headers['if-none-match'] if 'if-modified-since' in headers: del headers['if-modified-since'] if 'authorization' in headers and not self.forward_authorization_headers: del headers['authorization'] if 'location' in response: location = response['location'] old_response = copy.deepcopy(response) if 'content-location' not in old_response: old_response['content-location'] = absolute_uri redirect_method = method if response.status in [302, 303]: redirect_method = "GET" body = None (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1) response.previous = old_response else: raise RedirectLimit("Redirected more times than redirection_limit allows.", response, content) elif response.status in [200, 203] and method in ["GET", "HEAD"]: # Don't cache 206's since we aren't going to handle byte range requests if 'content-location' not in response: response['content-location'] = absolute_uri _updateCache(headers, response, content, self.cache, cachekey) return (response, content) def _normalize_headers(self, headers): return _normalize_headers(headers) # Need to catch and rebrand some exceptions # Then need to optionally turn all exceptions into status codes # including all socket.* and 
httplib.* exceptions. def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None): """ Performs a single HTTP request. The 'uri' is the URI of the HTTP resource and can begin with either 'http' or 'https'. The value of 'uri' must be an absolute URI. The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc. There is no restriction on the methods allowed. The 'body' is the entity body to be sent with the request. It is a string object. Any extra headers that are to be sent with the request should be provided in the 'headers' dictionary. The maximum number of redirect to follow before raising an exception is 'redirections. The default is 5. The return value is a tuple of (response, content), the first being and instance of the 'Response' class, the second being a string that contains the response entity body. """ try: if headers is None: headers = {} else: headers = self._normalize_headers(headers) if 'user-agent' not in headers: headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__ uri = iri2uri(uri) (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) domain_port = authority.split(":")[0:2] if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http': scheme = 'https' authority = domain_port[0] conn_key = scheme+":"+authority if conn_key in self.connections: conn = self.connections[conn_key] else: if not connection_type: connection_type = SCHEME_TO_CONNECTION[scheme] certs = list(self.certificates.iter(authority)) if issubclass(connection_type, HTTPSConnectionWithTimeout): if certs: conn = self.connections[conn_key] = connection_type( authority, key_file=certs[0][0], cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info, ca_certs=self.ca_certs, disable_ssl_certificate_validation= self.disable_ssl_certificate_validation) else: conn = self.connections[conn_key] = connection_type( authority, timeout=self.timeout, proxy_info=self.proxy_info, 
ca_certs=self.ca_certs, disable_ssl_certificate_validation= self.disable_ssl_certificate_validation) else: conn = self.connections[conn_key] = connection_type( authority, timeout=self.timeout, proxy_info=self.proxy_info) conn.set_debuglevel(debuglevel) if 'range' not in headers and 'accept-encoding' not in headers: headers['accept-encoding'] = 'gzip, deflate' info = email.message.Message() cached_value = None if self.cache: cachekey = defrag_uri cached_value = self.cache.get(cachekey) if cached_value: try: info, content = cached_value.split(b'\r\n\r\n', 1) info = email.message_from_bytes(info) for k, v in info.items(): if v.startswith('=?') and v.endswith('?='): info.replace_header(k, str(*email.header.decode_header(v)[0])) except (IndexError, ValueError): self.cache.delete(cachekey) cachekey = None cached_value = None else: cachekey = None if method in self.optimistic_concurrency_methods and self.cache and 'etag' in info and not self.ignore_etag and 'if-match' not in headers: # http://www.w3.org/1999/04/Editing/ headers['if-match'] = info['etag'] if method not in ["GET", "HEAD"] and self.cache and cachekey: # RFC 2616 Section 13.10 self.cache.delete(cachekey) # Check the vary header in the cache to see if this request # matches what varies in the cache. if method in ['GET', 'HEAD'] and 'vary' in info: vary = info['vary'] vary_headers = vary.lower().replace(' ', '').split(',') for header in vary_headers: key = '-varied-%s' % header value = info[key] if headers.get(header, None) != value: cached_value = None break if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers: if '-x-permanent-redirect-url' in info: # Should cached permanent redirects be counted in our redirection count? For now, yes. 
if redirections <= 0: raise RedirectLimit("Redirected more times than redirection_limit allows.", {}, "") (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1) response.previous = Response(info) response.previous.fromcache = True else: # Determine our course of action: # Is the cached entry fresh or stale? # Has the client requested a non-cached response? # # There seems to be three possible answers: # 1. [FRESH] Return the cache entry w/o doing a GET # 2. [STALE] Do the GET (but add in cache validators if available) # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request entry_disposition = _entry_disposition(info, headers) if entry_disposition == "FRESH": if not cached_value: info['status'] = '504' content = b"" response = Response(info) if cached_value: response.fromcache = True return (response, content) if entry_disposition == "STALE": if 'etag' in info and not self.ignore_etag and not 'if-none-match' in headers: headers['if-none-match'] = info['etag'] if 'last-modified' in info and not 'last-modified' in headers: headers['if-modified-since'] = info['last-modified'] elif entry_disposition == "TRANSPARENT": pass (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) if response.status == 304 and method == "GET": # Rewrite the cache entry with the new end-to-end headers # Take all headers that are in response # and overwrite their values in info. # unless they are hop-by-hop, or are listed in the connection header. 
for key in _get_end2end_headers(response): info[key] = response[key] merged_response = Response(info) if hasattr(response, "_stale_digest"): merged_response._stale_digest = response._stale_digest _updateCache(headers, merged_response, content, self.cache, cachekey) response = merged_response response.status = 200 response.fromcache = True elif response.status == 200: content = new_content else: self.cache.delete(cachekey) content = new_content else: cc = _parse_cache_control(headers) if 'only-if-cached'in cc: info['status'] = '504' response = Response(info) content = b"" else: (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) except Exception as e: if self.force_exception_to_status_code: if isinstance(e, HttpLib2ErrorWithResponse): response = e.response content = e.content response.status = 500 response.reason = str(e) elif isinstance(e, socket.timeout): content = b"Request Timeout" response = Response({ "content-type": "text/plain", "status": "408", "content-length": len(content) }) response.reason = "Request Timeout" else: content = str(e).encode('utf-8') response = Response({ "content-type": "text/plain", "status": "400", "content-length": len(content) }) response.reason = "Bad Request" else: raise return (response, content) class Response(dict): """An object more like email.message than httplib.HTTPResponse.""" """Is this response from our local cache""" fromcache = False """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """ version = 11 "Status code returned by server. " status = 200 """Reason phrase returned by server.""" reason = "Ok" previous = None def __init__(self, info): # info is either an email.message or # an httplib.HTTPResponse object. 
if isinstance(info, http.client.HTTPResponse): for key, value in info.getheaders(): key = key.lower() prev = self.get(key) if prev is not None: value = ', '.join((prev, value)) self[key] = value self.status = info.status self['status'] = str(self.status) self.reason = info.reason self.version = info.version elif isinstance(info, email.message.Message): for key, value in list(info.items()): self[key.lower()] = value self.status = int(self['status']) else: for key, value in info.items(): self[key.lower()] = value self.status = int(self.get('status', self.status)) def __getattr__(self, name): if name == 'dict': return self else: raise AttributeError(name)
allotria/intellij-community
refs/heads/master
python/helpers/tests/generator3_tests/data/FileSystemUtil/copy_skeleton_module_replaced_with_package/dst/before/foo/bar/baz.py
50
version = 1
pfnet/chainer
refs/heads/master
chainer/functions/array/tile.py
3
import six

import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check


class Tile(function_node.FunctionNode):

    """Tiling of an array.

    Forward is ``xp.tile(x, reps)``; backward folds the tiled gradient back
    onto the input shape by summing over every repetition axis.
    """

    def __init__(self, reps):
        # Normalize ``reps`` to a tuple of non-negative ints; reject anything else.
        if isinstance(reps, six.integer_types):
            self.reps = (reps,)
        elif isinstance(reps, tuple) and all(
                isinstance(x, six.integer_types) for x in reps):
            self.reps = reps
        else:
            msg = 'reps must be int or tuple of ints.\n' \
                  'Actual: {0}'.format(type(reps))
            raise TypeError(msg)

        if not all(x >= 0 for x in self.reps):
            raise ValueError('All elements in reps must be zero or larger')

    def check_type_forward(self, in_types):
        # Exactly one input array is expected.
        type_check.expect(in_types.size() == 1)

    def forward(self, inputs):
        # Remember the original shape; backward() needs it to fold the
        # gradient back down to the input's dimensionality.
        self._in_shape = inputs[0].shape
        xp = backend.get_array_module(*inputs)
        return xp.tile(inputs[0], self.reps),

    def backward(self, indexes, grad_outputs):
        reps = self.reps
        shape = tuple(self._in_shape)
        ndim = len(shape)

        # Ensure input and reps have the same length.
        # (numpy.tile semantics: the shorter of the two is left-padded with 1s.)
        if ndim > len(reps):
            reps = (1,) * (ndim - len(reps)) + reps
        elif ndim < len(reps):
            shape = (1,) * (len(reps) - ndim) + shape

        gy, = grad_outputs

        # Reshape so that base axis and reps axis can be distinguished.
        # Axis 2*i is the repetition count for original axis i, axis 2*i+1
        # is the original extent, i.e. (reps[0], shape[0], reps[1], shape[1], ...).
        new_shape = []
        for i in range(gy.ndim):
            new_shape.append(reps[i])
            new_shape.append(shape[i])
        new_shape = tuple(new_shape)

        # Sum along reps axis (the even-numbered axes introduced above),
        # which accumulates the gradient contributions of all tiled copies.
        reps_axis = tuple(range(0, 2 * gy.ndim, 2))
        gy = gy.reshape(new_shape)
        gy = chainer.functions.sum(gy, axis=reps_axis)
        if ndim < len(reps):
            # Drop the leading broadcast axes so the gradient matches the
            # original (lower-rank) input shape.
            return gy.reshape(self._in_shape),
        else:
            return gy,


def tile(x, reps):
    """Construct an array by tiling a given array.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable. Let the length of ``reps`` be ``d``. If
            ``x.ndim < d``, ``x`` is treated as ``d``-dimensional array by
            prepending new axes. For example, when the shape of ``x`` is
            ``(2,)`` and tiled with 2-dim repetitions, ``x`` is treated as
            the shape ``(1, 2)``. If ``x.ndim > d``, ``reps`` is treated as
            ``x.ndim``-dimensional by pre-pending 1's. For example, when the
            shape of ``x`` is ``(2, 3, 2, 3)``, the 2-dim ``reps`` of
            ``(2, 2)`` is treated as ``(1, 1, 2, 2)``.
        reps (:class:`int` or :class:`tuple` of :class:`int` s):
            The number of times which ``x`` is replicated along each axis.

    Returns:
        ~chainer.Variable: The tiled output Variable.
        Let the length of ``reps`` be ``d``, the output has the dimension of
        ``max(d, x.ndim)``.

    .. admonition:: Example

        >>> x = np.array([0, 1, 2])
        >>> x.shape
        (3,)
        >>> y = F.tile(x, 2)
        >>> y.shape
        (6,)
        >>> y.array
        array([0, 1, 2, 0, 1, 2])
        >>> y = F.tile(x, (2, 2))
        >>> y.shape
        (2, 6)
        >>> y.array
        array([[0, 1, 2, 0, 1, 2],
               [0, 1, 2, 0, 1, 2]])
        >>> y = F.tile(x, (2, 1, 2))
        >>> y.shape
        (2, 1, 6)
        >>> y.array
        array([[[0, 1, 2, 0, 1, 2]],
        <BLANKLINE>
               [[0, 1, 2, 0, 1, 2]]])
        >>> x = np.array([[1, 2], [3, 4]])
        >>> x.shape
        (2, 2)
        >>> y = F.tile(x, 2)
        >>> y.shape
        (2, 4)
        >>> y.array
        array([[1, 2, 1, 2],
               [3, 4, 3, 4]])
        >>> y = F.tile(x, (2, 2))
        >>> y.shape
        (4, 4)
        >>> y.array
        array([[1, 2, 1, 2],
               [3, 4, 3, 4],
               [1, 2, 1, 2],
               [3, 4, 3, 4]])
        >>> y = F.tile(x, (2, 1, 2))
        >>> y.shape
        (2, 2, 4)
        >>> y.array
        array([[[1, 2, 1, 2],
                [3, 4, 3, 4]],
        <BLANKLINE>
               [[1, 2, 1, 2],
                [3, 4, 3, 4]]])

    """
    return Tile(reps).apply((x,))[0]
geary/voter-info-2012-test
refs/heads/master
voter-info/shapes/makepolys.py
12
#!/usr/bin/env python # makepolys.py import codecs import json import math import os import random import re import shutil import stat import sys import time from geo import Geo import shpUtils import states #states = json.load( open('states.json') ) jsonpath = 'json' shapespath = 'shapefiles' geo = Geo() keysep = '|' states.byNumber = {} useOther = { 'CT': ( 'towns', 'cs09_d00' ), 'MA': ( 'towns', 'cs25_d00' ), 'NH': ( 'towns', 'cs33_d00' ), 'VT': ( 'towns', 'cs50_d00' ), 'KS': ( 'congressional', 'cd20_110' ), 'NE': ( 'congressional', 'cd31_110' ), 'NM': ( 'congressional', 'cd35_110' ), } districtNames = { 'CD1': 'First Congressional District', 'CD2': 'Second Congressional District', 'CD3': 'Third Congressional District', 'CD4': 'Fourth Congressional District', } def loadshapefile( filename ): print 'Loading shapefile %s' % filename t1 = time.time() shapefile = shpUtils.loadShapefile( '%s/%s' %( shapespath, filename ) ) t2 = time.time() print '%0.3f seconds load time' %( t2 - t1 ) return shapefile #def randomColor(): # def hh(): return '%02X' %( random.random() *128 + 96 ) # return hh() + hh() + hh() featuresByName = {} def featureByName( feature ): info = feature['info'] name = info['NAME'] if name not in featuresByName: featuresByName[name] = { 'feature': feature #, #'color': randomColor() } return featuresByName[name] #def filterCONUS( features ): # result = [] # for feature in features: # shape = feature['shape'] # if shape['type'] != 5: continue # info = feature['info'] # state = int(info['STATE']) # if state == 2: continue # Alaska # if state == 15: continue # Hawaii # if state == 72: continue # Puerto Rico # result.append( feature ) # return result def featuresBounds( features ): bounds = [ [ None, None ], [ None, None ] ] for feature in features: shape = feature['shape'] if shape['type'] == 5: for part in shape['parts']: bounds = geo.extendBounds( bounds, part['bounds'] ) return bounds def writeFile( filename, data ): f = open( filename, 'wb' ) f.write( 
data ) f.close() def readShapefile( filename ): print '----------------------------------------' print 'Loading %s' % filename shapefile = loadshapefile( filename ) features = shapefile['features'] print '%d features' % len(features) #conus = filterCONUS( features ) #conusBounds = featuresBounds( conus ) #stateFeatures = filterCONUS( stateFeatures ) #print '%d features in CONUS states' % len(stateFeatures) #writeFile( 'features.csv', shpUtils.dumpFeatureInfo(features) ) nPoints = nPolys = 0 places = {} for feature in features: shape = feature['shape'] if shape['type'] != 5: continue info = feature['info'] name = info['NAME'].decode( 'cp850' ).encode( 'utf-8' ) name = re.sub( '^(\d+)\x00.*$', 'CD\\1', name ) # congressional district name = districtNames.get( name, name ) state = info['STATE'] key = name + keysep + state if key not in places: places[key] = { 'name': name, 'state': state, 'maxarea': 0.0, 'bounds': [ [ None, None ], [ None, None ] ], 'shapes': [] } place = places[key] shapes = place['shapes'] for part in shape['parts']: nPolys += 1 points = part['points'] n = len(points) - 1 nPoints += n pts = [] area = part['area'] if area == 0: continue bounds = part['bounds'] place['bounds'] = geo.extendBounds( place['bounds'], bounds ) centroid = part['centroid'] if area > place['maxarea']: place['centroid'] = centroid place['maxarea'] = area points = part['points'] for j in xrange(n): point = points[j] #pts.append( '[%.4f,%.4f]' %( float(point[0]), float(point[1]) ) ) pts.append( '{x:%.4f,y:%.4f}' %( float(point[0]), float(point[1]) ) ) #shapes.append( '{area:%.4f,bounds:[[%.4f,%.4f],[%.4f,%.4f]],centroid:[%.4f,%.4f],points:[%s]}' %( shapes.append( '{points:[%s]}' %( #area, #bounds[0][0], bounds[0][1], #bounds[1][0], bounds[1][1], #centroid[0], centroid[1], ','.join(pts) ) ) print '%d points in %d places' %( nPoints, len(places) ) return shapefile, places def writeUS( places, path ): json = [] keys = places.keys() keys.sort() for key in keys: abbr = 
states.byNumber[ places[key]['state'] ]['abbr'].lower() writeJSON( '%s.js' % abbr, getPlaceJSON( places, key, abbr, 'state' ) ) #def writeStates( places, path ): # p = {} # for k in places: # if places[k] != None: # p[k] = places[k] # places = p # keys = places.keys() # keys.sort() # for key in keys: # name, number = key.split(keysep) # state = states.byNumber[number] # state['json'].append( getPlaceJSON( places, key, state['abbr'].lower(), 'county' ) ) # for state in states.array: # writeJSON( path, state['abbr'].lower(), state['json'] ) def writeJSON( path, json ): file = '%s/%s' %( jsonpath, path ) print 'Writing %s' % file writeFile( file, 'GoogleElectionMap.shapeReady(%s)' %( json ) ) def getPlaceJSON( places, key, state, type ): place = places[key] if not place: return '' bounds = place['bounds'] centroid = place['centroid'] return '{name:"%s", type:"%s",state:"%s",bounds:[[%.4f,%.4f],[%.4f,%.4f]],centroid:[%.4f,%.4f],shapes:[%s]}' %( key.split(keysep)[0], type, state, bounds[0][0], bounds[0][1], bounds[1][0], bounds[1][1], centroid[0], centroid[1], ','.join(place['shapes']) ) def generateUS( detail, path='' ): shapefile, places = readShapefile( 'states/st99_d00_shp-%s/st99_d00.shp' % detail ) for key in places: name, number = key.split(keysep) state = states.byName[name] state['json'] = [] state['counties'] = [] state['number'] = number states.byNumber[number] = state writeUS( places, path ) #def generateStates( detail, path ): # shapefile, places = readShapefile( 'counties/co99_d00_shp-%s/co99_d00.shp' % detail ) # for key, place in places.iteritems(): # name, number = key.split(keysep) # state = states.byNumber[number] # abbr = state['abbr'] # if abbr not in useOther: # state['counties'].append( place ) # else: # places[key] = None # for abbr, file in useOther.iteritems(): # state = states.byAbbr[abbr] # number = state['number'] # othershapefile, otherplaces = readShapefile( # '%(base)s/%(name)s_shp-%(detail)s/%(name)s.shp' %{ # 'base': file[0], # 'name': 
file[1], # 'detail': detail # } ) # for key, place in otherplaces.iteritems(): # name, number = key.split(keysep) # state = states.byNumber[number] # state['counties'].append( place ) # places[key] = place # writeStates( places, path ) #generateUS( 0, 'full' ) #generateUS( 25, '25' ) generateUS( '00' ) #generateStates( 80, 'detailed' ) print 'Done!'
Omegaphora/external_chromium_org
refs/heads/lp5.1
tools/clang/scripts/test_tool.py
27
#!/usr/bin/env python # Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Test harness for chromium clang tools.""" import difflib import glob import json import os import os.path import subprocess import shutil import sys def _GenerateCompileCommands(files, include_paths): """Returns a JSON string containing a compilation database for the input.""" include_path_flags = ' '.join('-I %s' % include_path for include_path in include_paths) return json.dumps([{'directory': '.', 'command': 'clang++ -fsyntax-only %s -c %s' % ( include_path_flags, f), 'file': f} for f in files], indent=2) def _NumberOfTestsToString(tests): """Returns an English describing the number of tests.""" return "%d test%s" % (tests, 's' if tests != 1 else '') def main(argv): if len(argv) < 1: print 'Usage: test_tool.py <clang tool>' print ' <clang tool> is the clang tool to be tested.' sys.exit(1) tool_to_test = argv[0] tools_clang_scripts_directory = os.path.dirname(os.path.realpath(__file__)) tools_clang_directory = os.path.dirname(tools_clang_scripts_directory) test_directory_for_tool = os.path.join( tools_clang_directory, tool_to_test, 'tests') compile_database = os.path.join(test_directory_for_tool, 'compile_commands.json') source_files = glob.glob(os.path.join(test_directory_for_tool, '*-original.cc')) actual_files = ['-'.join([source_file.rsplit('-', 1)[0], 'actual.cc']) for source_file in source_files] expected_files = ['-'.join([source_file.rsplit('-', 1)[0], 'expected.cc']) for source_file in source_files] include_paths = [] include_paths.append( os.path.realpath(os.path.join(tools_clang_directory, '../..'))) # Many gtest headers expect to have testing/gtest/include in the include # search path. include_paths.append( os.path.realpath(os.path.join(tools_clang_directory, '../..', 'testing/gtest/include'))) try: # Set up the test environment. 
for source, actual in zip(source_files, actual_files): shutil.copyfile(source, actual) # Stage the test files in the git index. If they aren't staged, then # run_tools.py will skip them when applying replacements. args = ['git', 'add'] args.extend(actual_files) subprocess.check_call(args) # Generate a temporary compilation database to run the tool over. with open(compile_database, 'w') as f: f.write(_GenerateCompileCommands(actual_files, include_paths)) args = ['python', os.path.join(tools_clang_scripts_directory, 'run_tool.py'), tool_to_test, test_directory_for_tool] args.extend(actual_files) run_tool = subprocess.Popen(args, stdout=subprocess.PIPE) stdout, _ = run_tool.communicate() if run_tool.returncode != 0: print 'run_tool failed:\n%s' % stdout sys.exit(1) passed = 0 failed = 0 for expected, actual in zip(expected_files, actual_files): print '[ RUN ] %s' % os.path.relpath(actual) expected_output = actual_output = None with open(expected, 'r') as f: expected_output = f.readlines() with open(actual, 'r') as f: actual_output = f.readlines() if actual_output != expected_output: failed += 1 for line in difflib.unified_diff(expected_output, actual_output, fromfile=os.path.relpath(expected), tofile=os.path.relpath(actual)): sys.stdout.write(line) print '[ FAILED ] %s' % os.path.relpath(actual) # Don't clean up the file on failure, so the results can be referenced # more easily. continue print '[ OK ] %s' % os.path.relpath(actual) passed += 1 os.remove(actual) if failed == 0: os.remove(compile_database) print '[==========] %s ran.' % _NumberOfTestsToString(len(source_files)) if passed > 0: print '[ PASSED ] %s.' % _NumberOfTestsToString(passed) if failed > 0: print '[ FAILED ] %s.' % _NumberOfTestsToString(failed) finally: # No matter what, unstage the git changes we made earlier to avoid polluting # the index. args = ['git', 'reset', '--quiet', 'HEAD'] args.extend(actual_files) subprocess.call(args) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
Yen-Chung-En/w16b_test
refs/heads/master
static/Brython3.1.3-20150514-095342/Lib/site-packages/pygame/font.py
601
from browser import html
from . import surface


def init():
    # Browser canvas backend needs no initialization.
    return


def quit():
    # Nothing to tear down for the canvas backend.
    return


def get_init():
    """The font module is always considered initialized in the browser."""
    return True


def get_default_font():
    """Return the default CSS font specification used by the canvas."""
    return "10px sans-serif"


class Font:
    """Minimal pygame.font.Font stand-in rendering via an HTML5 canvas."""

    def __init__(self, obj, size):
        # obj: font name/object as accepted by the canvas; size: point size.
        self._obj = obj
        self._size = size

    def render(self, text, antialias, color, background=None):
        """Render *text* onto a fresh canvas and wrap it in a Surface.

        color and background are (r, g, b) tuples.  antialias is accepted
        for pygame API compatibility but the canvas always antialiases.
        """
        _canvas = html.CANVAS()
        _ctx = _canvas.getContext('2d')
        if background is not None:
            # BUGFIX: the background rectangle must be filled with the
            # *background* color; the original filled it with the text color,
            # silently ignoring the background argument.
            _ctx.fillStyle = 'rgb(%s,%s,%s)' % background
            _ctx.fillRect(0, 0, _canvas.width, _canvas.height)
        _ctx.fillStyle = 'rgb(%s,%s,%s)' % color
        _ctx.fillText(text, 0, 0)
        return surface.Surface(surf=_canvas)  # surface

    def size(self, text):
        """Return the (width, height) the rendered *text* would occupy."""
        _canvas = html.CANVAS(width=1000, height=1000)
        _ctx = _canvas.getContext('2d')
        #_ctx.fillText(text, 0, 0)

        # get text metrics
        # NOTE(review): standard canvas TextMetrics exposes .width but not
        # .height -- confirm Brython supplies a height attribute here.
        _metrics = _ctx.measureText(text);
        return (_metrics.width, _metrics.height)
ricklupton/ipysankeywidget
refs/heads/master
ipysankeywidget/_version.py
1
version_info = (0, 4, 1, 'final', 0) _specifier_ = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'final': ''} __version__ = '%s.%s.%s%s'%(version_info[0], version_info[1], version_info[2], '' if version_info[3]=='final' else _specifier_[version_info[3]]+str(version_info[4]))
MaplePlan/djwp
refs/heads/master
django/conf/locale/zh_TW/__init__.py
12133432
SerCeMan/intellij-community
refs/heads/master
python/testData/refactoring/move/moveFunctionFromUnimportableModule/after/src/src-unimportable.py
12133432
d/hamster-applet
refs/heads/master
src/docky_control/2.1/hamster_control.py
1
#!/usr/bin/env python # # Copyright (C) 2010 Toms Baugis # # Original code from Banshee control, # Copyright (C) 2009-2010 Jason Smith, Rico Tzschichholz # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import atexit import gobject import sys, os from subprocess import Popen try: import gtk from dockmanager.dockmanager import DockManagerItem, DockManagerSink, DOCKITEM_IFACE from signal import signal, SIGTERM from sys import exit except ImportError, e: print e exit() from hamster import client from hamster.utils import stuff, i18n i18n.setup_i18n() class HamsterItem(DockManagerItem): def __init__(self, sink, path): DockManagerItem.__init__(self, sink, path) self.storage = client.Storage() self.storage.connect("facts-changed", lambda storage: self.refresh_hamster()) self.storage.connect("activities-changed", lambda storage: self.refresh_hamster()) self.id_map = {} #menu items self.update_text() self.add_actions() gobject.timeout_add_seconds(60, self.refresh_hamster) def refresh_hamster(self): try: self.update_text() finally: # we want to go on no matter what, so in case of any error we find out about it sooner return True def update_text(self): today = self.storage.get_todays_facts() if today and today[-1].end_time is None: fact = today[-1] self.set_tooltip("%s - %s" % (fact.activity, fact.category)) self.set_badge(stuff.format_duration(fact.delta, human=False)) else: self.set_tooltip(_("No 
activity")) self.reset_badge() def menu_pressed(self, menu_id): if self.id_map[menu_id] == _("Overview"): Popen(["hamster-time-tracker", "overview"]) elif self.id_map[menu_id] == _("Preferences"): Popen(["hamster-time-tracker", "preferences"]) self.add_actions() # TODO - figure out why is it that we have to regen all menu items after each click def add_actions(self): # first clear the menu for k in self.id_map.keys(): self.remove_menu_item(k) self.id_map = {} # now add buttons self.add_menu_item(_("Overview"), "") self.add_menu_item(_("Preferences"), "preferences-desktop-personal") class HamsterSink(DockManagerSink): def item_path_found(self, pathtoitem, item): if item.Get(DOCKITEM_IFACE, "DesktopFile", dbus_interface="org.freedesktop.DBus.Properties").endswith ("hamster-time-tracker.desktop"): self.items[pathtoitem] = HamsterItem(self, pathtoitem) hamstersink = HamsterSink() def cleanup(): hamstersink.dispose() if __name__ == "__main__": mainloop = gobject.MainLoop(is_running=True) atexit.register (cleanup) signal(SIGTERM, lambda signum, stack_frame: exit(1)) while mainloop.is_running(): mainloop.run()
nathanpucheril/PyBayes
refs/heads/master
PyBayes/utils.py
1
# Utilities for Bayes Net
# ________________________
# @author Nathan Pucheril
# @author Keith Hardaway


def islist_like(iterable):
    """Return True if *iterable* is an iterable container, excluding
    strings and dicts (which iterate but are not list-like here)."""
    if isinstance(iterable, (str, dict)):
        return False
    return hasattr(iterable, '__iter__')
dmytroKarataiev/MachineLearning
refs/heads/master
learning/algorithms/svm/svm.py
1
# Udacity-style SVM exercise: train a linear SVC on the synthetic terrain
# data and report test-set accuracy.  (Python 2 script: uses the `print`
# statement and course-local helper modules.)
import sys
from learning.algorithms.prep_terrain_data import makeTerrainData
from learning.algorithms.class_vis import prettyPicture, output_image
import matplotlib.pyplot as plt
import copy
import numpy as np
import pylab as pl

# Synthetic train/test split produced by the course helper.
features_train, labels_train, features_test, labels_test = makeTerrainData()


########################## SVM #################################
### we handle the import statement and SVC creation for you here
from sklearn.svm import SVC
# NOTE(review): gamma has no effect with kernel="linear"; it only matters
# for rbf/poly/sigmoid kernels -- confirm it was meant to be removed.
clf = SVC(kernel="linear", gamma=1.0)


#### now your job is to fit the classifier
#### using the training features/labels, and to
#### make a set of predictions on the test data
clf.fit(features_train, labels_train)


#### store your predictions in a list named pred
pred = clf.predict(features_test)


from sklearn.metrics import accuracy_score
# accuracy_score is symmetric in its first two arguments, so the
# (pred, labels) order still yields the correct accuracy.
acc = accuracy_score(pred, labels_test)


def submitAccuracy():
    # Render the decision boundary to test.png, emit it for the grader,
    # and return the precomputed accuracy.
    prettyPicture(clf, features_test, labels_test)
    output_image("test.png", "png", open("test.png", "rb").read())
    return acc

print submitAccuracy()
Jumpscale/jumpscale6_core
refs/heads/master
lib/JumpScale/baselib/serializers/SerializerPickle.py
1
import cPickle


class SerializerPickle(object):
    """Serializer that round-trips Python objects via the pickle protocol.

    NOTE(review): pickle is not safe on untrusted input -- loads() will
    execute arbitrary code embedded in a malicious payload.  Only feed it
    data produced by a trusted dumps() call.
    """

    def dumps(self,obj):
        # Serialize obj to a pickle byte string (cPickle is the
        # C-accelerated Python 2 module).
        return cPickle.dumps(obj)

    def loads(self,s):
        # Reconstruct the object previously produced by dumps().
        return cPickle.loads(s)
0ps/TeamTalk
refs/heads/master
win-client/3rdParty/src/json/test/generate_expected.py
257
import glob import os.path for path in glob.glob( '*.json' ): text = file(path,'rt').read() target = os.path.splitext(path)[0] + '.expected' if os.path.exists( target ): print 'skipping:', target else: print 'creating:', target file(target,'wt').write(text)