repo_name: string (lengths 5 to 100)
ref: string (lengths 12 to 67)
path: string (lengths 4 to 244)
copies: string (lengths 1 to 8)
content: string (lengths 0 to 1.05M)
iuliat/nova
refs/heads/master
nova/db/base.py
64
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base class for classes that need modular database access."""

from oslo_config import cfg
from oslo_utils import importutils

db_driver_opt = cfg.StrOpt('db_driver',
                           default='nova.db',
                           help='The driver to use for database access')

CONF = cfg.CONF
CONF.register_opt(db_driver_opt)


class Base(object):
    """DB driver is injected in the init method."""

    def __init__(self, db_driver=None):
        super(Base, self).__init__()
        if not db_driver:
            db_driver = CONF.db_driver
        self.db = importutils.import_module(db_driver)
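The Base class above does one thing: it resolves a database driver module at construction time and exposes it as self.db. A minimal sketch of how a consumer might use it; the class name and the driver function called are illustrative, not taken from the file above:

    # Hypothetical consumer of nova.db.base.Base: any class needing DB access
    # inherits from Base and talks to the configured driver through self.db.
    from nova.db.base import Base


    class InstanceLister(Base):  # illustrative example class
        def list_instances(self, context):
            # self.db is whatever module CONF.db_driver names (default 'nova.db'),
            # so the storage backend stays swappable via configuration.
            return self.db.instance_get_all(context)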
kdwink/intellij-community
refs/heads/master
python/lib/Lib/distutils/command/config.py
138
"""distutils.command.config Implements the Distutils 'config' command, a (mostly) empty command class that exists mainly to be sub-classed by specific module distributions and applications. The idea is that while every "config" command is different, at least they're all named the same, and users always see "config" in the list of standard commands. Also, this is a good place to put common configure-like tasks: "try to compile this C code", or "figure out where this header file lives". """ # This module should be kept compatible with Python 2.1. __revision__ = "$Id: config.py 37828 2004-11-10 22:23:15Z loewis $" import sys, os, string, re from types import * from distutils.core import Command from distutils.errors import DistutilsExecError from distutils.sysconfig import customize_compiler from distutils import log LANG_EXT = {'c': '.c', 'c++': '.cxx'} class config (Command): description = "prepare to build" user_options = [ ('compiler=', None, "specify the compiler type"), ('cc=', None, "specify the compiler executable"), ('include-dirs=', 'I', "list of directories to search for header files"), ('define=', 'D', "C preprocessor macros to define"), ('undef=', 'U', "C preprocessor macros to undefine"), ('libraries=', 'l', "external C libraries to link with"), ('library-dirs=', 'L', "directories to search for external C libraries"), ('noisy', None, "show every action (compile, link, run, ...) taken"), ('dump-source', None, "dump generated source files before attempting to compile them"), ] # The three standard command methods: since the "config" command # does nothing by default, these are empty. def initialize_options (self): self.compiler = None self.cc = None self.include_dirs = None #self.define = None #self.undef = None self.libraries = None self.library_dirs = None # maximal output for now self.noisy = 1 self.dump_source = 1 # list of temporary files generated along-the-way that we have # to clean at some point self.temp_files = [] def finalize_options (self): if self.include_dirs is None: self.include_dirs = self.distribution.include_dirs or [] elif type(self.include_dirs) is StringType: self.include_dirs = string.split(self.include_dirs, os.pathsep) if self.libraries is None: self.libraries = [] elif type(self.libraries) is StringType: self.libraries = [self.libraries] if self.library_dirs is None: self.library_dirs = [] elif type(self.library_dirs) is StringType: self.library_dirs = string.split(self.library_dirs, os.pathsep) def run (self): pass # Utility methods for actual "config" commands. The interfaces are # loosely based on Autoconf macros of similar names. Sub-classes # may use these freely. def _check_compiler (self): """Check that 'self.compiler' really is a CCompiler object; if not, make it one. """ # We do this late, and only on-demand, because this is an expensive # import. 
from distutils.ccompiler import CCompiler, new_compiler if not isinstance(self.compiler, CCompiler): self.compiler = new_compiler(compiler=self.compiler, dry_run=self.dry_run, force=1) customize_compiler(self.compiler) if self.include_dirs: self.compiler.set_include_dirs(self.include_dirs) if self.libraries: self.compiler.set_libraries(self.libraries) if self.library_dirs: self.compiler.set_library_dirs(self.library_dirs) def _gen_temp_sourcefile (self, body, headers, lang): filename = "_configtest" + LANG_EXT[lang] file = open(filename, "w") if headers: for header in headers: file.write("#include <%s>\n" % header) file.write("\n") file.write(body) if body[-1] != "\n": file.write("\n") file.close() return filename def _preprocess (self, body, headers, include_dirs, lang): src = self._gen_temp_sourcefile(body, headers, lang) out = "_configtest.i" self.temp_files.extend([src, out]) self.compiler.preprocess(src, out, include_dirs=include_dirs) return (src, out) def _compile (self, body, headers, include_dirs, lang): src = self._gen_temp_sourcefile(body, headers, lang) if self.dump_source: dump_file(src, "compiling '%s':" % src) (obj,) = self.compiler.object_filenames([src]) self.temp_files.extend([src, obj]) self.compiler.compile([src], include_dirs=include_dirs) return (src, obj) def _link (self, body, headers, include_dirs, libraries, library_dirs, lang): (src, obj) = self._compile(body, headers, include_dirs, lang) prog = os.path.splitext(os.path.basename(src))[0] self.compiler.link_executable([obj], prog, libraries=libraries, library_dirs=library_dirs, target_lang=lang) if self.compiler.exe_extension is not None: prog = prog + self.compiler.exe_extension self.temp_files.append(prog) return (src, obj, prog) def _clean (self, *filenames): if not filenames: filenames = self.temp_files self.temp_files = [] log.info("removing: %s", string.join(filenames)) for filename in filenames: try: os.remove(filename) except OSError: pass # XXX these ignore the dry-run flag: what to do, what to do? even if # you want a dry-run build, you still need some sort of configuration # info. My inclination is to make it up to the real config command to # consult 'dry_run', and assume a default (minimal) configuration if # true. The problem with trying to do it here is that you'd have to # return either true or false from all the 'try' methods, neither of # which is correct. # XXX need access to the header search path and maybe default macros. def try_cpp (self, body=None, headers=None, include_dirs=None, lang="c"): """Construct a source file from 'body' (a string containing lines of C/C++ code) and 'headers' (a list of header files to include) and run it through the preprocessor. Return true if the preprocessor succeeded, false if there were any errors. ('body' probably isn't of much use, but what the heck.) """ from distutils.ccompiler import CompileError self._check_compiler() ok = 1 try: self._preprocess(body, headers, include_dirs, lang) except CompileError: ok = 0 self._clean() return ok def search_cpp (self, pattern, body=None, headers=None, include_dirs=None, lang="c"): """Construct a source file (just like 'try_cpp()'), run it through the preprocessor, and return true if any line of the output matches 'pattern'. 'pattern' should either be a compiled regex object or a string containing a regex. If both 'body' and 'headers' are None, preprocesses an empty file -- which can be useful to determine the symbols the preprocessor and compiler set by default. 
""" self._check_compiler() (src, out) = self._preprocess(body, headers, include_dirs, lang) if type(pattern) is StringType: pattern = re.compile(pattern) file = open(out) match = 0 while 1: line = file.readline() if line == '': break if pattern.search(line): match = 1 break file.close() self._clean() return match def try_compile (self, body, headers=None, include_dirs=None, lang="c"): """Try to compile a source file built from 'body' and 'headers'. Return true on success, false otherwise. """ from distutils.ccompiler import CompileError self._check_compiler() try: self._compile(body, headers, include_dirs, lang) ok = 1 except CompileError: ok = 0 log.info(ok and "success!" or "failure.") self._clean() return ok def try_link (self, body, headers=None, include_dirs=None, libraries=None, library_dirs=None, lang="c"): """Try to compile and link a source file, built from 'body' and 'headers', to executable form. Return true on success, false otherwise. """ from distutils.ccompiler import CompileError, LinkError self._check_compiler() try: self._link(body, headers, include_dirs, libraries, library_dirs, lang) ok = 1 except (CompileError, LinkError): ok = 0 log.info(ok and "success!" or "failure.") self._clean() return ok def try_run (self, body, headers=None, include_dirs=None, libraries=None, library_dirs=None, lang="c"): """Try to compile, link to an executable, and run a program built from 'body' and 'headers'. Return true on success, false otherwise. """ from distutils.ccompiler import CompileError, LinkError self._check_compiler() try: src, obj, exe = self._link(body, headers, include_dirs, libraries, library_dirs, lang) self.spawn([exe]) ok = 1 except (CompileError, LinkError, DistutilsExecError): ok = 0 log.info(ok and "success!" or "failure.") self._clean() return ok # -- High-level methods -------------------------------------------- # (these are the ones that are actually likely to be useful # when implementing a real-world config command!) def check_func (self, func, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=0, call=0): """Determine if function 'func' is available by constructing a source file that refers to 'func', and compiles and links it. If everything succeeds, returns true; otherwise returns false. The constructed source file starts out by including the header files listed in 'headers'. If 'decl' is true, it then declares 'func' (as "int func()"); you probably shouldn't supply 'headers' and set 'decl' true in the same call, or you might get errors about a conflicting declarations for 'func'. Finally, the constructed 'main()' function either references 'func' or (if 'call' is true) calls it. 'libraries' and 'library_dirs' are used when linking. """ self._check_compiler() body = [] if decl: body.append("int %s ();" % func) body.append("int main () {") if call: body.append(" %s();" % func) else: body.append(" %s;" % func) body.append("}") body = string.join(body, "\n") + "\n" return self.try_link(body, headers, include_dirs, libraries, library_dirs) # check_func () def check_lib (self, library, library_dirs=None, headers=None, include_dirs=None, other_libraries=[]): """Determine if 'library' is available to be linked against, without actually checking that any particular symbols are provided by it. 'headers' will be used in constructing the source file to be compiled, but the only effect of this is to check if all the header files listed are available. 
Any libraries listed in 'other_libraries' will be included in the link, in case 'library' has symbols that depend on other libraries. """ self._check_compiler() return self.try_link("int main (void) { }", headers, include_dirs, [library]+other_libraries, library_dirs) def check_header (self, header, include_dirs=None, library_dirs=None, lang="c"): """Determine if the system header file named by 'header_file' exists and can be found by the preprocessor; return true if so, false otherwise. """ return self.try_cpp(body="/* No body */", headers=[header], include_dirs=include_dirs) # class config def dump_file (filename, head=None): if head is None: print filename + ":" else: print head file = open(filename) sys.stdout.write(file.read()) file.close()
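The high-level methods (check_header, check_func, check_lib) are meant to be called from a project-specific subclass. A minimal sketch of such a subclass, assuming the standard distutils install and that it is wired into setup() via cmdclass; the header and function names checked are illustrative:

    # Sketch of a project-specific 'config' command; registering it with
    # setup(cmdclass={'config': my_config}) makes "python setup.py config" run it.
    from distutils.command.config import config
    from distutils import log


    class my_config(config):
        def run(self):
            # check_header() preprocesses a tiny file that includes the header.
            if self.check_header("zlib.h"):
                log.info("zlib.h is available")
            # check_func() compiles and links a stub that calls the function.
            if self.check_func("gettimeofday", headers=["sys/time.h"], call=1):
                log.info("gettimeofday() links and is callable")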
constverum/PyProxyChecker
refs/heads/master
proxybroker/__init__.py
2
""" Copyright © 2015-2018 Constverum <[email protected]>. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __title__ = 'ProxyBroker' __package__ = 'proxybroker' __version__ = '0.3.2' __short_description__ = '[Finder/Checker/Server] Finds public proxies from multiple sources and concurrently checks them. Supports HTTP(S) and SOCKS4/5.' # noqa __author__ = 'Constverum' __author_email__ = '[email protected]' __url__ = 'https://github.com/constverum/ProxyBroker' __license__ = 'Apache License, Version 2.0' __copyright__ = 'Copyright 2015-2018 Constverum' from .proxy import Proxy # noqa from .judge import Judge # noqa from .providers import Provider # noqa from .checker import Checker # noqa from .server import Server, ProxyPool # noqa from .api import Broker # noqa import logging # noqa import warnings # noqa logger = logging.getLogger('asyncio') logger.addFilter(logging.Filter('has no effect when using ssl')) warnings.simplefilter('always', UserWarning) warnings.simplefilter('once', DeprecationWarning) __all__ = (Proxy, Judge, Provider, Checker, Server, ProxyPool, Broker)
tiagocoutinho/bliss
refs/heads/master
bliss/config/plugins/emotion.py
1
# -*- coding: utf-8 -*- # # This file is part of the bliss project # # Copyright (c) 2016 Beamline Control Unit, ESRF # Distributed under the GNU LGPLv3. See LICENSE for more info. from __future__ import absolute_import import os import sys import pkgutil import weakref from bliss.common.axis import Axis, AxisRef from bliss.common.encoder import Encoder from bliss.config.static import Config, get_config from bliss.common.tango import DeviceProxy from bliss.config.plugins.bliss import find_class import gevent import hashlib import sys __KNOWN_AXIS_PARAMS = { "name": str, "controller": str, "user_tag": lambda x: x.split(','), "unit": str, "steps_per_unit": float, "velocity": float, "acceleration": float, "backlash": float, "low_limit": float, "high_limit": float, } __KNOWN_CONTROLLER_PARAMS = ("name", "class", "plugin", "axes") __this_path = os.path.realpath(os.path.dirname(__file__)) def __get_controller_class_names(): return bliss.controllers.motors.__all__ def get_jinja2(): global __environment try: return __environment except NameError: from jinja2 import Environment, FileSystemLoader __environment = Environment(loader=FileSystemLoader(__this_path)) return __environment def get_item(cfg): klass = cfg.get('class') result = {'class': klass } if klass is None: result['icon'] = 'fa fa-gear' result['type'] = 'axis' else: result['icon'] = 'fa fa-gears' result['type'] = 'controller' return result def get_tree(cfg, perspective): item = get_item(cfg) name = cfg.get('name') ctrl_class = cfg.get("class") if ctrl_class is None: path = os.path.join(get_tree(cfg.parent, 'files')['path'], name) else: if perspective == "files": path = os.path.join(cfg.filename, name) else: path = name item['path'] = path return item def get_html(cfg): ctrl_class = cfg.get("class") if ctrl_class is None: return get_axis_html(cfg) else: return get_ctrl_html(cfg) def get_axis_html(cfg): name = cfg["name"] ctrl_class = cfg.parent.get("class") ctrl_name = cfg.parent.get("name") vars = dict(cfg.items()) filename = "emotion_" + ctrl_class + "_axis.html" html_template = get_jinja2().select_template([filename, "emotion_axis.html"]) extra_params = {} for key, value in vars.items(): if key not in __KNOWN_AXIS_PARAMS: extra_params[key] = dict(name=key, label=key.capitalize(), value=value) tags = cfg.get(Config.USER_TAG_KEY, []) if not isinstance(tags, (tuple, list)): tags = [tags] vars["tags"] = tags vars["controller_class"] = ctrl_class if ctrl_name: vars["controller_name"] = ctrl_name vars["params"] = extra_params vars["units"] = cfg.get("unit", "unit") controllers = list() vars["controllers"] = controllers for controller_name in __get_controller_class_names(): controllers.append({"class": controller_name}) vars["__tango_server__"] = __is_tango_device(name) return html_template.render(**vars) def get_ctrl_html(cfg): ctrl_class = cfg.get("class") vars = dict(cfg.items()) filename = "emotion_" + ctrl_class + ".html" html_template = get_jinja2().select_template([filename, "emotion_controller.html"]) extra_params = [] for key, value in vars.items(): if key not in __KNOWN_CONTROLLER_PARAMS: extra_params.append(dict(name=key, label=key.capitalize(), value=value)) vars["params"] = extra_params controllers = list() vars["controllers"] = controllers pkgpath = os.path.dirname(bliss.controllers.motors.__file__) for _, controller_name, _ in pkgutil.iter_modules([pkgpath]): controllers.append({"class": controller_name}) for axis in vars["axes"]: device = __is_tango_device(axis['name']) if device: vars["__tango_server__"] = True break else: 
vars["__tango_server__"] = False return html_template.render(**vars) def __is_tango_device(name): try: return DeviceProxy(name) is not None except: pass return False def __tango_apply_config(name): try: device = DeviceProxy(name) device.command_inout("ApplyConfig", True) msg = "'%s' configuration saved and applied to server!" % name msg_type = "success" except PyTango.DevFailed as df: msg = "'%s' configuration saved but <b>NOT</b> applied to " \ " server:\n%s" % (name, df[0].desc) msg_type = "warning" sys.excepthook(*sys.exc_info()) except Exception as e: msg = "'%s' configuration saved but <b>NOT</b> applied to " \ " server:\n%s" % (name, str(e)) msg_type = "warning" sys.excepthook(*sys.exc_info()) return msg, msg_type def controller_edit(cfg, request): import flask.json if request.method == "POST": form = dict([(k,v) for k,v in request.form.items() if v]) update_server = form.pop("__update_server__") == 'true' orig_name = form.pop("__original_name__") name = form.get("name", orig_name) result = dict(name=name) if name != orig_name: result["message"] = "Change of controller name not supported yet!" result["type"] = "danger" return flask.json.dumps(result) ctrl_cfg = cfg.get_config(orig_name) axes_data = {} objs = set() for param_name, param_value in form.items(): if " " in param_name: # axis param param_name, axis_name = param_name.split() obj = cfg.get_config(axis_name) try: param_value = __KNOWN_AXIS_PARAMS[param_name](param_value) except KeyError: pass else: # controller param obj = ctrl_cfg obj[param_name] = param_value objs.add(obj) axes_server_results = {} for obj in objs: obj.save() if update_server and obj != ctrl_cfg: name = obj["name"] axes_server_results[name] = __tango_apply_config(name) msg_type = "success" if update_server: if ctrl_cfg in objs: msg_type = "warning" msg = "'%s' configuration saved! " \ "TANGO server needs to be (re)started!" % name else: msg = "'%s' configuration applied!" % name for axis_name, axis_result in axes_server_results: msg += "<br/>" + axis_result['message'] axis_msg_type = axis_result['type'] if axis_msg_type != "success": msg_type = axis_msg_type else: msg = "'%s' configuration applied!" % name result["message"] = msg result["type"] = msg_type return flask.json.dumps(result) def axis_edit(cfg, request): import flask.json if request.method == "POST": form = dict([(k,v) for k,v in request.form.items() if v]) update_server = form.pop("__update_server__") == 'true' orig_name = form.pop("__original_name__") name = form["name"] result = dict(name=name) if name != orig_name: result["message"] = "Change of axis name not supported yet!" result["type"] = "danger" return flask.json.dumps(result) axis_cfg = cfg.get_config(orig_name) for k, v in form.iteritems(): try: v = __KNOWN_AXIS_PARAMS[k](v) except KeyError: pass axis_cfg[k] = v axis_cfg.save() if update_server: result["message"], result["type"] = __tango_apply_config(name) else: result["message"] = "'%s' configuration saved!" 
% name result["type"] = "success" return flask.json.dumps(result) __ACTIONS = \ { "add": [ {"id": "emotion_add_controller", "label": "Add controller", "icon": "fa fa-gears", "action": "plugin/emotion/add_controller", "disabled": True,}, {"id": "emotion_add_axis", "label": "Add axis", "icon": "fa fa-gears", "action": "plugin/emotion/add_axis", "disabled": True}],} def actions(): return __ACTIONS def add_controller(cfg, request): if request.method == "GET": return flask.json.dumps(dict(html="<h1>TODO</h1>", message="not implemented", type="danger")) def add_axis(cfg, request): if request.method == "GET": return flask.json.dumps(dict(html="<h1>TODO</h1>", message="not implemented", type="danger")) def create_objects_from_config_node(config, node): if 'axes' in node or 'encoders' in node: # asking for a controller obj_name = None else: obj_name = node.get('name') node = node.parent controller_class_name = node.get('class') controller_name = node.get('name') if controller_name is None: h = hashlib.md5() for axis_config in node.get('axes'): name = axis_config.get('name') if name is not None: h.update(name) controller_name = h.hexdigest() controller_class = find_class(node, "bliss.controllers.motors") controller_module = sys.modules[controller_class.__module__] axes = list() axes_names = list() encoders = list() encoders_names = list() switches = list() switches_names = list() shutters = list() shutters_names = list() for axis_config in node.get('axes'): axis_name = axis_config.get("name") if axis_name.startswith("$"): axis_class = AxisRef axis_name = axis_name.lstrip('$') else: axis_class_name = axis_config.get("class") if axis_class_name is None: axis_class = Axis else: axis_class = getattr(controller_module, axis_class_name) axes_names.append(axis_name) axes.append((axis_name, axis_class, axis_config)) for objects,objects_names,default_class,default_class_name,objects_config in\ ((encoders,encoders_names,Encoder,'',node.get('encoders',[])), (shutters,shutters_names,None,'Shutter',node.get('shutters',[])), (switches,switches_names,None,'Switch',node.get('switches',[])), ): for object_config in objects_config: object_name = object_config.get("name") object_class_name = object_config.get("class") object_config = _checkref(config,object_config) if object_class_name is None: object_class = default_class if object_class is None: try: object_class = getattr(controller_module, default_class_name) except AttributeError: pass else: object_class = getattr(controller_module, object_class_name) objects_names.append(object_name) objects.append((object_name, object_class, object_config)) controller = controller_class(controller_name, node, axes, encoders, shutters, switches) controller._update_refs(config) controller.initialize() all_names = axes_names + encoders_names + switches_names + shutters_names cache_dict = dict(zip(all_names, [controller]*len(all_names))) ctrl = cache_dict.pop(obj_name,None) if ctrl is not None: obj = create_object_from_cache(None, obj_name, controller) return { controller_name: controller, obj_name: obj }, cache_dict else: return {controller_name: controller }, cache_dict def create_object_from_cache(config, name, controller): for func in (controller.get_axis, controller.get_encoder, controller.get_switch, controller.get_shutter): try: return func(name) except KeyError: pass raise KeyError(name) def _checkref(config,cfg): obj_cfg = cfg.deep_copy() for key,value in obj_cfg.iteritems(): if isinstance(value,str) and value.startswith('$'): # convert reference to item from config obj = 
weakref.proxy(config.get(value)) obj_cfg[key] = obj return obj_cfg
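Both edit handlers above (controller_edit and axis_edit) coerce raw form strings through __KNOWN_AXIS_PARAMS before saving the configuration. A small standalone sketch of that conversion step; the parameter map is copied from the module, the form values are invented:

    # Conversion used by axis_edit()/controller_edit(): every known axis parameter
    # has a converter, anything unknown is kept as the raw string.
    KNOWN_AXIS_PARAMS = {
        "name": str,
        "controller": str,
        "user_tag": lambda x: x.split(','),
        "unit": str,
        "steps_per_unit": float,
        "velocity": float,
        "acceleration": float,
        "backlash": float,
        "low_limit": float,
        "high_limit": float,
    }

    form = {"name": "rotz", "velocity": "1.25", "user_tag": "sample,align", "custom_key": "abc"}
    converted = {}
    for key, value in form.items():
        try:
            value = KNOWN_AXIS_PARAMS[key](value)
        except KeyError:
            pass  # unknown keys are stored unconverted
        converted[key] = value
    # converted == {'name': 'rotz', 'velocity': 1.25,
    #               'user_tag': ['sample', 'align'], 'custom_key': 'abc'}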
QinerTech/QinerApps
refs/heads/master
openerp/addons/l10n_sa/__openerp__.py
23
# coding: utf-8
{
    'name': 'Saudi Arabia - Accounting',
    'version': '1.1',
    'author': 'DVIT.ME',
    'category': 'Localization/Account Charts',
    'description': """
Odoo Arabic localization for most arabic countries and Saudi Arabia.
This initially includes chart of accounts of USA translated to Arabic.
In future this module will include some payroll rules for ME .
""",
    'website': 'http://www.dvit.me',
    'depends': ['account', 'l10n_multilang'],
    'data': [
        'account.chart.template.xml',
        'account.account.template.csv',
        'account_chart_template_after.xml',
        'account_chart_template.yml',
    ],
    'demo': [],
    'test': [],
    'installable': True,
    'auto_install': False,
    'post_init_hook': 'load_translations',
}
hslee16/ansible-modules-extras
refs/heads/devel
cloud/openstack/os_group.py
67
#!/usr/bin/python # Copyright (c) 2016 IBM # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. try: import shade HAS_SHADE = True except ImportError: HAS_SHADE = False DOCUMENTATION = ''' --- module: os_group short_description: Manage OpenStack Identity Groups extends_documentation_fragment: openstack version_added: "2.1" author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)" description: - Manage OpenStack Identity Groups. Groups can be created, deleted or updated. Only the I(description) value can be updated. options: name: description: - Group name required: true description: description: - Group description required: false default: None state: description: - Should the resource be present or absent. choices: [present, absent] default: present requirements: - "python >= 2.6" - "shade" ''' EXAMPLES = ''' # Create a group named "demo" - os_group: cloud: mycloud state: present name: demo description: "Demo Group" # Update the description on existing "demo" group - os_group: cloud: mycloud state: present name: demo description: "Something else" # Delete group named "demo" - os_group: cloud: mycloud state: absent name: demo ''' RETURN = ''' group: description: Dictionary describing the group. returned: On success when I(state) is 'present'. 
type: dictionary contains: id: description: Unique group ID type: string sample: "ee6156ff04c645f481a6738311aea0b0" name: description: Group name type: string sample: "demo" description: description: Group description type: string sample: "Demo Group" domain_id: description: Domain for the group type: string sample: "default" ''' def _system_state_change(state, description, group): if state == 'present' and not group: return True if state == 'present' and description is not None and group.description != description: return True if state == 'absent' and group: return True return False def main(): argument_spec = openstack_full_argument_spec( name=dict(required=True), description=dict(required=False, default=None), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') name = module.params.pop('name') description = module.params.pop('description') state = module.params.pop('state') try: cloud = shade.operator_cloud(**module.params) group = cloud.get_group(name) if module.check_mode: module.exit_json(changed=_system_state_change(state, description, group)) if state == 'present': if group is None: group = cloud.create_group( name=name, description=description) changed = True else: if description is not None and group.description != description: group = cloud.update_group( group.id, description=description) changed = True else: changed = False module.exit_json(changed=changed, group=group) elif state == 'absent': if group is None: changed=False else: cloud.delete_group(group.id) changed=True module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=str(e)) from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
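The module's check-mode path relies entirely on _system_state_change() to predict whether a change would be made. A small sketch exercising that decision logic outside Ansible; the function body is copied from the module above, the stand-in group object is hypothetical:

    # _system_state_change(state, description, group) exercised with a fake group.
    class FakeGroup(object):  # hypothetical stand-in for shade's group result
        def __init__(self, description):
            self.description = description


    def _system_state_change(state, description, group):
        if state == 'present' and not group:
            return True
        if state == 'present' and description is not None and group.description != description:
            return True
        if state == 'absent' and group:
            return True
        return False


    print(_system_state_change('present', 'Demo Group', None))                      # True: group must be created
    print(_system_state_change('present', 'Demo Group', FakeGroup('Demo Group')))   # False: already as requested
    print(_system_state_change('absent', None, FakeGroup('Demo Group')))            # True: group must be deleted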
tinkerinestudio/Tinkerine-Suite
refs/heads/master
TinkerineSuite/PIL/GimpPaletteFile.py
14
#
# Python Imaging Library
# $Id$
#
# stuff to read GIMP palette files
#
# History:
# 1997-08-23 fl     Created
# 2004-09-07 fl     Support GIMP 2.0 palette files.
#
# Copyright (c) Secret Labs AB 1997-2004.  All rights reserved.
# Copyright (c) Fredrik Lundh 1997-2004.
#
# See the README file for information on usage and redistribution.
#

import re

from PIL._binary import o8


##
# File handler for GIMP's palette format.

class GimpPaletteFile:

    rawmode = "RGB"

    def __init__(self, fp):

        self.palette = [o8(i)*3 for i in range(256)]

        if fp.readline()[:12] != b"GIMP Palette":
            raise SyntaxError("not a GIMP palette file")

        i = 0
        while i <= 255:

            s = fp.readline()
            if not s:
                break
            # skip fields and comment lines
            if re.match(b"\w+:|#", s):
                continue
            if len(s) > 100:
                raise SyntaxError("bad palette file")

            v = tuple(map(int, s.split()[:3]))
            if len(v) != 3:
                raise ValueError("bad palette entry")

            if 0 <= i <= 255:
                self.palette[i] = o8(v[0]) + o8(v[1]) + o8(v[2])

            i = i + 1

        self.palette = b"".join(self.palette)

    def getpalette(self):

        return self.palette, self.rawmode
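A quick way to exercise the parser above without a file on disk, using an in-memory byte stream; the palette contents are invented:

    import io
    from PIL.GimpPaletteFile import GimpPaletteFile

    data = (b"GIMP Palette\n"
            b"Name: tiny\n"
            b"# comment and 'Field:' lines are skipped by the parser\n"
            b"  0   0   0 black\n"
            b"255 255 255 white\n")

    p = GimpPaletteFile(io.BytesIO(data))
    palette, rawmode = p.getpalette()
    print(rawmode, len(palette))  # 'RGB' 768: 256 RGB triplets, first two from the file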
UdK-VPT/Open_eQuarter
refs/heads/master
mole/extensions/eval_enev/oeq_QTE_Wall.py
1
# -*- coding: utf-8 -*-

import os, math
from qgis.core import NULL
from mole import oeq_global
from mole.project import config
from mole.extensions import OeQExtension
from mole.stat_corr import rb_contemporary_base_uvalue_by_building_age_lookup


def calculation(self=None, parameters={}, feature=None):
    from math import floor, ceil
    from PyQt4.QtCore import QVariant

    wl_qte = NULL
    if not oeq_global.isnull([parameters['WL_AR'], parameters['WL_UE'], parameters['HHRS']]):
        wl_qte = float(parameters['WL_AR']) * float(parameters['WL_UE']) * float(parameters['HHRS']) / 1000
    return {'WL_QTE': {'type': QVariant.Double, 'value': wl_qte}}


extension = OeQExtension(
    extension_id=__name__,
    category='Evaluation',
    subcategory='EnEV Transm. Heat Loss',
    extension_name='Wall Quality (QT, EnEV)',
    layer_name='QT Wall EnEV',
    extension_filepath=os.path.join(__file__),
    colortable=os.path.join(os.path.splitext(__file__)[0] + '.qml'),
    field_id='WL_QTE',
    source_type='none',
    par_in=['WL_AR', 'WL_UE', 'HHRS'],
    sourcelayer_name=config.data_layer_name,
    targetlayer_name=config.data_layer_name,
    active=True,
    show_results=['WL_QTE'],
    description=u"Calculate the EnEV Transmission Heat Loss of the Building's Walls",
    evaluation_method=calculation)

extension.registerExtension(default=True)
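The evaluation itself is a single formula: wall transmission loss = wall area * wall U-value * heating hours / 1000. A worked example with made-up inputs, independent of QGIS:

    # WL_QTE = WL_AR * WL_UE * HHRS / 1000 (kWh per year), as in calculation() above.
    wl_ar = 120.0    # wall area (illustrative value)
    wl_ue = 1.4      # wall U-value (illustrative value)
    hhrs = 66000.0   # heating hours figure used by the extension (illustrative value)

    wl_qte = wl_ar * wl_ue * hhrs / 1000
    print(wl_qte)  # 11088.0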
apocquet/django
refs/heads/master
tests/i18n/contenttypes/tests.py
367
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import os

from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
from django.utils import six, translation
from django.utils._os import upath


@override_settings(
    USE_I18N=True,
    LOCALE_PATHS=[
        os.path.join(os.path.dirname(upath(__file__)), 'locale'),
    ],
    LANGUAGE_CODE='en',
    LANGUAGES=[
        ('en', 'English'),
        ('fr', 'French'),
    ],
)
class ContentTypeTests(TestCase):
    def test_verbose_name(self):
        company_type = ContentType.objects.get(app_label='i18n', model='company')
        with translation.override('en'):
            self.assertEqual(six.text_type(company_type), 'Company')
        with translation.override('fr'):
            self.assertEqual(six.text_type(company_type), 'Société')
wrr/wwwhisper
refs/heads/master
wwwhisper_service/wsgi.py
1
""" WSGI config for service project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wwwhisper_service.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. # Apply WSGI middleware here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
kdaily/cloudbiolinux
refs/heads/master
cloudbio/biodata/ggd.py
6
"""Process GGD (Get Genomics Data) configurations for installation in biodata directories. Builds off work done by Aaron Quinlan to define and install genomic data: https://github.com/arq5x/ggd """ import collections import contextlib from distutils.version import LooseVersion import os import shutil import subprocess from fabric.api import env import yaml def install_recipe(base_dir, recipe_file): """Install data in a biodata directory given instructions from GGD YAML recipe. """ assert env.hosts == ["localhost"], "GGD recipes only work for local runs" if not os.path.exists(base_dir): os.makedirs(base_dir) recipe = _read_recipe(recipe_file) if not version_uptodate(base_dir, recipe): if _has_required_programs(recipe["recipe"]["full"].get("required", [])): with tx_tmpdir(base_dir) as tmpdir: with chdir(tmpdir): print("Running GGD recipe: %s" % recipe["attributes"]["name"]) _run_recipe(tmpdir, recipe["recipe"]["full"]["recipe_cmds"], recipe["recipe"]["full"]["recipe_type"]) _move_files(tmpdir, base_dir, recipe["recipe"]["full"]["recipe_outfiles"]) add_version(base_dir, recipe) def _has_required_programs(programs): """Ensure the provided programs exist somewhere in the current PATH. http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python """ def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) for p in programs: found = False for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, p) if is_exe(exe_file): found = True break if not found: return False return True def _run_recipe(work_dir, recipe_cmds, recipe_type): """Create a bash script and run the recipe to download data. """ assert recipe_type == "bash", "Can only currently run bash recipes" run_file = os.path.join(work_dir, "ggd-run.sh") with open(run_file, "w") as out_handle: out_handle.write("#!/bin/bash\nset -eu -o pipefail\n") out_handle.write("\n".join(recipe_cmds)) subprocess.check_output(["bash", run_file]) def _move_files(tmp_dir, final_dir, out_files): for out_file in out_files: orig = os.path.join(tmp_dir, out_file) final = os.path.join(final_dir, out_file) assert os.path.exists(orig), ("Did not find expected output file %s in %s" % (out_file, tmp_dir)) cur_dir = os.path.dirname(final) if not os.path.exists(cur_dir): os.makedirs(cur_dir) os.rename(orig, final) def _read_recipe(in_file): in_file = os.path.abspath(os.path.expanduser(in_file)) with open(in_file) as in_handle: recipe = yaml.safe_load(in_handle) return recipe # ## Versioning def version_uptodate(base_dir, recipe): """Check if we have an up to date GGD installation in this directory. 
""" versions = _get_versions(base_dir) return (recipe["attributes"]["name"] in versions and LooseVersion(versions[recipe["attributes"]["name"]]) >= LooseVersion(str(recipe["attributes"]["version"]))) def add_version(base_dir, recipe): versions = _get_versions(base_dir) versions[recipe["attributes"]["name"]] = recipe["attributes"]["version"] with open(_get_version_file(base_dir), "w") as out_handle: for n, v in versions.items(): out_handle.write("%s,%s\n" % (n, v)) def _get_versions(base_dir): version_file = _get_version_file(base_dir) versions = collections.OrderedDict() if os.path.exists(version_file): with open(version_file) as in_handle: for line in in_handle: name, version = line.strip().split(",") versions[name] = version return versions def _get_version_file(base_dir): return os.path.join(base_dir, "versions.csv") # ## Transactional utilities @contextlib.contextmanager def tx_tmpdir(base_dir): """Context manager to create and remove a transactional temporary directory. """ tmp_dir = os.path.join(base_dir, "txtmp") if not os.path.exists(tmp_dir): os.makedirs(tmp_dir) yield tmp_dir shutil.rmtree(tmp_dir, ignore_errors=True) @contextlib.contextmanager def chdir(new_dir): """Context manager to temporarily change to a new directory. http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/ """ cur_dir = os.getcwd() os.chdir(new_dir) try: yield finally: os.chdir(cur_dir)
edx-solutions/edx-platform
refs/heads/master
common/djangoapps/course_modes/tests/test_signals.py
4
""" Unit tests for the course_mode signals """ from datetime import datetime, timedelta import ddt from django.conf import settings from mock import patch from pytz import UTC from course_modes.models import CourseMode from course_modes.signals import _listen_for_course_publish from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID @ddt.ddt class CourseModeSignalTest(ModuleStoreTestCase): """ Tests for the course_mode course_published signal. """ def setUp(self): super(CourseModeSignalTest, self).setUp() self.end = datetime.now(tz=UTC).replace(microsecond=0) + timedelta(days=7) self.course = CourseFactory.create(end=self.end) CourseMode.objects.all().delete() def create_mode( self, mode_slug, mode_name, min_price=0, suggested_prices='', currency='usd', expiration_datetime=None, ): """ Create a new course mode """ return CourseMode.objects.get_or_create( course_id=self.course.id, mode_display_name=mode_name, mode_slug=mode_slug, min_price=min_price, suggested_prices=suggested_prices, currency=currency, _expiration_datetime=expiration_datetime, ) def test_no_verified_mode(self): """ Verify expiration not updated by signal for non-verified mode. """ course_mode, __ = self.create_mode('honor', 'honor') _listen_for_course_publish('store', self.course.id) course_mode.refresh_from_db() self.assertIsNone(course_mode.expiration_datetime) @ddt.data(1, 14, 30) def test_verified_mode(self, verification_window): """ Verify signal updates expiration to configured time period before course end for verified mode. """ course_mode, __ = self.create_mode('verified', 'verified', 10) self.assertIsNone(course_mode.expiration_datetime) with patch('course_modes.models.CourseModeExpirationConfig.current') as config: instance = config.return_value instance.verification_window = timedelta(days=verification_window) _listen_for_course_publish('store', self.course.id) course_mode.refresh_from_db() self.assertEqual(course_mode.expiration_datetime, self.end - timedelta(days=verification_window)) @ddt.data(1, 14, 30) def test_verified_mode_explicitly_set(self, verification_window): """ Verify signal does not update expiration for verified mode with explicitly set expiration. 
""" course_mode, __ = self.create_mode('verified', 'verified', 10) course_mode.expiration_datetime_is_explicit = True self.assertIsNone(course_mode.expiration_datetime) with patch('course_modes.models.CourseModeExpirationConfig.current') as config: instance = config.return_value instance.verification_window = timedelta(days=verification_window) _listen_for_course_publish('store', self.course.id) course_mode.refresh_from_db() self.assertEqual(course_mode.expiration_datetime, self.end - timedelta(days=verification_window)) def test_masters_mode(self): # create an xblock with verified group access AUDIT_ID = settings.COURSE_ENROLLMENT_MODES['audit']['id'] VERIFIED_ID = settings.COURSE_ENROLLMENT_MODES['verified']['id'] MASTERS_ID = settings.COURSE_ENROLLMENT_MODES['masters']['id'] verified_section = ItemFactory.create( category="sequential", metadata={'group_access': {ENROLLMENT_TRACK_PARTITION_ID: [VERIFIED_ID]}} ) # and a section with no restriction section2 = ItemFactory.create( category="sequential", ) section3 = ItemFactory.create( category='sequential', metadata={'group_access': {ENROLLMENT_TRACK_PARTITION_ID: [AUDIT_ID]}} ) with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred): # create the master's mode. signal will add masters to the verified section self.create_mode('masters', 'masters') verified_section_ret = self.store.get_item(verified_section.location) section2_ret = self.store.get_item(section2.location) section3_ret = self.store.get_item(section3.location) # the verified section will now also be visible to master's assert verified_section_ret.group_access[ENROLLMENT_TRACK_PARTITION_ID] == [VERIFIED_ID, MASTERS_ID] assert section2_ret.group_access == {} assert section3_ret.group_access == {ENROLLMENT_TRACK_PARTITION_ID: [AUDIT_ID]}
axinging/crosswalk
refs/heads/master
PRESUBMIT.py
9
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Copyright (c) 2016 Intel Corporation. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Presubmit checks for Crosswalk. These checks are performed automatically by our Trybots when a patch is sent for review. Most of them come from Chromium's own PRESUBMIT.py script. """ import cpplint import errno import os import sys _LICENSE_HEADER_RE = ( r'(.*? Copyright (\(c\) )?2[\d]{3}.+\n)+' r'.*? Use of this source code is governed by a BSD-style license that ' r'can be\n' r'.*? found in the LICENSE file\.(?: \*/)?\n' ) def _CheckChangeLintsClean(input_api, output_api): class PrefixedFileInfo(cpplint.FileInfo): def RepositoryName(self): fullname = self.FullName() repo_pos = fullname.find('xwalk/') if repo_pos == -1: # Something weird happened, bail out. return [output_api.PresubmitError( 'Cannot find "xwalk/" in %s.' % fullname)] return fullname[repo_pos:] input_api.cpplint.FileInfo = PrefixedFileInfo source_filter = lambda filename: input_api.FilterSourceFile( filename, white_list=(r'.+\.(cc|h)$',)) return input_api.canned_checks.CheckChangeLintsClean( input_api, output_api, source_filter) def CheckChangeOnUpload(input_api, output_api): # We need to prepend to sys.path, otherwise we may import depot_tools's own # PRESUBMIT.py. sys.path = [os.path.dirname(input_api.PresubmitLocalPath())] + sys.path import PRESUBMIT as cr results = [] results.extend(_CheckChangeLintsClean(input_api, output_api)) results.extend(cr._CheckNoIOStreamInHeaders(input_api, output_api)) results.extend(cr._CheckNoUNIT_TESTInSourceFiles(input_api, output_api)) results.extend(cr._CheckDCHECK_IS_ONHasBraces(input_api, output_api)) results.extend(cr._CheckNoNewWStrings(input_api, output_api)) results.extend(cr._CheckNoPragmaOnce(input_api, output_api)) results.extend(cr._CheckNoTrinaryTrueFalse(input_api, output_api)) results.extend( cr._CheckNoAuraWindowPropertyHInHeaders(input_api, output_api)) results.extend(cr._CheckForVersionControlConflicts(input_api, output_api)) results.extend(cr._CheckPatchFiles(input_api, output_api)) results.extend(cr._CheckNoAbbreviationInPngFileName(input_api, output_api)) results.extend(cr._CheckForInvalidOSMacros(input_api, output_api)) results.extend(cr._CheckForInvalidIfDefinedMacros(input_api, output_api)) results.extend(cr._CheckNoDeprecatedCSS(input_api, output_api)) results.extend(cr._CheckSingletonInHeaders(input_api, output_api)) results.extend( input_api.canned_checks.CheckGNFormatted(input_api, output_api)) # The following checks should be enabled only after we fix all violations. 
# results.extend(input_api.canned_checks.PanProjectChecks( # input_api, output_api, license_header=_LICENSE_HEADER_RE, # project_name='Crosswalk', owners_check=False)) # results.extend( # input_api.canned_checks.CheckPatchFormatted(input_api, output_api)) # results.extend( # input_api.canned_checks.CheckChangeHasOnlyOneEol(input_api, output_api)) # results.extend( # input_api.canned_checks.CheckChangeTodoHasOwner(input_api, output_api)) # results.extend( # cr._CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api)) # results.extend(cr._CheckNoBannedFunctions(input_api, output_api)) # results.extend(cr._CheckIncludeOrder(input_api, output_api)) results.extend( input_api.canned_checks.CheckChangeHasNoTabs( input_api, output_api, source_file_filter=lambda x: x.LocalPath().endswith('.grd'))) # results.extend(cr._CheckSpamLogging(input_api, output_api)) # results.extend(cr._CheckNoDeprecatedJS(input_api, output_api)) # results.extend(cr._CheckForIPCRules(input_api, output_api)) results.extend(cr._CheckForWindowsLineEndings(input_api, output_api)) # results.extend(cr._AndroidSpecificOnUploadChecks(input_api, output_api)) # Some checks input_api.PresubmitLocalPath() returns Chromium's root # directory, so we need to fake it. input_api._current_presubmit_path = os.path.dirname( input_api.PresubmitLocalPath()) results.extend(cr._CheckParseErrors(input_api, output_api)) results.extend(cr._CheckFilePermissions(input_api, output_api)) # Our DEPS rules need to be adjusted before we can enable this check. # results.extend(cr._CheckUnwantedDependencies(input_api, output_api)) return results # We do not use Chromium's commit queue, so the checks for mode uploading and # committing should be the same. CheckChangeOnCommit = CheckChangeOnUpload
bodylabs/rigger
refs/heads/master
bodylabs_rigger/factory.py
2
class RiggedModelFactory(object): """Generates rigged models from vertices. The factory is initialized with the static data for the model rig: the mesh topology and texture map, the joint hierarchy, and the vertex weight map. The RiggedModelFactory can then be used to generate FbxScene objects binding the rig to a set of mesh vertices. """ def __init__(self, textured_mesh, joint_tree, joint_position_spec, clusters): """Initializes the RiggedModelFactory. textured_mesh: a TexturedMesh object joint_tree: the JointTree at the root of the joint hierarchy joint_position_spec: dict mapping joint name to position specification. See `joint_positions.py` for more details. clusters: dict mapping joint name to ControlPointCluster """ self._textured_mesh = textured_mesh self._joint_tree = joint_tree self._joint_position_spec = joint_position_spec self._clusters = clusters def _set_mesh(self, v, fbx_scene, root): """Set the FbxMesh for the given scene. v: the mesh vertices fbx_scene: the FbxScene to which this mesh should be added root: the FbxNode off which the mesh will be added Returns the FbxNode to which the mesh was added. """ from fbx import ( FbxLayerElement, FbxMesh, FbxNode, FbxVector2, FbxVector4, ) # Create a new node in the scene. fbx_mesh_node = FbxNode.Create(fbx_scene, self._textured_mesh.name) root.AddChild(fbx_mesh_node) fbx_mesh = FbxMesh.Create(fbx_scene, '') fbx_mesh_node.SetNodeAttribute(fbx_mesh) # Vertices. num_vertices = v.shape[0] fbx_mesh.InitControlPoints(num_vertices) for vi in range(num_vertices): new_control_point = FbxVector4(*v[vi, :]) fbx_mesh.SetControlPointAt(new_control_point, vi) # Faces. faces = self._textured_mesh.faces for fi in range(faces.shape[0]): face = faces[fi, :] fbx_mesh.BeginPolygon(fi) for vi in range(faces.shape[1]): fbx_mesh.AddPolygon(face[vi]) fbx_mesh.EndPolygon() fbx_mesh.BuildMeshEdgeArray() # Vertex normals. fbx_mesh.GenerateNormals( False, # pOverwrite True, # pByCtrlPoint ) # UV map. uv_indices = self._textured_mesh.uv_indices.ravel() uv_values = self._textured_mesh.uv_values uv = fbx_mesh.CreateElementUV('') uv.SetMappingMode(FbxLayerElement.eByPolygonVertex) uv.SetReferenceMode(FbxLayerElement.eIndexToDirect) index_array = uv.GetIndexArray() direct_array = uv.GetDirectArray() index_array.SetCount(uv_indices.size) direct_array.SetCount(uv_values.shape[0]) for ei, uvi in enumerate(uv_indices): index_array.SetAt(ei, uvi) direct_array.SetAt(uvi, FbxVector2(*uv_values[uvi, :])) return fbx_mesh_node def _set_node_translation(self, location, fbx_node): """Translates a node to a location in world coordinates. location: an unpackable (x, y, z) vector fbx_node: the FbxNode to translate """ from fbx import ( FbxAMatrix, FbxDouble4, FbxVector4, ) # We want to affect a target global position change by modifying the # local node translation. If the global transformation were based # solely on translation, rotation, and scale, we could set each of # these individually and be done. However, the global transformation # matrix computation is more complicated. For details, see # http://help.autodesk.com/view/FBX/2015/ENU/ # ?guid=__files_GUID_10CDD63C_79C1_4F2D_BB28_AD2BE65A02ED_htm # # We get around this by setting the world transform to our desired # global translation matrix, then solving for the local translation. 
global_pos_mat = FbxAMatrix() global_pos_mat.SetIdentity() global_pos_mat.SetT(FbxVector4(*location)) current_global_pos_mat = fbx_node.EvaluateGlobalTransform() parent_global_pos_mat = fbx_node.GetParent().EvaluateGlobalTransform() current_local_translation = FbxAMatrix() current_local_translation.SetIdentity() current_local_translation.SetT( FbxVector4(fbx_node.LclTranslation.Get())) new_local_translation = ( parent_global_pos_mat.Inverse() * global_pos_mat * current_global_pos_mat.Inverse() * parent_global_pos_mat * current_local_translation ) fbx_node.LclTranslation.Set(FbxDouble4(*new_local_translation.GetT())) def _extend_skeleton(self, parent_fbx_node, reference_joint_tree, target_fbx_node_positions, fbx_scene): """Extend the FbxNode skeleton according to the reference JointTree. parent_fbx_node: the FbxNode off which the skeleton will be extended reference_joint_tree: the reference JointTree object providing the hierarchy target_fbx_node_positions: a mapping from joint name to the desired position for the respective FbxNode in the skeleton fbx_scene: the FbxScene to which the skeleton should be added Returns a map from node name to FbxNode. """ from fbx import ( FbxNode, FbxSkeleton, ) fbx_node_map = {} skeleton = FbxSkeleton.Create(fbx_scene, '') skeleton.SetSkeletonType(FbxSkeleton.eLimbNode) node_name = reference_joint_tree.name node = FbxNode.Create(fbx_scene, node_name) node.SetNodeAttribute(skeleton) parent_fbx_node.AddChild(node) fbx_node_map[node_name] = node node_position = target_fbx_node_positions.get(node_name, None) if node_position is not None: self._set_node_translation(node_position, node) else: print "Position information missing for '{}'".format(node_name) for child in reference_joint_tree.children: fbx_node_map.update(self._extend_skeleton( node, child, target_fbx_node_positions, fbx_scene)) return fbx_node_map def _add_skin_and_bind_pose(self, fbx_node_map, fbx_mesh_node, fbx_scene): """Adds a deformer skin and bind pose. fbx_node_map: a map from node name to FbxNode. These nodes will become the cluster links. fbx_mesh_node: the FbxNode where our mesh is attached (i.e. as the node attribute). The skin will be added as a deformer of this mesh. fbx_scene: the FbxScene to which the skin and bind pose should be added. """ from fbx import ( FbxCluster, FbxMatrix, FbxPose, FbxSkin, ) mesh = fbx_mesh_node.GetNodeAttribute() # Create the bind pose. We'll give the bind pose a unique name since # it is added at the level of the global scene. bind_pose = FbxPose.Create( fbx_scene, 'pose{}'.format(fbx_scene.GetPoseCount() + 1)) bind_pose.SetIsBindPose(True) bind_pose.Add(fbx_mesh_node, FbxMatrix( fbx_mesh_node.EvaluateGlobalTransform())) skin = FbxSkin.Create(fbx_scene, '') for node_name, node in fbx_node_map.iteritems(): cluster_info = self._clusters.get(node_name) if cluster_info is None: continue cluster = FbxCluster.Create(fbx_scene, '') cluster.SetLink(node) cluster.SetLinkMode(FbxCluster.eNormalize) vindices = cluster_info.indices weights = cluster_info.weights for vid, weight in zip(vindices, weights): cluster.AddControlPointIndex(vid, weight) transform = node.EvaluateGlobalTransform() cluster.SetTransformLinkMatrix(transform) bind_pose.Add(node, FbxMatrix(transform)) skin.AddCluster(cluster) mesh.AddDeformer(skin) fbx_scene.AddPose(bind_pose) def construct_rig(self, vertices, fbx_manager): """Construct rig for the given vertices. vertices: an Vx3 numpy array in centimeter units. Returns a new FbxScene. 
""" from joint_positions import calculate_joint_positions from fbx import FbxScene fbx_scene = FbxScene.Create(fbx_manager, '') # We'll build the rig off of this node. One child will root # the joint skeleton and another will contain the mesh and skin. rig_root_node = fbx_scene.GetRootNode() target_joint_positions = calculate_joint_positions( vertices, self._joint_position_spec) # Add the skeleton to the scene, saving the nodes by name. We'll # then use this map to link the nodes to their vertex clusters. fbx_node_map = self._extend_skeleton( rig_root_node, self._joint_tree, target_joint_positions, fbx_scene) # Add the mesh, skin, and bind pose. fbx_mesh_node = self._set_mesh(vertices, fbx_scene, rig_root_node) self._add_skin_and_bind_pose(fbx_node_map, fbx_mesh_node, fbx_scene) return fbx_scene @classmethod def create_default(cls): import os import bodylabs_rigger.static from bodylabs_rigger.rig_assets import RigAssets assets = RigAssets.load(os.path.join( os.path.dirname(bodylabs_rigger.static.__file__), 'rig_assets.json')) return cls(**assets.__dict__)
aherlihy/mongo-python-driver
refs/heads/master
test/test_legacy_api.py
2
# Copyright 2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test various legacy / deprecated API features.""" import itertools import sys import threading import time import warnings sys.path[0:0] = [""] from bson.codec_options import CodecOptions from bson.dbref import DBRef from bson.objectid import ObjectId from bson.son import SON from pymongo import ASCENDING, DESCENDING from pymongo.errors import (ConfigurationError, CursorNotFound, DocumentTooLarge, DuplicateKeyError, InvalidDocument, InvalidOperation, OperationFailure, WTimeoutError) from pymongo.message import _CursorAddress from pymongo.son_manipulator import (AutoReference, NamespaceInjector, ObjectIdShuffler, SONManipulator) from pymongo.write_concern import WriteConcern from test import client_context, qcheck, unittest, SkipTest from test.test_client import IntegrationTest from test.utils import (joinall, oid_generated_on_client, rs_or_single_client, wait_until) class TestDeprecations(IntegrationTest): @classmethod def setUpClass(cls): super(TestDeprecations, cls).setUpClass() cls.warn_context = warnings.catch_warnings() cls.warn_context.__enter__() warnings.simplefilter("error", DeprecationWarning) @classmethod def tearDownClass(cls): cls.warn_context.__exit__() cls.warn_context = None def test_save_deprecation(self): self.assertRaises( DeprecationWarning, lambda: self.db.test.save({})) def test_insert_deprecation(self): self.assertRaises( DeprecationWarning, lambda: self.db.test.insert({})) def test_update_deprecation(self): self.assertRaises( DeprecationWarning, lambda: self.db.test.update({}, {})) def test_remove_deprecation(self): self.assertRaises( DeprecationWarning, lambda: self.db.test.remove({})) def test_find_and_modify_deprecation(self): self.assertRaises( DeprecationWarning, lambda: self.db.test.find_and_modify({'i': 5}, {})) def test_add_son_manipulator_deprecation(self): db = self.client.pymongo_test self.assertRaises(DeprecationWarning, lambda: db.add_son_manipulator(AutoReference(db))) def test_ensure_index_deprecation(self): try: self.assertRaises( DeprecationWarning, lambda: self.db.test.ensure_index('i')) finally: self.db.test.drop() class TestLegacy(IntegrationTest): @classmethod def setUpClass(cls): super(TestLegacy, cls).setUpClass() cls.w = client_context.w cls.warn_context = warnings.catch_warnings() cls.warn_context.__enter__() warnings.simplefilter("ignore", DeprecationWarning) @classmethod def tearDownClass(cls): cls.warn_context.__exit__() cls.warn_context = None def test_insert_find_one(self): # Tests legacy insert. 
db = self.db db.test.drop() self.assertEqual(0, len(list(db.test.find()))) doc = {"hello": u"world"} _id = db.test.insert(doc) self.assertEqual(1, len(list(db.test.find()))) self.assertEqual(doc, db.test.find_one()) self.assertEqual(doc["_id"], _id) self.assertTrue(isinstance(_id, ObjectId)) doc_class = dict # Work around http://bugs.jython.org/issue1728 if (sys.platform.startswith('java') and sys.version_info[:3] >= (2, 5, 2)): doc_class = SON db = self.client.get_database( db.name, codec_options=CodecOptions(document_class=doc_class)) def remove_insert_find_one(doc): db.test.remove({}) db.test.insert(doc) # SON equality is order sensitive. return db.test.find_one() == doc.to_dict() qcheck.check_unittest(self, remove_insert_find_one, qcheck.gen_mongo_dict(3)) def test_generator_insert(self): # Only legacy insert currently supports insert from a generator. db = self.db db.test.remove({}) self.assertEqual(db.test.find().count(), 0) db.test.insert(({'a': i} for i in range(5)), manipulate=False) self.assertEqual(5, db.test.count()) db.test.remove({}) db.test.insert(({'a': i} for i in range(5)), manipulate=True) self.assertEqual(5, db.test.count()) db.test.remove({}) def test_insert_multiple(self): # Tests legacy insert. db = self.db db.drop_collection("test") doc1 = {"hello": u"world"} doc2 = {"hello": u"mike"} self.assertEqual(db.test.find().count(), 0) ids = db.test.insert([doc1, doc2]) self.assertEqual(db.test.find().count(), 2) self.assertEqual(doc1, db.test.find_one({"hello": u"world"})) self.assertEqual(doc2, db.test.find_one({"hello": u"mike"})) self.assertEqual(2, len(ids)) self.assertEqual(doc1["_id"], ids[0]) self.assertEqual(doc2["_id"], ids[1]) ids = db.test.insert([{"hello": 1}]) self.assertTrue(isinstance(ids, list)) self.assertEqual(1, len(ids)) self.assertRaises(InvalidOperation, db.test.insert, []) # Generator that raises StopIteration on first call to next(). self.assertRaises(InvalidOperation, db.test.insert, (i for i in [])) def test_insert_multiple_with_duplicate(self): # Tests legacy insert. db = self.db db.drop_collection("test_insert_multiple_with_duplicate") collection = db.test_insert_multiple_with_duplicate collection.create_index([('i', ASCENDING)], unique=True) # No error collection.insert([{'i': i} for i in range(5, 10)], w=0) wait_until(lambda: 5 == collection.count(), 'insert 5 documents') db.drop_collection("test_insert_multiple_with_duplicate") collection.create_index([('i', ASCENDING)], unique=True) # No error collection.insert([{'i': 1}] * 2, w=0) wait_until(lambda: 1 == collection.count(), 'insert 1 document') self.assertRaises( DuplicateKeyError, lambda: collection.insert([{'i': 2}] * 2), ) db.drop_collection("test_insert_multiple_with_duplicate") db = self.client.get_database( db.name, write_concern=WriteConcern(w=0)) collection = db.test_insert_multiple_with_duplicate collection.create_index([('i', ASCENDING)], unique=True) # No error. collection.insert([{'i': 1}] * 2) wait_until(lambda: 1 == collection.count(), 'insert 1 document') # Implied acknowledged. self.assertRaises( DuplicateKeyError, lambda: collection.insert([{'i': 2}] * 2, fsync=True), ) # Explicit acknowledged. self.assertRaises( DuplicateKeyError, lambda: collection.insert([{'i': 2}] * 2, w=1)) db.drop_collection("test_insert_multiple_with_duplicate") def test_insert_iterables(self): # Tests legacy insert. 
db = self.db self.assertRaises(TypeError, db.test.insert, 4) self.assertRaises(TypeError, db.test.insert, None) self.assertRaises(TypeError, db.test.insert, True) db.drop_collection("test") self.assertEqual(db.test.find().count(), 0) db.test.insert(({"hello": u"world"}, {"hello": u"world"})) self.assertEqual(db.test.find().count(), 2) db.drop_collection("test") self.assertEqual(db.test.find().count(), 0) db.test.insert(map(lambda x: {"hello": "world"}, itertools.repeat(None, 10))) self.assertEqual(db.test.find().count(), 10) def test_insert_manipulate_false(self): # Test two aspects of legacy insert with manipulate=False: # 1. The return value is None or [None] as appropriate. # 2. _id is not set on the passed-in document object. collection = self.db.test_insert_manipulate_false collection.drop() oid = ObjectId() doc = {'a': oid} try: # The return value is None. self.assertTrue(collection.insert(doc, manipulate=False) is None) # insert() shouldn't set _id on the passed-in document object. self.assertEqual({'a': oid}, doc) # Bulk insert. The return value is a list of None. self.assertEqual([None], collection.insert([{}], manipulate=False)) docs = [{}, {}] ids = collection.insert(docs, manipulate=False) self.assertEqual([None, None], ids) self.assertEqual([{}, {}], docs) finally: collection.drop() def test_continue_on_error(self): # Tests legacy insert. db = self.db db.drop_collection("test_continue_on_error") collection = db.test_continue_on_error oid = collection.insert({"one": 1}) self.assertEqual(1, collection.count()) docs = [] docs.append({"_id": oid, "two": 2}) # Duplicate _id. docs.append({"three": 3}) docs.append({"four": 4}) docs.append({"five": 5}) with self.assertRaises(DuplicateKeyError): collection.insert(docs, manipulate=False) self.assertEqual(1, collection.count()) with self.assertRaises(DuplicateKeyError): collection.insert(docs, manipulate=False, continue_on_error=True) self.assertEqual(4, collection.count()) collection.remove({}, w=client_context.w) oid = collection.insert({"_id": oid, "one": 1}, w=0) wait_until(lambda: 1 == collection.count(), 'insert 1 document') docs[0].pop("_id") docs[2]["_id"] = oid with self.assertRaises(DuplicateKeyError): collection.insert(docs, manipulate=False) self.assertEqual(3, collection.count()) collection.insert(docs, manipulate=False, continue_on_error=True, w=0) wait_until(lambda: 6 == collection.count(), 'insert 3 documents') def test_acknowledged_insert(self): # Tests legacy insert. db = self.db db.drop_collection("test_acknowledged_insert") collection = db.test_acknowledged_insert a = {"hello": "world"} collection.insert(a) collection.insert(a, w=0) self.assertRaises(OperationFailure, collection.insert, a) def test_insert_adds_id(self): # Tests legacy insert. doc = {"hello": "world"} self.db.test.insert(doc) self.assertTrue("_id" in doc) docs = [{"hello": "world"}, {"hello": "world"}] self.db.test.insert(docs) for doc in docs: self.assertTrue("_id" in doc) def test_insert_large_batch(self): # Tests legacy insert. db = self.client.test_insert_large_batch self.addCleanup(self.client.drop_database, 'test_insert_large_batch') max_bson_size = self.client.max_bson_size if client_context.version.at_least(2, 5, 4, -1): # Write commands are limited to 16MB + 16k per batch big_string = 'x' * int(max_bson_size / 2) else: big_string = 'x' * (max_bson_size - 100) # Batch insert that requires 2 batches. 
successful_insert = [{'x': big_string}, {'x': big_string}, {'x': big_string}, {'x': big_string}] db.collection_0.insert(successful_insert, w=1) self.assertEqual(4, db.collection_0.count()) db.collection_0.drop() # Test that inserts fail after first error. insert_second_fails = [{'_id': 'id0', 'x': big_string}, {'_id': 'id0', 'x': big_string}, {'_id': 'id1', 'x': big_string}, {'_id': 'id2', 'x': big_string}] with self.assertRaises(DuplicateKeyError): db.collection_1.insert(insert_second_fails) self.assertEqual(1, db.collection_1.count()) db.collection_1.drop() # 2 batches, 2nd insert fails, don't continue on error. self.assertTrue(db.collection_2.insert(insert_second_fails, w=0)) wait_until(lambda: 1 == db.collection_2.count(), 'insert 1 document', timeout=60) db.collection_2.drop() # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are # dupes. Acknowledged, continue on error. insert_two_failures = [{'_id': 'id0', 'x': big_string}, {'_id': 'id0', 'x': big_string}, {'_id': 'id1', 'x': big_string}, {'_id': 'id1', 'x': big_string}] with self.assertRaises(OperationFailure) as context: db.collection_3.insert(insert_two_failures, continue_on_error=True, w=1) self.assertIn('id1', str(context.exception)) # Only the first and third documents should be inserted. self.assertEqual(2, db.collection_3.count()) db.collection_3.drop() # 2 batches, 2 errors, unacknowledged, continue on error. db.collection_4.insert(insert_two_failures, continue_on_error=True, w=0) # Only the first and third documents are inserted. wait_until(lambda: 2 == db.collection_4.count(), 'insert 2 documents', timeout=60) db.collection_4.drop() def test_bad_dbref(self): # Requires the legacy API to test. c = self.db.test c.drop() # Incomplete DBRefs. self.assertRaises( InvalidDocument, c.insert_one, {'ref': {'$ref': 'collection'}}) self.assertRaises( InvalidDocument, c.insert_one, {'ref': {'$id': ObjectId()}}) ref_only = {'ref': {'$ref': 'collection'}} id_only = {'ref': {'$id': ObjectId()}} # Starting with MongoDB 2.5.2 this is no longer possible # from insert, update, or findAndModify. if not client_context.version.at_least(2, 5, 2): # Force insert of ref without $id. c.insert(ref_only, check_keys=False) self.assertEqual(DBRef('collection', id=None), c.find_one()['ref']) c.drop() # DBRef without $ref is decoded as normal subdocument. c.insert(id_only, check_keys=False) self.assertEqual(id_only, c.find_one()) def test_update(self): # Tests legacy update. db = self.db db.drop_collection("test") id1 = db.test.save({"x": 5}) db.test.update({}, {"$inc": {"x": 1}}) self.assertEqual(db.test.find_one(id1)["x"], 6) id2 = db.test.save({"x": 1}) db.test.update({"x": 6}, {"$inc": {"x": 1}}) self.assertEqual(db.test.find_one(id1)["x"], 7) self.assertEqual(db.test.find_one(id2)["x"], 1) def test_update_manipulate(self): # Tests legacy update. db = self.db db.drop_collection("test") db.test.insert({'_id': 1}) db.test.update({'_id': 1}, {'a': 1}, manipulate=True) self.assertEqual( {'_id': 1, 'a': 1}, db.test.find_one()) class AddField(SONManipulator): def transform_incoming(self, son, dummy): son['field'] = 'value' return son db.add_son_manipulator(AddField()) db.test.update({'_id': 1}, {'a': 2}, manipulate=False) self.assertEqual( {'_id': 1, 'a': 2}, db.test.find_one()) db.test.update({'_id': 1}, {'a': 3}, manipulate=True) self.assertEqual( {'_id': 1, 'a': 3, 'field': 'value'}, db.test.find_one()) def test_update_nmodified(self): # Tests legacy update. 
db = self.db db.drop_collection("test") ismaster = self.client.admin.command('ismaster') used_write_commands = (ismaster.get("maxWireVersion", 0) > 1) db.test.insert({'_id': 1}) result = db.test.update({'_id': 1}, {'$set': {'x': 1}}) if used_write_commands: self.assertEqual(1, result['nModified']) else: self.assertFalse('nModified' in result) # x is already 1. result = db.test.update({'_id': 1}, {'$set': {'x': 1}}) if used_write_commands: self.assertEqual(0, result['nModified']) else: self.assertFalse('nModified' in result) def test_multi_update(self): # Tests legacy update. db = self.db db.drop_collection("test") db.test.save({"x": 4, "y": 3}) db.test.save({"x": 5, "y": 5}) db.test.save({"x": 4, "y": 4}) db.test.update({"x": 4}, {"$set": {"y": 5}}, multi=True) self.assertEqual(3, db.test.count()) for doc in db.test.find(): self.assertEqual(5, doc["y"]) self.assertEqual(2, db.test.update({"x": 4}, {"$set": {"y": 6}}, multi=True)["n"]) def test_upsert(self): # Tests legacy update. db = self.db db.drop_collection("test") db.test.update({"page": "/"}, {"$inc": {"count": 1}}, upsert=True) db.test.update({"page": "/"}, {"$inc": {"count": 1}}, upsert=True) self.assertEqual(1, db.test.count()) self.assertEqual(2, db.test.find_one()["count"]) def test_acknowledged_update(self): # Tests legacy update. db = self.db db.drop_collection("test_acknowledged_update") collection = db.test_acknowledged_update collection.create_index("x", unique=True) collection.insert({"x": 5}) _id = collection.insert({"x": 4}) self.assertEqual( None, collection.update({"_id": _id}, {"$inc": {"x": 1}}, w=0)) self.assertRaises(DuplicateKeyError, collection.update, {"_id": _id}, {"$inc": {"x": 1}}) self.assertEqual(1, collection.update({"_id": _id}, {"$inc": {"x": 2}})["n"]) self.assertEqual(0, collection.update({"_id": "foo"}, {"$inc": {"x": 2}})["n"]) db.drop_collection("test_acknowledged_update") def test_update_backward_compat(self): # MongoDB versions >= 2.6.0 don't return the updatedExisting field # and return upsert _id in an array subdocument. This test should # pass regardless of server version or type (mongod/s). # Tests legacy update. c = self.db.test c.drop() oid = ObjectId() res = c.update({'_id': oid}, {'$set': {'a': 'a'}}, upsert=True) self.assertFalse(res.get('updatedExisting')) self.assertEqual(oid, res.get('upserted')) res = c.update({'_id': oid}, {'$set': {'b': 'b'}}) self.assertTrue(res.get('updatedExisting')) def test_save(self): # Tests legacy save. self.db.drop_collection("test_save") collection = self.db.test_save # Save a doc with autogenerated id _id = collection.save({"hello": "world"}) self.assertEqual(collection.find_one()["_id"], _id) self.assertTrue(isinstance(_id, ObjectId)) # Save a doc with explicit id collection.save({"_id": "explicit_id", "hello": "bar"}) doc = collection.find_one({"_id": "explicit_id"}) self.assertEqual(doc['_id'], 'explicit_id') self.assertEqual(doc['hello'], 'bar') # Save docs with _id field already present (shouldn't create new docs) self.assertEqual(2, collection.count()) collection.save({'_id': _id, 'hello': 'world'}) self.assertEqual(2, collection.count()) collection.save({'_id': 'explicit_id', 'hello': 'baz'}) self.assertEqual(2, collection.count()) self.assertEqual( 'baz', collection.find_one({'_id': 'explicit_id'})['hello'] ) # Acknowledged mode. 
collection.create_index("hello", unique=True) # No exception, even though we duplicate the first doc's "hello" value collection.save({'_id': 'explicit_id', 'hello': 'world'}, w=0) self.assertRaises( DuplicateKeyError, collection.save, {'_id': 'explicit_id', 'hello': 'world'}) self.db.drop_collection("test") def test_save_with_invalid_key(self): # Tests legacy save. self.db.drop_collection("test") self.assertTrue(self.db.test.insert({"hello": "world"})) doc = self.db.test.find_one() doc['a.b'] = 'c' expected = InvalidDocument if client_context.version.at_least(2, 5, 4, -1): expected = OperationFailure self.assertRaises(expected, self.db.test.save, doc) def test_acknowledged_save(self): # Tests legacy save. db = self.db db.drop_collection("test_acknowledged_save") collection = db.test_acknowledged_save collection.create_index("hello", unique=True) collection.save({"hello": "world"}) collection.save({"hello": "world"}, w=0) self.assertRaises(DuplicateKeyError, collection.save, {"hello": "world"}) db.drop_collection("test_acknowledged_save") def test_save_adds_id(self): # Tests legacy save. doc = {"hello": "jesse"} self.db.test.save(doc) self.assertTrue("_id" in doc) def test_save_returns_id(self): doc = {"hello": "jesse"} _id = self.db.test.save(doc) self.assertTrue(isinstance(_id, ObjectId)) self.assertEqual(_id, doc["_id"]) doc["hi"] = "bernie" _id = self.db.test.save(doc) self.assertTrue(isinstance(_id, ObjectId)) self.assertEqual(_id, doc["_id"]) def test_remove_one(self): # Tests legacy remove. self.db.test.remove() self.assertEqual(0, self.db.test.count()) self.db.test.insert({"x": 1}) self.db.test.insert({"y": 1}) self.db.test.insert({"z": 1}) self.assertEqual(3, self.db.test.count()) self.db.test.remove(multi=False) self.assertEqual(2, self.db.test.count()) self.db.test.remove() self.assertEqual(0, self.db.test.count()) def test_remove_all(self): # Tests legacy remove. self.db.test.remove() self.assertEqual(0, self.db.test.count()) self.db.test.insert({"x": 1}) self.db.test.insert({"y": 1}) self.assertEqual(2, self.db.test.count()) self.db.test.remove() self.assertEqual(0, self.db.test.count()) def test_remove_non_objectid(self): # Tests legacy remove. db = self.db db.drop_collection("test") db.test.insert_one({"_id": 5}) self.assertEqual(1, db.test.count()) db.test.remove(5) self.assertEqual(0, db.test.count()) def test_write_large_document(self): # Tests legacy insert, save, and update. max_size = self.db.client.max_bson_size half_size = int(max_size / 2) self.assertEqual(max_size, 16777216) expected = DocumentTooLarge if client_context.version.at_least(2, 5, 4, -1): # Document too large handled by the server expected = OperationFailure self.assertRaises(expected, self.db.test.insert, {"foo": "x" * max_size}) self.assertRaises(expected, self.db.test.save, {"foo": "x" * max_size}) self.assertRaises(expected, self.db.test.insert, [{"x": 1}, {"foo": "x" * max_size}]) self.db.test.insert([{"foo": "x" * half_size}, {"foo": "x" * half_size}]) self.db.test.insert({"bar": "x"}) # Use w=0 here to test legacy doc size checking in all server versions self.assertRaises(DocumentTooLarge, self.db.test.update, {"bar": "x"}, {"bar": "x" * (max_size - 14)}, w=0) # This will pass with OP_UPDATE or the update command. self.db.test.update({"bar": "x"}, {"bar": "x" * (max_size - 32)}) def test_last_error_options(self): # Tests legacy write methods. 
self.db.test.save({"x": 1}, w=1, wtimeout=1) self.db.test.insert({"x": 1}, w=1, wtimeout=1) self.db.test.remove({"x": 1}, w=1, wtimeout=1) self.db.test.update({"x": 1}, {"y": 2}, w=1, wtimeout=1) if client_context.replica_set_name: # client_context.w is the number of hosts in the replica set w = client_context.w + 1 # MongoDB 2.8+ raises error code 100, CannotSatisfyWriteConcern, # if w > number of members. Older versions just time out after 1 ms # as if they had enough secondaries but some are lagging. They # return an error with 'wtimeout': True and no code. def wtimeout_err(f, *args, **kwargs): try: f(*args, **kwargs) except WTimeoutError as exc: self.assertIsNotNone(exc.details) except OperationFailure as exc: self.assertIsNotNone(exc.details) self.assertEqual(100, exc.code, "Unexpected error: %r" % exc) else: self.fail("%s should have failed" % f) coll = self.db.test wtimeout_err(coll.save, {"x": 1}, w=w, wtimeout=1) wtimeout_err(coll.insert, {"x": 1}, w=w, wtimeout=1) wtimeout_err(coll.update, {"x": 1}, {"y": 2}, w=w, wtimeout=1) wtimeout_err(coll.remove, {"x": 1}, w=w, wtimeout=1) # can't use fsync and j options together self.assertRaises(ConfigurationError, self.db.test.insert, {"_id": 1}, j=True, fsync=True) def test_find_and_modify(self): c = self.db.test c.drop() c.insert({'_id': 1, 'i': 1}) # Test that we raise DuplicateKeyError when appropriate. # MongoDB doesn't have a code field for DuplicateKeyError # from commands before 2.2. if client_context.version.at_least(2, 2): c.ensure_index('i', unique=True) self.assertRaises(DuplicateKeyError, c.find_and_modify, query={'i': 1, 'j': 1}, update={'$set': {'k': 1}}, upsert=True) c.drop_indexes() # Test correct findAndModify self.assertEqual({'_id': 1, 'i': 1}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) self.assertEqual({'_id': 1, 'i': 3}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, new=True)) self.assertEqual({'_id': 1, 'i': 3}, c.find_and_modify({'_id': 1}, remove=True)) self.assertEqual(None, c.find_one({'_id': 1})) self.assertEqual(None, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) # The return value changed in 2.1.2. See SERVER-6226. if client_context.version.at_least(2, 1, 2): self.assertEqual(None, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, upsert=True)) else: self.assertEqual({}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, upsert=True)) self.assertEqual({'_id': 1, 'i': 2}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, upsert=True, new=True)) self.assertEqual({'_id': 1, 'i': 2}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, fields=['i'])) self.assertEqual({'_id': 1, 'i': 4}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, new=True, fields={'i': 1})) # Test with full_response=True. 
result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, new=True, upsert=True, full_response=True, fields={'i': 1}) self.assertEqual({'_id': 1, 'i': 5}, result["value"]) self.assertEqual(True, result["lastErrorObject"]["updatedExisting"]) result = c.find_and_modify({'_id': 2}, {'$inc': {'i': 1}}, new=True, upsert=True, full_response=True, fields={'i': 1}) self.assertEqual({'_id': 2, 'i': 1}, result["value"]) self.assertEqual(False, result["lastErrorObject"]["updatedExisting"]) class ExtendedDict(dict): pass result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, new=True, fields={'i': 1}) self.assertFalse(isinstance(result, ExtendedDict)) c = self.db.get_collection( "test", codec_options=CodecOptions(document_class=ExtendedDict)) result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, new=True, fields={'i': 1}) self.assertTrue(isinstance(result, ExtendedDict)) def test_find_and_modify_with_sort(self): c = self.db.test c.drop() for j in range(5): c.insert({'j': j, 'i': 0}) sort = {'j': DESCENDING} self.assertEqual(4, c.find_and_modify({}, {'$inc': {'i': 1}}, sort=sort)['j']) sort = {'j': ASCENDING} self.assertEqual(0, c.find_and_modify({}, {'$inc': {'i': 1}}, sort=sort)['j']) sort = [('j', DESCENDING)] self.assertEqual(4, c.find_and_modify({}, {'$inc': {'i': 1}}, sort=sort)['j']) sort = [('j', ASCENDING)] self.assertEqual(0, c.find_and_modify({}, {'$inc': {'i': 1}}, sort=sort)['j']) sort = SON([('j', DESCENDING)]) self.assertEqual(4, c.find_and_modify({}, {'$inc': {'i': 1}}, sort=sort)['j']) sort = SON([('j', ASCENDING)]) self.assertEqual(0, c.find_and_modify({}, {'$inc': {'i': 1}}, sort=sort)['j']) try: from collections import OrderedDict sort = OrderedDict([('j', DESCENDING)]) self.assertEqual(4, c.find_and_modify({}, {'$inc': {'i': 1}}, sort=sort)['j']) sort = OrderedDict([('j', ASCENDING)]) self.assertEqual(0, c.find_and_modify({}, {'$inc': {'i': 1}}, sort=sort)['j']) except ImportError: pass # Test that a standard dict with two keys is rejected. sort = {'j': DESCENDING, 'foo': DESCENDING} self.assertRaises(TypeError, c.find_and_modify, {}, {'$inc': {'i': 1}}, sort=sort) def test_find_and_modify_with_manipulator(self): class AddCollectionNameManipulator(SONManipulator): def will_copy(self): return True def transform_incoming(self, son, dummy): copy = SON(son) if 'collection' in copy: del copy['collection'] return copy def transform_outgoing(self, son, collection): copy = SON(son) copy['collection'] = collection.name return copy db = self.client.pymongo_test db.add_son_manipulator(AddCollectionNameManipulator()) c = db.test c.drop() c.insert({'_id': 1, 'i': 1}) # Test correct findAndModify # With manipulators self.assertEqual({'_id': 1, 'i': 1, 'collection': 'test'}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, manipulate=True)) self.assertEqual({'_id': 1, 'i': 3, 'collection': 'test'}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, new=True, manipulate=True)) # With out manipulators self.assertEqual({'_id': 1, 'i': 3}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) self.assertEqual({'_id': 1, 'i': 5}, c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, new=True)) def test_last_status(self): # Tests many legacy API elements. # We must call getlasterror on same socket as the last operation. 
db = rs_or_single_client(maxPoolSize=1).pymongo_test collection = db.test_last_status collection.remove({}) collection.save({"i": 1}) collection.update({"i": 1}, {"$set": {"i": 2}}, w=0) self.assertTrue(db.last_status()["updatedExisting"]) collection.update({"i": 1}, {"$set": {"i": 500}}, w=0) self.assertFalse(db.last_status()["updatedExisting"]) def test_auto_ref_and_deref(self): # Legacy API. db = self.client.pymongo_test db.add_son_manipulator(AutoReference(db)) db.add_son_manipulator(NamespaceInjector()) db.test.a.remove({}) db.test.b.remove({}) db.test.c.remove({}) a = {"hello": u"world"} db.test.a.save(a) b = {"test": a} db.test.b.save(b) c = {"another test": b} db.test.c.save(c) a["hello"] = "mike" db.test.a.save(a) self.assertEqual(db.test.a.find_one(), a) self.assertEqual(db.test.b.find_one()["test"], a) self.assertEqual(db.test.c.find_one()["another test"]["test"], a) self.assertEqual(db.test.b.find_one(), b) self.assertEqual(db.test.c.find_one()["another test"], b) self.assertEqual(db.test.c.find_one(), c) def test_auto_ref_and_deref_list(self): # Legacy API. db = self.client.pymongo_test db.add_son_manipulator(AutoReference(db)) db.add_son_manipulator(NamespaceInjector()) db.drop_collection("users") db.drop_collection("messages") message_1 = {"title": "foo"} db.messages.save(message_1) message_2 = {"title": "bar"} db.messages.save(message_2) user = {"messages": [message_1, message_2]} db.users.save(user) db.messages.update(message_1, {"title": "buzz"}) self.assertEqual("buzz", db.users.find_one()["messages"][0]["title"]) self.assertEqual("bar", db.users.find_one()["messages"][1]["title"]) def test_object_to_dict_transformer(self): # PYTHON-709: Some users rely on their custom SONManipulators to run # before any other checks, so they can insert non-dict objects and # have them dictified before the _id is inserted or any other # processing. # Tests legacy API elements. class Thing(object): def __init__(self, value): self.value = value class ThingTransformer(SONManipulator): def transform_incoming(self, thing, dummy): return {'value': thing.value} db = self.client.foo db.add_son_manipulator(ThingTransformer()) t = Thing('value') db.test.remove() db.test.insert([t]) out = db.test.find_one() self.assertEqual('value', out.get('value')) def test_son_manipulator_outgoing(self): class Thing(object): def __init__(self, value): self.value = value class ThingTransformer(SONManipulator): def transform_outgoing(self, doc, collection): # We don't want this applied to the command return # value in pymongo.cursor.Cursor. if 'value' in doc: return Thing(doc['value']) return doc db = self.client.foo db.add_son_manipulator(ThingTransformer()) db.test.delete_many({}) db.test.insert_one({'value': 'value'}) out = db.test.find_one() self.assertTrue(isinstance(out, Thing)) self.assertEqual('value', out.value) if client_context.version.at_least(2, 6): out = next(db.test.aggregate([], cursor={})) self.assertTrue(isinstance(out, Thing)) self.assertEqual('value', out.value) def test_son_manipulator_inheritance(self): # Tests legacy API elements. 
class Thing(object): def __init__(self, value): self.value = value class ThingTransformer(SONManipulator): def transform_incoming(self, thing, dummy): return {'value': thing.value} def transform_outgoing(self, son, dummy): return Thing(son['value']) class Child(ThingTransformer): pass db = self.client.foo db.add_son_manipulator(Child()) t = Thing('value') db.test.remove() db.test.insert([t]) out = db.test.find_one() self.assertTrue(isinstance(out, Thing)) self.assertEqual('value', out.value) def test_disabling_manipulators(self): class IncByTwo(SONManipulator): def transform_outgoing(self, son, collection): if 'foo' in son: son['foo'] += 2 return son db = self.client.pymongo_test db.add_son_manipulator(IncByTwo()) c = db.test c.drop() c.insert({'foo': 0}) self.assertEqual(2, c.find_one()['foo']) self.assertEqual(0, c.find_one(manipulate=False)['foo']) self.assertEqual(2, c.find_one(manipulate=True)['foo']) c.drop() def test_manipulator_properties(self): db = self.client.foo self.assertEqual([], db.incoming_manipulators) self.assertEqual([], db.incoming_copying_manipulators) self.assertEqual([], db.outgoing_manipulators) self.assertEqual([], db.outgoing_copying_manipulators) db.add_son_manipulator(AutoReference(db)) db.add_son_manipulator(NamespaceInjector()) db.add_son_manipulator(ObjectIdShuffler()) self.assertEqual(1, len(db.incoming_manipulators)) self.assertEqual(db.incoming_manipulators, ['NamespaceInjector']) self.assertEqual(2, len(db.incoming_copying_manipulators)) for name in db.incoming_copying_manipulators: self.assertTrue(name in ('ObjectIdShuffler', 'AutoReference')) self.assertEqual([], db.outgoing_manipulators) self.assertEqual(['AutoReference'], db.outgoing_copying_manipulators) def test_ensure_index(self): db = self.db self.assertRaises(TypeError, db.test.ensure_index, {"hello": 1}) self.assertRaises(TypeError, db.test.ensure_index, {"hello": 1}, cache_for='foo') db.test.drop_indexes() self.assertEqual("goodbye_1", db.test.ensure_index("goodbye")) self.assertEqual(None, db.test.ensure_index("goodbye")) db.test.drop_indexes() self.assertEqual("foo", db.test.ensure_index("goodbye", name="foo")) self.assertEqual(None, db.test.ensure_index("goodbye", name="foo")) db.test.drop_indexes() self.assertEqual("goodbye_1", db.test.ensure_index("goodbye")) self.assertEqual(None, db.test.ensure_index("goodbye")) db.test.drop_index("goodbye_1") self.assertEqual("goodbye_1", db.test.ensure_index("goodbye")) self.assertEqual(None, db.test.ensure_index("goodbye")) db.drop_collection("test") self.assertEqual("goodbye_1", db.test.ensure_index("goodbye")) self.assertEqual(None, db.test.ensure_index("goodbye")) db.test.drop_index("goodbye_1") self.assertEqual("goodbye_1", db.test.ensure_index("goodbye")) self.assertEqual(None, db.test.ensure_index("goodbye")) db.test.drop_index("goodbye_1") self.assertEqual("goodbye_1", db.test.ensure_index("goodbye", cache_for=1)) time.sleep(1.2) self.assertEqual("goodbye_1", db.test.ensure_index("goodbye")) # Make sure the expiration time is updated. 
self.assertEqual(None, db.test.ensure_index("goodbye")) # Clean up indexes for later tests db.test.drop_indexes() def test_ensure_index_threaded(self): coll = self.db.threaded_index_creation index_docs = [] class Indexer(threading.Thread): def run(self): coll.ensure_index('foo0') coll.ensure_index('foo1') coll.ensure_index('foo2') index_docs.append(coll.index_information()) try: threads = [] for _ in range(10): t = Indexer() t.setDaemon(True) threads.append(t) for thread in threads: thread.start() joinall(threads) first = index_docs[0] for index_doc in index_docs[1:]: self.assertEqual(index_doc, first) finally: coll.drop() def test_ensure_purge_index_threaded(self): coll = self.db.threaded_index_creation class Indexer(threading.Thread): def run(self): coll.ensure_index('foo') try: coll.drop_index('foo') except OperationFailure: # The index may have already been dropped. pass coll.ensure_index('foo') coll.drop_indexes() coll.create_index('foo') try: threads = [] for _ in range(10): t = Indexer() t.setDaemon(True) threads.append(t) for thread in threads: thread.start() joinall(threads) self.assertTrue('foo_1' in coll.index_information()) finally: coll.drop() def test_ensure_unique_index_threaded(self): coll = self.db.test_unique_threaded coll.drop() coll.insert_many([{'foo': i} for i in range(10000)]) class Indexer(threading.Thread): def run(self): try: coll.ensure_index('foo', unique=True) coll.insert_one({'foo': 'bar'}) coll.insert_one({'foo': 'bar'}) except OperationFailure: pass threads = [] for _ in range(10): t = Indexer() t.setDaemon(True) threads.append(t) for i in range(10): threads[i].start() joinall(threads) self.assertEqual(10001, coll.count()) coll.drop() def test_kill_cursors_with_cursoraddress(self): if (client_context.is_mongos and not client_context.version.at_least(2, 4, 7)): # Old mongos sends incorrectly formatted error response when # cursor isn't found, see SERVER-9738. raise SkipTest("Can't test kill_cursors against old mongos") coll = self.client.pymongo_test.test coll.drop() coll.insert_many([{'_id': i} for i in range(200)]) cursor = coll.find().batch_size(1) next(cursor) self.client.kill_cursors( [cursor.cursor_id], _CursorAddress(self.client.address, coll.full_name)) # Prevent killcursors from reaching the server while a getmore is in # progress -- the server logs "Assertion: 16089:Cannot kill active # cursor." time.sleep(2) def raises_cursor_not_found(): try: next(cursor) return False except CursorNotFound: return True wait_until(raises_cursor_not_found, 'close cursor') def test_kill_cursors_with_tuple(self): if (client_context.is_mongos and not client_context.version.at_least(2, 4, 7)): # Old mongos sends incorrectly formatted error response when # cursor isn't found, see SERVER-9738. raise SkipTest("Can't test kill_cursors against old mongos") coll = self.client.pymongo_test.test coll.drop() coll.insert_many([{'_id': i} for i in range(200)]) cursor = coll.find().batch_size(1) next(cursor) self.client.kill_cursors( [cursor.cursor_id], self.client.address) # Prevent killcursors from reaching the server while a getmore is in # progress -- the server logs "Assertion: 16089:Cannot kill active # cursor." time.sleep(2) def raises_cursor_not_found(): try: next(cursor) return False except CursorNotFound: return True wait_until(raises_cursor_not_found, 'close cursor') if __name__ == "__main__": unittest.main()
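The TestDeprecations class above works by promoting DeprecationWarning to an error for the duration of the test class, so each legacy call can simply be asserted with assertRaises. A minimal self-contained sketch of that pattern, using only the standard library (deprecated_save here is a stand-in for a legacy API such as Collection.save, not part of pymongo):

import unittest
import warnings


def deprecated_save(doc):
    # Stand-in for a deprecated API; pymongo's legacy methods warn similarly.
    warnings.warn("save is deprecated, use insert_one or replace_one",
                  DeprecationWarning, stacklevel=2)
    return doc


class TestDeprecationPattern(unittest.TestCase):
    def setUp(self):
        # Promote DeprecationWarning to an exception, mirroring the
        # simplefilter("error", DeprecationWarning) call in TestDeprecations.
        self._warn_context = warnings.catch_warnings()
        self._warn_context.__enter__()
        warnings.simplefilter("error", DeprecationWarning)

    def tearDown(self):
        self._warn_context.__exit__()

    def test_save_raises(self):
        self.assertRaises(DeprecationWarning, lambda: deprecated_save({}))


if __name__ == "__main__":
    unittest.main()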
santhoshtr/silpa
refs/heads/master
src/silpa/modules/hyphenator/hyph.py
3
# -*- coding: utf-8 -*- """ This is a Pure Python module to hyphenate text. It is inspired by Ruby's Text::Hyphen, but currently reads standard *.dic files, that must be installed separately. In the future it's maybe nice if dictionaries could be distributed together with this module, in a slightly prepared form, like in Ruby's Text::Hyphen. Wilbert Berendsen, March 2008 [email protected] License: LGPL. """ import sys import re #__all__ = ("Hyphenator") # cache of per-file Hyph_dict objects hdcache = {} # precompile some stuff parse_hex = re.compile(r'\^{2}([0-9a-f]{2})').sub parse = re.compile(r'(\d?)(\D?)').findall def hexrepl(matchObj): return unichr(int(matchObj.group(1), 16)) class parse_alt(object): """ Parse nonstandard hyphen pattern alternative. The instance returns a special int with data about the current position in the pattern when called with an odd value. """ def __init__(self, pat, alt): alt = alt.split(',') self.change = alt[0] if len(alt) > 2: self.index = int(alt[1]) self.cut = int(alt[2]) + 1 else: self.index = 1 self.cut = len(re.sub(r'[\d\.]', '', pat)) + 1 if pat.startswith('.'): self.index += 1 def __call__(self, val): self.index -= 1 val = int(val) if val & 1: return dint(val, (self.change, self.index, self.cut)) else: return val class dint(int): """ Just an int some other data can be stuck to in a data attribute. Call with ref=other to use the data from the other dint. """ def __new__(cls, value, data=None, ref=None): obj = int.__new__(cls, value) if ref and type(ref) == dint: obj.data = ref.data else: obj.data = data return obj class Hyph_dict(object): """ Reads a hyph_*.dic file and stores the hyphenation patterns. Parameters: -filename : filename of hyph_*.dic to read """ def __init__(self, filename): self.patterns = {} f = open(filename) charset = f.readline().strip() if charset.startswith('charset '): charset = charset[8:].strip() for pat in f: pat = pat.decode(charset).strip() if not pat or pat[0] == '%': continue # replace ^^hh with the real character pat = parse_hex(hexrepl, pat) # read nonstandard hyphen alternatives if '/' in pat: pat, alt = pat.split('/', 1) factory = parse_alt(pat, alt) else: factory = int tag, value = zip(*[(s, factory(i or "0")) for i, s in parse(pat)]) # if only zeros, skip this pattern if max(value) == 0: continue # chop zeros from beginning and end, and store start offset. start, end = 0, len(value) while not value[start]: start += 1 while not value[end-1]: end -= 1 self.patterns[''.join(tag)] = start, value[start:end] f.close() self.cache = {} self.maxlen = max(map(len, self.patterns.keys())) def positions(self, word): """ Returns a list of positions where the word can be hyphenated. E.g. for the dutch word 'lettergrepen' this method returns the list [3, 6, 9]. Each position is a 'data int' (dint) with a data attribute. If the data attribute is not None, it contains a tuple with information about nonstandard hyphenation at that point: (change, index, cut) change: is a string like 'ff=f', that describes how hyphenation should take place. index: where to substitute the change, counting from the current point cut: how many characters to remove while substituting the nonstandard hyphenation """ word = word.lower() points = self.cache.get(word) if points is None: prepWord = '.%s.' 
% word res = [0] * (len(prepWord) + 1) for i in range(len(prepWord) - 1): for j in range(i + 1, min(i + self.maxlen, len(prepWord)) + 1): p = self.patterns.get(prepWord[i:j]) if p: offset, value = p s = slice(i + offset, i + offset + len(value)) res[s] = map(max, value, res[s]) points = [dint(i - 1, ref=r) for i, r in enumerate(res) if r % 2] self.cache[word] = points return points class Hyphenator(): """ Reads a hyph_*.dic file and stores the hyphenation patterns. Provides methods to hyphenate strings in various ways. Parameters: -filename : filename of hyph_*.dic to read -left: make the first syllabe not shorter than this -right: make the last syllabe not shorter than this -cache: if true (default), use a cached copy of the dic file, if possible left and right may also later be changed: h = Hyphenator(file) h.left = 1 """ #self.left=2 #def __init__(self, left=2, right=2, cache=True): left = 1 right = 1 def loadHyphDict(self,lang, cache=True): filename="./rules/hyph_"+lang+".dic" if not cache or filename not in hdcache: hdcache[filename] = Hyph_dict(filename) self.hd = hdcache[filename] def positions(self, word): """ Returns a list of positions where the word can be hyphenated. See also Hyph_dict.positions. The points that are too far to the left or right are removed. """ right = len(word) - self.right return [i for i in self.hd.positions(word) if self.left <= i <= right] def iterate(self, word): """ Iterate over all hyphenation possibilities, the longest first. """ if isinstance(word, str): word = word.decode('latin1') for p in reversed(self.positions(word)): if p.data: # get the nonstandard hyphenation data change, index, cut = p.data if word.isupper(): change = change.upper() c1, c2 = change.split('=') yield word[:p+index] + c1, c2 + word[p+index+cut:] else: yield word[:p], word[p:] def wrap(self, word, width, hyphen='-'): """ Return the longest possible first part and the last part of the hyphenated word. The first part has the hyphen already attached. Returns None, if there is no hyphenation point before width, or if the word could not be hyphenated. """ width -= len(hyphen) for w1, w2 in self.iterate(word): if len(w1) <= width: return w1 + hyphen, w2 def inserted(self, word, hyphen='-'): """ Returns the word as a string with all the possible hyphens inserted. E.g. for the dutch word 'lettergrepen' this method returns the string 'let-ter-gre-pen'. The hyphen string to use can be given as the second parameter, that defaults to '-'. """ if isinstance(word, str): word = word.decode('latin1') l = list(word) for p in reversed(self.positions(word)): if p.data: # get the nonstandard hyphenation data change, index, cut = p.data if word.isupper(): change = change.upper() l[p + index : p + index + cut] = change.replace('=', hyphen) else: l.insert(p, hyphen) return ''.join(l) if __name__ == "__main__": h = Hyphenator() h.loadHyphDict("ta_IN") print h.inserted(u'வணக்கம். என் பெயர் ராமதாசன்.')
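A minimal usage sketch for the Hyphenator above. The import path, the language code, and the presence of a matching ./rules/hyph_<lang>.dic pattern file are all assumptions; like the module itself, the sketch targets Python 2 (print statements, unicode literals):

# -*- coding: utf-8 -*-
from hyph import Hyphenator  # assumes hyph.py is importable from the current directory

h = Hyphenator()
h.left = 2                      # never hyphenate inside the first 2 characters
h.right = 2                     # ... or inside the last 2 characters
h.loadHyphDict("nl_NL")         # assumes ./rules/hyph_nl_NL.dic exists

print h.positions(u'lettergrepen')   # e.g. [3, 6, 9], as in the docstring example
print h.inserted(u'lettergrepen')    # e.g. 'let-ter-gre-pen'
print h.wrap(u'lettergrepen', 7)     # e.g. ('letter-', 'grepen')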
gnowxilef/Wox
refs/heads/master
PythonHome/Lib/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py
1093
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. # Passes Python2.7's test suite and incorporates all the latest updates. # Copyright 2009 Raymond Hettinger, released under the MIT License. # http://code.activestate.com/recipes/576693/ try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. 
''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' call_key = id(self), _get_ident() if call_key in _repr_running: return '...' _repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. 
''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self)
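A short usage sketch of the backported OrderedDict above, exercising the behaviors its docstrings promise: insertion order is preserved, popitem is LIFO by default and FIFO with last=False, and equality is order-sensitive against another OrderedDict but order-insensitive against a plain dict (importing from a local ordered_dict module is an assumption, since the real file is vendored deep inside pip):

from ordered_dict import OrderedDict  # assumed import; adjust to where the module lives

od = OrderedDict()
od['a'] = 1
od['b'] = 2
od['c'] = 3
print(list(od))                 # ['a', 'b', 'c'] -- insertion order preserved
print(od.popitem())             # ('c', 3)  -- LIFO by default
print(od.popitem(last=False))   # ('a', 1)  -- FIFO when last=False

x = OrderedDict([('a', 1), ('b', 2)])
y = OrderedDict([('b', 2), ('a', 1)])
print(x == y)                   # False: order matters between OrderedDicts
print(x == {'b': 2, 'a': 1})    # True: plain dict comparison ignores order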
carlodri/moviepy
refs/heads/master
moviepy/audio/fx/audio_fadeout.py
18
from moviepy.decorators import audio_video_fx, requires_duration import numpy as np @audio_video_fx @requires_duration def audio_fadeout(clip, duration): """ Return a sound clip where the sound fades out progressively over ``duration`` seconds at the end of the clip. """ def fading(gf,t): gft = gf(t) if np.isscalar(t): factor = min(1.0 * (clip.duration - t) / duration, 1) factor = np.array([factor,factor]) else: factor = np.minimum( 1.0 * (clip.duration - t) / duration, 1) factor = np.vstack([factor,factor]).T return factor * gft return clip.fl(fading, keep_duration = True)
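A usage sketch for the audio_fadeout effect above. The file names are placeholders; because the function is decorated with @audio_video_fx it can be applied to an audio clip directly or through the generic fx mechanism, as shown here:

from moviepy.editor import AudioFileClip
from moviepy.audio.fx.audio_fadeout import audio_fadeout

clip = AudioFileClip("song.mp3")           # placeholder input file
faded = clip.fx(audio_fadeout, 3.0)        # equivalent to audio_fadeout(clip, 3.0)
faded.write_audiofile("song_faded.mp3")    # placeholder output file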
netsec-ethz/scion
refs/heads/scionlab
acceptance/common/go.py
2
# Copyright 2020 Anapaya Systems from plumbum import local from plumbum.machines import LocalMachine from typing import Tuple from pathlib import Path def test(package: str) -> Tuple[int, str, str]: """ Runs the Go tests in package, using the Go binary from the bazel cache. The return value is a (retcode, stdout, stderr) plumbum tuple. """ local.env["ACCEPTANCE"] = 1 go = _go_cmd() go = go["test", package] return go.run(retcode=None) def _go_cmd() -> LocalMachine: bazel_info_output = local["bazel"]("info", "output_base") # Remove new line at end of output base_path = bazel_info_output.strip() go_bin_path = Path(base_path) / "external" / "go_sdk" / "bin" / "go" return local[str(go_bin_path)]
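A sketch of how the helper above might be invoked from an acceptance test. The package path is a placeholder, the import assumes the repository root is on PYTHONPATH, and bazel with its managed Go SDK must be available, as _go_cmd requires:

from acceptance.common import go  # assumes the repo root is on PYTHONPATH

retcode, stdout, stderr = go.test("./pkg/example/...")  # placeholder package path
if retcode != 0:
    raise RuntimeError("go test failed:\n%s\n%s" % (stdout, stderr))
print(stdout)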
ramezquitao/pyoptools
refs/heads/master
pyoptools/misc/resources/__init__.py
2
from .resources import (detectCPUs, detectOpenCL, has_double_support, has_amd_double_support) __all__ = ["detectCPUs", "detectOpenCL", "has_double_support", "has_amd_double_support"]
Aiacos/DevPyLib
refs/heads/master
mayaLib/rigLib/utils/proxyGeo.py
1
__author__ = 'Lorenzo Argentieri' import pymel.core as pm from mayaLib.rigLib.utils import util, common from mayaLib.rigLib.utils import name from mayaLib.rigLib.utils import skin def invertSelection(shape, faces): pm.select(shape+'.f[*]') pm.select(faces, deselect=True) #mel.eval('InvertSelection;') return pm.ls(sl=True) class ProxyGeo(): def __init__(self, geo, doParentCnst=True, threshold=0.45): self.proxyGeoList = [] pivotLocator = pm.spaceLocator(n='pivotGeo_LOC') # Create proxy geo Group self.shapeGrp = pm.group(n='fastGeo_GRP', em=True) # Get Shape and skin from Object skinCluster = skin.findRelatedSkinCluster(geo) if not skinCluster: print 'Missing SkinCluster' else: self.skin = skinCluster # Get joint influence of the skin influnces = self.skin.getInfluence(q=True) # influences is joint for joint in influnces: # duplicate mesh for a control transform, dupliShape = self.duplicateSourceMesh(obj=geo, joint=joint) common.centerPivot(transform, pivotLocator) # copy skinCluster skin.copyBind(pm.ls(geo)[0], transform) # delete faces in the new shape based on selected joint self.deleteVertex(joint=joint, newShape=dupliShape, threshold=threshold) # delete non deformer history common.deleteHistory(dupliShape) # parent under proxy group pm.parent(transform, self.shapeGrp) self.proxyGeoList.append(transform) # parentConstraint with joint if doParentCnst: pm.parentConstraint(joint, transform, mo=True) # delete pivot locator pm.delete(pivotLocator) def duplicateSourceMesh(self, obj, joint): """ :param obj: :param ctrl: :return: Mesh Shape for the Control """ dupliObj = pm.duplicate(obj) pm.rename(dupliObj, name.removeSuffix(joint)+'_PRX') return dupliObj[0], dupliObj[0].getShape() def deleteVertex(self, joint, newShape, threshold=0.45): verts = [] skincluster = skin.findRelatedSkinCluster(newShape) for x in range(pm.polyEvaluate(newShape, v=1)): v = pm.skinPercent(skincluster, '%s.vtx[%d]' % (newShape, x), transform=joint, q=1) if v > threshold: verts.append('%s.vtx[%d]' % (newShape, x)) pm.select(verts) faces = pm.polyListComponentConversion(verts, fromVertex=True, toFace=True) # pm.select(faces) toDelete = invertSelection(newShape, faces) pm.polyDelFacet(toDelete, ch=False) def getProxyGeoList(self): return self.proxyGeoList def getFastGeoGroup(self): return self.shapeGrp if __name__ == "__main__": prxGeo = ProxyGeo()
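A usage sketch for the ProxyGeo builder above. It only runs inside Maya with pymel available, body_GEO is a placeholder for any skinned mesh in the scene, and (unlike the __main__ block in the file, which omits it) the constructor is given the geometry argument it requires:

# Maya only: build one proxy mesh per influence joint of a skinned mesh.
import pymel.core as pm
from mayaLib.rigLib.utils.proxyGeo import ProxyGeo  # import path mirrors the file location

geo = pm.ls('body_GEO')[0]                        # placeholder: any skinned mesh
prx = ProxyGeo(geo, doParentCnst=True, threshold=0.45)
print(prx.getProxyGeoList())                      # one proxy transform per influence joint
print(prx.getFastGeoGroup())                      # the 'fastGeo_GRP' group node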
Qinusty/rethinkdb
refs/heads/next
test/memcached_workloads/multi_serial_mix.py
21
#!/usr/bin/env python # Copyright 2010-2012 RethinkDB, all rights reserved. from __future__ import print_function import sys, os sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common'))) import multiprocessing, time, pickle import memcached_workload_common, serial_mix from vcoptparse import * def child(opts, log_path, load, save): # This is run in a separate process import sys # TODO: this overwrites existing log files sys.stdout = sys.stderr = file(log_path, "w") if load is None: clone, deleted = {}, set() else: print("Loading from %r..." % load) with open(load) as f: clone, deleted = pickle.load(f) print("Starting test against server at %s..." % opts["address"]) with memcached_workload_common.make_memcache_connection(opts) as mc: serial_mix.test(opts, mc, clone, deleted) if save is not None: print("Saving to %r..." % save) with open(save, "w") as f: pickle.dump((clone, deleted), f) print("Done with test.") op = serial_mix.option_parser_for_serial_mix() op["num_testers"] = IntFlag("--num-testers", 16) op["load"] = StringFlag("--load", None) op["save"] = StringFlag("--save", None) opts = op.parse(sys.argv) shutdown_grace_period = 15 tester_log_dir = "multi_serial_mix_out" if not os.path.isdir(tester_log_dir): os.mkdir(tester_log_dir) processes = [] try: print("Starting %d child processes, writing to %r" % (opts["num_testers"], tester_log_dir)) for id in xrange(opts["num_testers"]): log_path = os.path.join(tester_log_dir, "%d.txt" % id) load_path = opts["load"] + "_%d" % id if opts["load"] is not None else None save_path = opts["save"] + "_%d" % id if opts["save"] is not None else None opts2 = dict(opts) opts2["keysuffix"] = "_%d" % id # Prevent collisions between tests process = multiprocessing.Process(target=child, args=(opts2, log_path, load_path, save_path)) process.start() processes.append((process, id)) print("Waiting for child processes...") start_time = time.time() def time_remaining(): time_elapsed = time.time() - start_time # Give subprocesses lots of extra time return opts["duration"] * 2 - time_elapsed + 1 for process, id in processes: tr = time_remaining() if tr <= 0: tr = shutdown_grace_period process.join(tr) stuck = sorted(id for (process, id) in processes if process.is_alive()) failed = sorted(id for (process, id) in processes if not process.is_alive() and process.exitcode != 0) if stuck or failed: for id in stuck + failed: with file(os.path.join(tester_log_dir, str(id) + ".txt")) as f: for line in f: sys.stdout.write(line) if len(stuck) == opts["num_testers"]: raise ValueError("All %d processes did not finish in time." % opts["num_testers"]) elif len(failed) == opts["num_testers"]: raise ValueError("All %d processes failed." % opts["num_testers"]) else: raise ValueError( "Of processes [1 ... %d], the following did not finish in time: " "%s and the following failed: %s" % (opts["num_testers"], stuck, failed) ) finally: for (process, id) in processes: if process.is_alive(): process.terminate() print("Done with multi_serial_mix.")
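The harness above fans a serial workload out across multiprocessing.Process workers, gives each its own key suffix and log file, joins them against a deadline, and then reports which workers got stuck or failed. A small self-contained sketch of that fan-out/join pattern (the worker body is a stand-in, not the memcached workload):

import multiprocessing
import time


def worker(worker_id, log_path):
    # Stand-in for child(): do some work and record progress in a per-worker log.
    with open(log_path, "w") as log:
        log.write("worker %d starting\n" % worker_id)
        time.sleep(0.1)
        log.write("worker %d done\n" % worker_id)


if __name__ == "__main__":
    num_workers = 4
    deadline = 5.0
    procs = []
    for i in range(num_workers):
        p = multiprocessing.Process(target=worker, args=(i, "worker_%d.txt" % i))
        p.start()
        procs.append((p, i))

    start = time.time()
    for p, i in procs:
        remaining = max(deadline - (time.time() - start), 0.1)
        p.join(remaining)

    stuck = sorted(i for p, i in procs if p.is_alive())
    failed = sorted(i for p, i in procs
                    if not p.is_alive() and p.exitcode != 0)
    for p, _ in procs:
        if p.is_alive():
            p.terminate()
    if stuck or failed:
        raise RuntimeError("stuck workers: %s, failed workers: %s" % (stuck, failed))
    print("all %d workers finished cleanly" % num_workers)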
youdonghai/intellij-community
refs/heads/master
python/testData/override/overriddenMethodRaisesNotImplementedError.py
79
class A: def m(self): """Abstract method.""" raise NotImplementedError('Should not be called directly') class B(A): pass
juharris/tensorflow
refs/heads/master
tensorflow/contrib/distributions/python/kernel_tests/gamma_test.py
8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from scipy import stats import tensorflow as tf class GammaTest(tf.test.TestCase): def testGammaShape(self): with self.test_session(): alpha = tf.constant([3.0] * 5) beta = tf.constant(11.0) gamma = tf.contrib.distributions.Gamma(alpha=alpha, beta=beta) self.assertEqual(gamma.batch_shape().eval(), (5,)) self.assertEqual(gamma.get_batch_shape(), tf.TensorShape([5])) self.assertAllEqual(gamma.event_shape().eval(), []) self.assertEqual(gamma.get_event_shape(), tf.TensorShape([])) def testGammaLogPDF(self): with self.test_session(): batch_size = 6 alpha = tf.constant([2.0] * batch_size) beta = tf.constant([3.0] * batch_size) alpha_v = 2.0 beta_v = 3.0 x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32) gamma = tf.contrib.distributions.Gamma(alpha=alpha, beta=beta) expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v) log_pdf = gamma.log_pdf(x) self.assertEqual(log_pdf.get_shape(), (6,)) self.assertAllClose(log_pdf.eval(), expected_log_pdf) pdf = gamma.pdf(x) self.assertEqual(pdf.get_shape(), (6,)) self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf)) def testGammaLogPDFMultidimensional(self): with self.test_session(): batch_size = 6 alpha = tf.constant([[2.0, 4.0]] * batch_size) beta = tf.constant([[3.0, 4.0]] * batch_size) alpha_v = np.array([2.0, 4.0]) beta_v = np.array([3.0, 4.0]) x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T gamma = tf.contrib.distributions.Gamma(alpha=alpha, beta=beta) expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v) log_pdf = gamma.log_pdf(x) log_pdf_values = log_pdf.eval() self.assertEqual(log_pdf.get_shape(), (6, 2)) self.assertAllClose(log_pdf_values, expected_log_pdf) pdf = gamma.pdf(x) pdf_values = pdf.eval() self.assertEqual(pdf.get_shape(), (6, 2)) self.assertAllClose(pdf_values, np.exp(expected_log_pdf)) def testGammaLogPDFMultidimensionalBroadcasting(self): with self.test_session(): batch_size = 6 alpha = tf.constant([[2.0, 4.0]] * batch_size) beta = tf.constant(3.0) alpha_v = np.array([2.0, 4.0]) beta_v = 3.0 x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T gamma = tf.contrib.distributions.Gamma(alpha=alpha, beta=beta) expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v) log_pdf = gamma.log_pdf(x) log_pdf_values = log_pdf.eval() self.assertEqual(log_pdf.get_shape(), (6, 2)) self.assertAllClose(log_pdf_values, expected_log_pdf) pdf = gamma.pdf(x) pdf_values = pdf.eval() self.assertEqual(pdf.get_shape(), (6, 2)) self.assertAllClose(pdf_values, np.exp(expected_log_pdf)) def testGammaCDF(self): with self.test_session(): batch_size = 6 alpha = tf.constant([2.0] * batch_size) beta = tf.constant([3.0] * batch_size) alpha_v = 2.0 beta_v = 3.0 x = np.array([2.5, 2.5, 4.0, 
0.1, 1.0, 2.0], dtype=np.float32) gamma = tf.contrib.distributions.Gamma(alpha=alpha, beta=beta) expected_cdf = stats.gamma.cdf(x, alpha_v, scale=1 / beta_v) cdf = gamma.cdf(x) self.assertEqual(cdf.get_shape(), (6,)) self.assertAllClose(cdf.eval(), expected_cdf) def testGammaMean(self): with self.test_session(): alpha_v = np.array([1.0, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) gamma = tf.contrib.distributions.Gamma(alpha=alpha_v, beta=beta_v) expected_means = stats.gamma.mean(alpha_v, scale=1 / beta_v) self.assertEqual(gamma.mean().get_shape(), (3,)) self.assertAllClose(gamma.mean().eval(), expected_means) def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self): with self.test_session(): alpha_v = np.array([5.5, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) gamma = tf.contrib.distributions.Gamma( alpha=alpha_v, beta=beta_v) expected_modes = (alpha_v - 1) / beta_v self.assertEqual(gamma.mode().get_shape(), (3,)) self.assertAllClose(gamma.mode().eval(), expected_modes) def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self): with self.test_session(): # Mode will not be defined for the first entry. alpha_v = np.array([0.5, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) gamma = tf.contrib.distributions.Gamma( alpha=alpha_v, beta=beta_v, allow_nan_stats=False) with self.assertRaisesOpError("x < y"): gamma.mode().eval() def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self): with self.test_session(): # Mode will not be defined for the first entry. alpha_v = np.array([0.5, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) gamma = tf.contrib.distributions.Gamma( alpha=alpha_v, beta=beta_v, allow_nan_stats=True) expected_modes = (alpha_v - 1) / beta_v expected_modes[0] = np.nan self.assertEqual(gamma.mode().get_shape(), (3,)) self.assertAllClose(gamma.mode().eval(), expected_modes) def testGammaVariance(self): with self.test_session(): alpha_v = np.array([1.0, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) gamma = tf.contrib.distributions.Gamma(alpha=alpha_v, beta=beta_v) expected_variances = stats.gamma.var(alpha_v, scale=1 / beta_v) self.assertEqual(gamma.variance().get_shape(), (3,)) self.assertAllClose(gamma.variance().eval(), expected_variances) def testGammaStd(self): with self.test_session(): alpha_v = np.array([1.0, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) gamma = tf.contrib.distributions.Gamma(alpha=alpha_v, beta=beta_v) expected_std = stats.gamma.std(alpha_v, scale=1 / beta_v) self.assertEqual(gamma.std().get_shape(), (3,)) self.assertAllClose(gamma.std().eval(), expected_std) def testGammaEntropy(self): with self.test_session(): alpha_v = np.array([1.0, 3.0, 2.5]) beta_v = np.array([1.0, 4.0, 5.0]) expected_entropy = stats.gamma.entropy(alpha_v, scale=1 / beta_v) gamma = tf.contrib.distributions.Gamma(alpha=alpha_v, beta=beta_v) self.assertEqual(gamma.entropy().get_shape(), (3,)) self.assertAllClose(gamma.entropy().eval(), expected_entropy) def testGammaSampleSmallAlpha(self): with tf.Session(): alpha_v = 0.05 beta_v = 1.0 alpha = tf.constant(alpha_v) beta = tf.constant(beta_v) n = 100000 gamma = tf.contrib.distributions.Gamma(alpha=alpha, beta=beta) samples = gamma.sample_n(n, seed=137) sample_values = samples.eval() self.assertEqual(samples.get_shape(), (n,)) self.assertEqual(sample_values.shape, (n,)) self.assertAllClose( sample_values.mean(), stats.gamma.mean( alpha_v, scale=1 / beta_v), atol=.01) self.assertAllClose( sample_values.var(), stats.gamma.var(alpha_v, scale=1 / beta_v), atol=.15) 
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values)) def testGammaSample(self): with tf.Session(): alpha_v = 4.0 beta_v = 3.0 alpha = tf.constant(alpha_v) beta = tf.constant(beta_v) n = 100000 gamma = tf.contrib.distributions.Gamma(alpha=alpha, beta=beta) samples = gamma.sample_n(n, seed=137) sample_values = samples.eval() self.assertEqual(samples.get_shape(), (n,)) self.assertEqual(sample_values.shape, (n,)) self.assertAllClose( sample_values.mean(), stats.gamma.mean( alpha_v, scale=1 / beta_v), atol=.01) self.assertAllClose(sample_values.var(), stats.gamma.var(alpha_v, scale=1 / beta_v), atol=.15) self.assertTrue(self._kstest(alpha_v, beta_v, sample_values)) def testGammaSampleMultiDimensional(self): with tf.Session(): alpha_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100 beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1 gamma = tf.contrib.distributions.Gamma(alpha=alpha_v, beta=beta_v) n = 10000 samples = gamma.sample_n(n, seed=137) sample_values = samples.eval() self.assertEqual(samples.get_shape(), (n, 10, 100)) self.assertEqual(sample_values.shape, (n, 10, 100)) zeros = np.zeros_like(alpha_v + beta_v) # 10 x 100 alpha_bc = alpha_v + zeros beta_bc = beta_v + zeros self.assertAllClose( sample_values.mean(axis=0), stats.gamma.mean( alpha_bc, scale=1 / beta_bc), rtol=.035) self.assertAllClose( sample_values.var(axis=0), stats.gamma.var(alpha_bc, scale=1 / beta_bc), atol=4.5) fails = 0 trials = 0 for ai, a in enumerate(np.reshape(alpha_v, [-1])): for bi, b in enumerate(np.reshape(beta_v, [-1])): s = sample_values[:, bi, ai] trials += 1 fails += 0 if self._kstest(a, b, s) else 1 self.assertLess(fails, trials * 0.03) def _kstest(self, alpha, beta, samples): # Uses the Kolmogorov-Smirnov test for goodness of fit. ks, _ = stats.kstest(samples, stats.gamma(alpha, scale=1 / beta).cdf) # Return True when the test passes. 
return ks < 0.02 def testGammaPdfOfSampleMultiDims(self): with tf.Session() as sess: gamma = tf.contrib.distributions.Gamma(alpha=[7., 11.], beta=[[5.], [6.]]) num = 50000 samples = gamma.sample_n(num, seed=137) pdfs = gamma.pdf(samples) sample_vals, pdf_vals = sess.run([samples, pdfs]) self.assertEqual(samples.get_shape(), (num, 2, 2)) self.assertEqual(pdfs.get_shape(), (num, 2, 2)) self.assertAllClose( stats.gamma.mean([[7., 11.], [7., 11.]], scale=1 / np.array([[5., 5.], [6., 6.]])), sample_vals.mean(axis=0), atol=.1) self.assertAllClose( stats.gamma.var([[7., 11.], [7., 11.]], scale=1 / np.array([[5., 5.], [6., 6.]])), sample_vals.var(axis=0), atol=.1) self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02) self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02) self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02) self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02) def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3): s_p = zip(sample_vals, pdf_vals) prev = (0, 0) total = 0 for k in sorted(s_p, key=lambda x: x[0]): pair_pdf = (k[1] + prev[1]) / 2 total += (k[0] - prev[0]) * pair_pdf prev = k self.assertNear(1., total, err=err) def testGammaNonPositiveInitializationParamsRaises(self): with self.test_session(): alpha_v = tf.constant(0.0, name="alpha") beta_v = tf.constant(1.0, name="beta") gamma = tf.contrib.distributions.Gamma(alpha=alpha_v, beta=beta_v, validate_args=True) with self.assertRaisesOpError("alpha"): gamma.mean().eval() alpha_v = tf.constant(1.0, name="alpha") beta_v = tf.constant(0.0, name="beta") gamma = tf.contrib.distributions.Gamma(alpha=alpha_v, beta=beta_v, validate_args=True) with self.assertRaisesOpError("beta"): gamma.mean().eval() def testGammaWithSoftplusAlphaBeta(self): with self.test_session(): alpha_v = tf.constant([0.0, -2.1], name="alpha") beta_v = tf.constant([1.0, -3.6], name="beta") gamma = tf.contrib.distributions.GammaWithSoftplusAlphaBeta( alpha=alpha_v, beta=beta_v) self.assertAllEqual(tf.nn.softplus(alpha_v).eval(), gamma.alpha.eval()) self.assertAllEqual(tf.nn.softplus(beta_v).eval(), gamma.beta.eval()) if __name__ == "__main__": tf.test.main()
ljhljh235/AutoRest
refs/heads/master
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/Http/auto_rest_http_infrastructure_test_service/models/__init__.py
31
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .error import Error, ErrorException
from .a import A, MyException
from .b import B
from .c import C
from .d import D

__all__ = [
    'Error', 'ErrorException',
    'A', 'MyException',
    'B',
    'C',
    'D',
]
cedadev/dapbench
refs/heads/master
dapbench/thredds/lib/configuration.py
1
# BSD Licence
# Copyright (c) 2012, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.

'''
Checker configuration class.

Created on 29 Sep 2011

@author: rwilkinson
'''

class Configuration(object):
    """Checker configuration.
    """
    def __init__(self, key_file, cert_file, debug, recurse=False, listonly=False,
                 quiet=False, services_to_test="", service_extensions=None,
                 public_service_extensions=None, forbidden_service_extensions=None,
                 required_container_properties=None, required_file_properties=None):
        """
        @param key_file - file containing the user's private key
        @param cert_file - file containing the user's certificate
        @param services_to_test - list of service types for which to test access
        @param debug - if True, output debugging information
        @param recurse - if True, recurse into linked catalogs
        @param listonly - if True, only list datasets, otherwise check access
        @param quiet - if True, produce minimal output
        @param service_extensions - extensions for services that can have multiple extensions
        @param public_service_extensions - service types and extensions for which public access is expected
        @param forbidden_service_extensions - service types and extensions for which no access is expected
        @param required_container_properties - dataset properties required for container datasets
        @param required_file_properties - dataset properties required for file datasets
        """
        self.key_file = key_file
        self.cert_file = cert_file
        self.services_to_test = (
            [s.strip() for s in services_to_test.split(',')] if services_to_test else [])
        self.debug = debug
        self.recurse = recurse
        self.listonly = listonly
        self.quiet = quiet
        self.service_extensions = self.parse_dict_of_lists(service_extensions)
        self.public_service_extensions = self.parse_dict_of_lists(public_service_extensions)
        self.forbidden_service_extensions = self.parse_dict_of_lists(forbidden_service_extensions)
        self.required_container_properties = (
            [p.strip() for p in required_container_properties.split(',')]
            if required_container_properties else [])
        self.required_file_properties = (
            [p.strip() for p in required_file_properties.split(',')]
            if required_file_properties else [])

    @staticmethod
    def parse_dict_of_lists(in_str):
        """Parses a string in the format
        key1:item11,item12,...;key2:item21,item22...;...
        into a dict with keys key1, key2, ... and values lists [item11, item12]
        and so on.
        @param in_str - string to parse
        @return dict of lists of items
        """
        result = {}
        if in_str is not None:
            outer_list = in_str.split(';')
            for item in outer_list:
                (key, sep, inner_str) = item.partition(':')
                inner_list = inner_str.split(',')
                result[key] = inner_list
        return result
asedunov/intellij-community
refs/heads/master
python/testData/formatter/commentInEmptyTuple.py
80
var_name = (
    # comment
)
petrus-v/odoo
refs/heads/8.0
addons/l10n_pl/__openerp__.py
277
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2009 - now Grzegorz Grzelak [email protected] # All Rights Reserved # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name' : 'Poland - Accounting', 'version' : '1.02', 'author' : 'Grzegorz Grzelak (OpenGLOBE)', 'website': 'http://www.openglobe.pl', 'category' : 'Localization/Account Charts', 'description': """ This is the module to manage the accounting chart and taxes for Poland in OpenERP. ================================================================================== To jest moduł do tworzenia wzorcowego planu kont, podatków, obszarów podatkowych i rejestrów podatkowych. Moduł ustawia też konta do kupna i sprzedaży towarów zakładając, że wszystkie towary są w obrocie hurtowym. Niniejszy moduł jest przeznaczony dla odoo 8.0. Wewnętrzny numer wersji OpenGLOBE 1.02 """, 'depends' : ['account', 'base_iban', 'base_vat', 'account_chart'], 'demo' : [], 'data' : ['account_tax_code.xml', 'account_chart.xml', 'account_tax.xml', 'fiscal_position.xml', 'country_pl.xml', 'l10n_chart_pl_wizard.xml' ], 'auto_install': False, 'installable': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
elfnor/sverchok
refs/heads/master
nodes/list_struct/repeater.py
3
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### import bpy from bpy.props import BoolProperty, IntProperty, StringProperty from sverchok.node_tree import SverchCustomTreeNode from sverchok.data_structure import (updateNode, changable_sockets) class ListRepeaterNode(bpy.types.Node, SverchCustomTreeNode): ''' List repeater ''' bl_idname = 'ListRepeaterNode' bl_label = 'List Repeater' bl_icon = 'OUTLINER_OB_EMPTY' level = IntProperty(name='level', default=1, min=0, update=updateNode) number = IntProperty(name='number', default=1, min=1, update=updateNode) unwrap = BoolProperty(name='unwrap', default=False, update=updateNode) typ = StringProperty(name='typ', default='') newsock = BoolProperty(name='newsock', default=False) def draw_buttons(self, context, layout): layout.prop(self, "level", text="level") layout.prop(self, "unwrap", text="unwrap") def sv_init(self, context): self.inputs.new('StringsSocket', "Data", "Data") self.inputs.new('StringsSocket', "Number", "Number").prop_name = 'number' self.outputs.new('StringsSocket', "Data", "Data") def update(self): if 'Data' in self.inputs and self.inputs['Data'].links: inputsocketname = 'Data' outputsocketname = ['Data', ] changable_sockets(self, inputsocketname, outputsocketname) def process(self): if self.inputs['Data'].is_linked: data = self.inputs['Data'].sv_get() if self.inputs['Number'].is_linked: tmp = self.inputs['Number'].sv_get() Number = tmp[0] else: Number = [self.number] if self.outputs['Data'].is_linked: out_ = self.count(data, self.level, Number) if self.unwrap: if len(out_) > 0: out = [] for o in out_: out.extend(o) else: out = out_ self.outputs['Data'].sv_set(out) def count(self, data, level, number, cou=0): if level: out = [] for idx, obj in enumerate(data): out.append(self.count(obj, level - 1, number, idx)) else: out = [] indx = min(cou, len(number) - 1) for i in range(int(number[indx])): out.append(data) return out def register(): bpy.utils.register_class(ListRepeaterNode) def unregister(): bpy.utils.unregister_class(ListRepeaterNode)
grengojbo/st2
refs/heads/master
st2actions/st2actions/cmd/actionrunner.py
3
import eventlet
import os
import sys

from st2actions import config
from st2actions import scheduler, worker
from st2common import log as logging
from st2common.service_setup import setup as common_setup
from st2common.service_setup import teardown as common_teardown

LOG = logging.getLogger(__name__)

eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=False if '--use-debugger' in sys.argv else True,
    time=True)


def _setup():
    common_setup(service='actionrunner', config=config, setup_db=True,
                 register_mq_exchanges=True, register_signal_handlers=True)


def _run_worker():
    LOG.info('(PID=%s) Worker started.', os.getpid())

    components = [
        scheduler.get_scheduler(),
        worker.get_worker()
    ]

    try:
        for component in components:
            component.start()

        for component in components:
            component.wait()
    except (KeyboardInterrupt, SystemExit):
        LOG.info('(PID=%s) Worker stopped.', os.getpid())

        errors = False

        for component in components:
            try:
                component.shutdown()
            except:
                LOG.exception('Unable to shutdown %s.', component.__class__.__name__)
                errors = True

        if errors:
            return 1
    except:
        LOG.exception('(PID=%s) Worker unexpectedly stopped.', os.getpid())
        return 1

    return 0


def _teardown():
    common_teardown()


def main():
    try:
        _setup()
        return _run_worker()
    except SystemExit as exit_code:
        sys.exit(exit_code)
    except:
        LOG.exception('(PID=%s) Worker quit due to exception.', os.getpid())
        return 1
    finally:
        _teardown()
lvh/pyopenssl
refs/heads/master
examples/sni/server.py
63
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.

if __name__ == '__main__':
    import server
    raise SystemExit(server.main())

from sys import stdout
from socket import SOL_SOCKET, SO_REUSEADDR, socket

from OpenSSL.crypto import FILETYPE_PEM, load_privatekey, load_certificate
from OpenSSL.SSL import TLSv1_METHOD, Context, Connection


def load(domain):
    crt = open(domain + ".crt")
    key = open(domain + ".key")
    result = (
        load_privatekey(FILETYPE_PEM, key.read()),
        load_certificate(FILETYPE_PEM, crt.read()))
    crt.close()
    key.close()
    return result


def main():
    """
    Run an SNI-enabled server which selects between a few certificates in a
    C{dict} based on the handshake request it receives from a client.
    """
    port = socket()
    port.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    port.bind(('', 8443))
    port.listen(3)

    print 'Accepting...',
    stdout.flush()
    server, addr = port.accept()
    print 'accepted', addr

    server_context = Context(TLSv1_METHOD)
    server_context.set_tlsext_servername_callback(pick_certificate)

    server_ssl = Connection(server_context, server)
    server_ssl.set_accept_state()
    server_ssl.do_handshake()
    server.close()


certificates = {
    "example.invalid": load("example.invalid"),
    "another.invalid": load("another.invalid"),
}


def pick_certificate(connection):
    try:
        key, cert = certificates[connection.get_servername()]
    except KeyError:
        pass
    else:
        new_context = Context(TLSv1_METHOD)
        new_context.use_privatekey(key)
        new_context.use_certificate(cert)
        connection.set_context(new_context)
pando85/gourmet
refs/heads/master
gourmet/test_plugin_loader.py
7
import unittest
import tempfile
import os

import gourmet.gglobals

# clear out Gourmet's DB
tmpdir = tempfile.mktemp()
os.makedirs(tmpdir)
gourmet.gglobals.gourmetdir = tmpdir

import gourmet.GourmetRecipeManager
import gourmet.backends.db

gourmet.backends.db.RecData.__single = None
gourmet.GourmetRecipeManager.GourmetApplication.__single = None
# end clearing out code

from plugin_loader import get_master_loader


class Test (unittest.TestCase):

    def testDefaultPlugins (self):
        ml = get_master_loader()
        ml.load_active_plugins()
        print 'active:',ml.active_plugins
        print 'instantiated:',ml.instantiated_plugins
        assert(not ml.errors)

    def testAvailablePlugins (self):
        ml = get_master_loader()
        for st in ml.available_plugin_sets:
            if st not in ml.active_plugins:
                ml.activate_plugin_set(st)


if __name__ == '__main__':
    unittest.main()
mikewiebe-ansible/ansible
refs/heads/devel
lib/ansible/modules/identity/ipa/ipa_service.py
8
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: ipa_service author: Cédric Parent (@cprh) short_description: Manage FreeIPA service description: - Add and delete an IPA service using IPA API. options: krbcanonicalname: description: - Principal of the service. - Can not be changed as it is the unique identifier. required: true aliases: ["name"] type: str hosts: description: - Defines the list of 'ManagedBy' hosts. required: false type: list elements: str force: description: - Force principal name even if host is not in DNS. required: false type: bool state: description: State to ensure. required: false default: present choices: ["absent", "present"] type: str extends_documentation_fragment: ipa.documentation version_added: "2.5" ''' EXAMPLES = r''' - name: Ensure service is present ipa_service: name: http/host01.example.com state: present ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret - name: Ensure service is absent ipa_service: name: http/host01.example.com state: absent ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret - name: Changing Managing hosts list ipa_service: name: http/host01.example.com host: - host01.example.com - host02.example.com ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret ''' RETURN = r''' service: description: Service as returned by IPA API. returned: always type: dict ''' import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ipa import IPAClient, ipa_argument_spec from ansible.module_utils._text import to_native class ServiceIPAClient(IPAClient): def __init__(self, module, host, port, protocol): super(ServiceIPAClient, self).__init__(module, host, port, protocol) def service_find(self, name): return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name}) def service_add(self, name, service): return self._post_json(method='service_add', name=name, item=service) def service_mod(self, name, service): return self._post_json(method='service_mod', name=name, item=service) def service_del(self, name): return self._post_json(method='service_del', name=name) def service_disable(self, name): return self._post_json(method='service_disable', name=name) def service_add_host(self, name, item): return self._post_json(method='service_add_host', name=name, item={'host': item}) def service_remove_host(self, name, item): return self._post_json(method='service_remove_host', name=name, item={'host': item}) def get_service_dict(force=None, krbcanonicalname=None): data = {} if force is not None: data['force'] = force if krbcanonicalname is not None: data['krbcanonicalname'] = krbcanonicalname return data def get_service_diff(client, ipa_host, module_service): non_updateable_keys = ['force', 'krbcanonicalname'] for key in non_updateable_keys: if key in module_service: del module_service[key] return client.get_diff(ipa_data=ipa_host, module_data=module_service) def ensure(module, client): name = module.params['krbcanonicalname'] state = module.params['state'] hosts = module.params['hosts'] ipa_service = client.service_find(name=name) module_service = get_service_dict(force=module.params['force']) changed = False if state in 
['present', 'enabled', 'disabled']: if not ipa_service: changed = True if not module.check_mode: client.service_add(name=name, service=module_service) else: diff = get_service_diff(client, ipa_service, module_service) if len(diff) > 0: changed = True if not module.check_mode: data = {} for key in diff: data[key] = module_service.get(key) client.service_mod(name=name, service=data) if hosts is not None: if 'managedby_host' in ipa_service: for host in ipa_service['managedby_host']: if host not in hosts: if not module.check_mode: client.service_remove_host(name=name, item=host) changed = True for host in hosts: if host not in ipa_service['managedby_host']: if not module.check_mode: client.service_add_host(name=name, item=host) changed = True else: for host in hosts: if not module.check_mode: client.service_add_host(name=name, item=host) changed = True else: if ipa_service: changed = True if not module.check_mode: client.service_del(name=name) return changed, client.service_find(name=name) def main(): argument_spec = ipa_argument_spec() argument_spec.update( krbcanonicalname=dict(type='str', required=True, aliases=['name']), force=dict(type='bool', required=False), hosts=dict(type='list', required=False, elements='str'), state=dict(type='str', required=False, default='present', choices=['present', 'absent'])) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) client = ServiceIPAClient(module=module, host=module.params['ipa_host'], port=module.params['ipa_port'], protocol=module.params['ipa_prot']) try: client.login(username=module.params['ipa_user'], password=module.params['ipa_pass']) changed, host = ensure(module, client) module.exit_json(changed=changed, host=host) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) if __name__ == '__main__': main()
basicworld/py_structure
refs/heads/master
chap5_squeue.py
2
# -*- coding: utf-8 -*-
"""
Queue implemented on top of a Python list.
The class manages the list storage itself and grows it automatically.
"""


class QueueUnderflow(ValueError):
    pass


class SQueue(object):
    """
    The queue is full when self._num == self._len.
    The index of the next free slot is (self._head + self._num) % self._len.
    """
    def __init__(self, init_len=8):
        self._len = init_len
        self._elems = [0] * self._len
        self._head = 0  # index of the queue head
        self._num = 0   # number of elements

    def is_empty(self):
        return self._num == 0

    def peek(self):
        if self.is_empty():
            raise QueueUnderflow
        return self._elems[self._head]

    def dequeue(self):
        if self.is_empty():
            raise QueueUnderflow
        e = self._elems[self._head]
        self._head = (self._head + 1) % self._len
        self._num -= 1
        return e

    def enqueue(self, elem):
        if self._num == self._len:
            self.__extend()
        self._elems[(self._head + self._num) % self._len] = elem
        self._num += 1

    def __extend(self):
        old_len = self._len
        self._len *= 2
        new_elems = [0] * self._len  # allocate the new storage
        for i in range(old_len):  # copy the old elements into the new storage
            new_elems[i] = self._elems[(self._head + i) % old_len]
        self._elems, self._head = new_elems, 0


if __name__ == '__main__':
    q = SQueue()
    for i in range(10):
        q.enqueue(i)
    while not q.is_empty():
        print q.dequeue()
todaychi/hue
refs/heads/master
desktop/core/ext-py/cffi-1.5.2/setup_base.py
12
import sys, os
from setup import include_dirs, sources, libraries, define_macros
from setup import library_dirs, extra_compile_args, extra_link_args

if __name__ == '__main__':
    from distutils.core import setup
    from distutils.extension import Extension
    standard = '__pypy__' not in sys.builtin_module_names
    setup(packages=['cffi'],
          requires=['pycparser'],
          ext_modules=[Extension(name = '_cffi_backend',
                                 include_dirs=include_dirs,
                                 sources=sources,
                                 libraries=libraries,
                                 define_macros=define_macros,
                                 library_dirs=library_dirs,
                                 extra_compile_args=extra_compile_args,
                                 extra_link_args=extra_link_args,
                                 )] * standard)
0x90sled/catapult
refs/heads/master
dashboard/dashboard/add_point_test.py
1
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import math import unittest import mock import webapp2 import webtest from google.appengine.api import datastore_errors from google.appengine.ext import ndb from dashboard import add_point from dashboard import add_point_queue from dashboard import bot_whitelist from dashboard import layered_cache from dashboard import testing_common from dashboard import units_to_direction from dashboard.models import anomaly from dashboard.models import anomaly_config from dashboard.models import graph_data from dashboard.models import sheriff # A limit to the number of entities that can be fetched. This is just an # safe-guard to prevent possibly fetching too many entities. _FETCH_LIMIT = 100 # Sample point which contains all of the required fields. _SAMPLE_POINT = { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'my_test_suite/my_test', 'revision': 12345, 'value': 22.4, } # Sample Dashboard JSON v1.0 point. _SAMPLE_DASHBOARD_JSON = { 'master': 'ChromiumPerf', 'bot': 'win7', 'point_id': '12345', 'supplemental': { 'os': 'mavericks', 'gpu_oem': 'intel' }, 'versions': { 'chrome': '12.3.45.6', 'blink': '234567' }, 'chart_data': { 'benchmark_name': 'my_test_suite', 'benchmark_description': 'foo', 'format_version': '1.0', 'charts': { 'my_test': { 'summary': { 'type': 'scalar', 'name': 'my_test', 'units': 'ms', 'value': 22.4, } } } } } # Sample Dashboard JSON v1.0 point with trace data. _SAMPLE_DASHBOARD_JSON_WITH_TRACE = { 'master': 'ChromiumPerf', 'bot': 'win7', 'point_id': '12345', 'supplemental': { 'os': 'mavericks', 'gpu_oem': 'intel' }, 'versions': { 'chrome': '12.3.45.6', 'blink': '234567' }, 'chart_data': { 'benchmark_name': 'my_test_suite', 'benchmark_description': 'foo', 'format_version': '1.0', 'charts': { 'my_test': { 'trace1': { 'type': 'scalar', 'name': 'my_test1', 'units': 'ms', 'value': 22.4, }, 'trace2': { 'type': 'scalar', 'name': 'my_test2', 'units': 'ms', 'value': 33.2, } }, 'trace': { 'trace1': { 'name': 'trace', 'type': 'trace', # No cloud_url, should be handled properly }, 'trace2': { 'name': 'trace', 'cloud_url': 'https:\\/\\/console.developer.google.com\\/m', 'type': 'trace', } } } } } # Units to direction to use in the tests below. _UNITS_TO_DIRECTION_DICT = { 'ms': {'improvement_direction': 'down'}, 'fps': {'improvement_direction': 'up'}, } # Sample IP addresses to use in the tests below. _WHITELISTED_IP = '123.45.67.89' class AddPointTest(testing_common.TestCase): def setUp(self): super(AddPointTest, self).setUp() app = webapp2.WSGIApplication([ ('/add_point', add_point.AddPointHandler), ('/add_point_queue', add_point_queue.AddPointQueueHandler)]) self.testapp = webtest.TestApp(app) units_to_direction.UpdateFromJson(_UNITS_TO_DIRECTION_DICT) # Set up the default whitelisted IP used in the tests below. # Note: The behavior of responses from whitelisted and unwhitelisted IPs # is tested in post_data_handler_test.py. 
testing_common.SetIpWhitelist([_WHITELISTED_IP]) self.SetCurrentUser('[email protected]', is_admin=True) @mock.patch.object(add_point_queue.find_anomalies, 'ProcessTest') def testPost(self, mock_process_test): """Tests all basic functionality of a POST request.""" sheriff.Sheriff( id='my_sheriff1', email='[email protected]', patterns=['*/*/*/dom']).put() data_param = json.dumps([ { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'dromaeo/dom', 'revision': 12345, 'value': 22.4, 'error': 1.23, 'supplemental_columns': { 'r_webkit': 1355, 'a_extra': 'hello', 'd_median': 22.2, }, }, { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'dromaeo/jslib', 'revision': 12345, 'value': 44.3, } ]) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) # Verify everything was added to the database correctly rows = graph_data.Row.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(2, len(rows)) # Verify all properties of the first Row. self.assertEqual('dom', rows[0].parent_test.string_id()) self.assertEqual('Test', rows[0].parent_test.kind()) self.assertEqual(12345, rows[0].key.id()) self.assertEqual(12345, rows[0].revision) self.assertEqual(22.4, rows[0].value) self.assertEqual(1.23, rows[0].error) self.assertEqual('1355', rows[0].r_webkit) self.assertEqual('hello', rows[0].a_extra) self.assertEqual(22.2, rows[0].d_median) self.assertTrue(rows[0].internal_only) # Verify all properties of the second Row. self.assertEqual(12345, rows[1].key.id()) self.assertEqual(12345, rows[1].revision) self.assertEqual(44.3, rows[1].value) self.assertTrue(rows[1].internal_only) self.assertEqual('jslib', rows[1].parent_test.string_id()) self.assertEqual('Test', rows[1].parent_test.kind()) # There were three Test entities inserted -- the parent Test, # and the two child Test entities in the order given. tests = graph_data.Test.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(3, len(tests)) # Nothing was specified for units, so they have their default # values. Same for other the tests below. self.assertEqual('dromaeo', tests[0].key.id()) self.assertEqual('win7', tests[0].bot.id()) self.assertIsNone(tests[0].parent_test) self.assertFalse(tests[0].has_rows) self.assertEqual('ChromiumPerf/win7/dromaeo', tests[0].test_path) self.assertTrue(tests[0].internal_only) self.assertEqual(1, len(tests[0].monitored)) self.assertEqual('dom', tests[0].monitored[0].string_id()) self.assertIsNone(tests[0].units) self.assertEqual('dom', tests[1].key.id()) self.assertEqual('dromaeo', tests[1].parent_test.id()) self.assertEqual('my_sheriff1', tests[1].sheriff.string_id()) self.assertIsNone(tests[1].bot) self.assertTrue(tests[1].has_rows) self.assertEqual('ChromiumPerf/win7/dromaeo/dom', tests[1].test_path) self.assertTrue(tests[1].internal_only) self.assertIsNone(tests[1].units) self.assertEqual('jslib', tests[2].key.id()) self.assertEqual('dromaeo', tests[2].parent_test.id()) self.assertIsNone(tests[2].sheriff) self.assertIsNone(tests[2].bot) self.assertTrue(tests[2].has_rows) self.assertEqual('ChromiumPerf/win7/dromaeo/jslib', tests[2].test_path) self.assertTrue(tests[2].internal_only) self.assertIsNone(tests[2].units) # Both sample entries have the same master' and 'bot' values, so one # Master and one Bot entity were created. 
bots = graph_data.Bot.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(1, len(bots)) self.assertEqual('win7', bots[0].key.id()) self.assertEqual('ChromiumPerf', bots[0].key.parent().id()) self.assertTrue(bots[0].internal_only) masters = graph_data.Master.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(1, len(masters)) self.assertEqual('ChromiumPerf', masters[0].key.id()) self.assertIsNone(masters[0].key.parent()) # Verify that an anomaly processing was called. mock_process_test.assert_called_once_with(tests[1].key) @mock.patch.object(add_point_queue.find_anomalies, 'ProcessTest') def testPost_TestNameEndsWithUnderscoreRef_ProcessTestIsNotCalled( self, mock_process_test): """Tests that tests ending with _ref aren't analyze for anomalies.""" sheriff.Sheriff( id='ref_sheriff', email='[email protected]', patterns=['*/*/*/*']).put() point = _SAMPLE_POINT.copy() point['test'] = '1234/abcd_ref' self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) self.assertFalse(mock_process_test.called) @mock.patch.object(add_point_queue.find_anomalies, 'ProcessTest') def testPost_TestNameEndsWithSlashRef_ProcessTestIsNotCalled( self, mock_process_test): """Tests that leaf tests named ref aren't added to the task queue.""" sheriff.Sheriff( id='ref_sheriff', email='[email protected]', patterns=['*/*/*/*']).put() point = _SAMPLE_POINT.copy() point['test'] = '1234/ref' self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) self.assertFalse(mock_process_test.called) @mock.patch.object(add_point_queue.find_anomalies, 'ProcessTest') def testPost_TestNameEndsContainsButDoesntEndWithRef_ProcessTestIsCalled( self, mock_process_test): sheriff.Sheriff( id='ref_sheriff', email='[email protected]', patterns=['*/*/*/*']).put() point = _SAMPLE_POINT.copy() point['test'] = '_ref/abcd' self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) self.assertTrue(mock_process_test.called) def testPost_TestPathTooLong_PointRejected(self): """Tests that an error is returned when the test path would be too long.""" point = _SAMPLE_POINT.copy() point['test'] = 'long_test/%s' % ('x' * 490) self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) tests = graph_data.Test.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(0, len(tests)) def testPost_TrailingSlash_Ignored(self): point = _SAMPLE_POINT.copy() point['test'] = 'mach_ports_parent/mach_ports/' self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) tests = graph_data.Test.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(2, len(tests)) self.assertEqual('mach_ports_parent', tests[0].key.id()) self.assertEqual('mach_ports', tests[1].key.id()) self.assertEqual('mach_ports_parent', tests[1].parent_test.id()) def test_LeadingSlash_Ignored(self): point = _SAMPLE_POINT.copy() point['test'] = '/boot_time/pre_plugin_time' self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': 
_WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) tests = graph_data.Test.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(2, len(tests)) self.assertEqual('boot_time', tests[0].key.id()) self.assertEqual('pre_plugin_time', tests[1].key.id()) self.assertEqual('boot_time', tests[1].parent_test.id()) def testPost_BadJson_DataRejected(self): """Tests that an error is returned when the given data is not valid JSON.""" self.testapp.post( '/add_point', {'data': "This isn't JSON"}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) def testPost_BadGraphName_DataRejected(self): """Tests that an error is returned when the test name has too many parts.""" point = _SAMPLE_POINT.copy() point['test'] = 'a/b/c/d/e/f/g/h/i/j/k' self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) def testPost_TestNameHasDoubleUnderscores_Rejected(self): point = _SAMPLE_POINT.copy() point['test'] = 'my_test_suite/__my_test__' self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) @mock.patch('logging.error') @mock.patch.object(graph_data.Master, 'get_by_id') def testPost_BadRequestError_ErrorLogged( self, mock_get_by_id, mock_logging_error): """Tests that error is logged if a datastore BadRequestError happens.""" mock_get_by_id.side_effect = datastore_errors.BadRequestError self.testapp.post( '/add_point', {'data': json.dumps([_SAMPLE_POINT])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) self.assertEqual(1, len(mock_logging_error.mock_calls)) def testPost_IncompleteData_DataRejected(self): """Tests that an error is returned when the given columns are invalid.""" data_param = json.dumps([ { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'foo/bar/baz', } ]) self.testapp.post( '/add_point', {'data': data_param}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) def testPost_NoRevisionAndNoVersionNums_Rejected(self): """Asserts post fails when both revision and version numbers are missing.""" data_param = json.dumps([ { 'master': 'CrosPerf', 'bot': 'lumpy', 'test': 'mach_ports/mach_ports/', 'value': '231.666666667', 'error': '2.28521820013', } ]) self.testapp.post( '/add_point', {'data': data_param}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) def testPost_InvalidRevision_Rejected(self): point = _SAMPLE_POINT.copy() point['revision'] = 'I am not a valid revision number!' response = self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.assertEqual( 'Bad value for "revision", should be numerical.\n', response.body) def testPost_InvalidSupplementalRevision_DropsRevision(self): point = _SAMPLE_POINT.copy() point['supplemental_columns'] = { 'r_one': '1234', 'r_two': 'I am not a valid revision or version.', } self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) # Supplemental revision numbers with an invalid format should be dropped. 
row = graph_data.Row.query().get() self.assertEqual('1234', row.r_one) self.assertFalse(hasattr(row, 'r_two')) def testPost_UnWhitelistedBots_MarkedInternalOnly(self): bot_whitelist.BotWhitelist( id=bot_whitelist.WHITELIST_KEY, bots=['linux-release', 'win7']).put() parent = graph_data.Master(id='ChromiumPerf').put() parent = graph_data.Bot( id='suddenly_secret', parent=parent, internal_only=False).put() graph_data.Test(id='dromaeo', parent=parent, internal_only=False).put() data_param = json.dumps([ { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'dromaeo/dom', 'value': '33.2', 'revision': '1234', }, { 'master': 'ChromiumPerf', 'bot': 'very_secret', 'test': 'dromaeo/dom', 'value': '100.1', 'revision': '1234', }, { 'master': 'ChromiumPerf', 'bot': 'suddenly_secret', 'test': 'dromaeo/dom', 'value': '22.3', 'revision': '1234', }, ]) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) bots = graph_data.Bot.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(3, len(bots)) self.assertEqual('suddenly_secret', bots[0].key.string_id()) self.assertTrue(bots[0].internal_only) self.assertEqual('very_secret', bots[1].key.string_id()) self.assertTrue(bots[1].internal_only) self.assertEqual('win7', bots[2].key.string_id()) self.assertFalse(bots[2].internal_only) tests = graph_data.Test.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(6, len(tests)) self.assertEqual('dromaeo', tests[0].key.string_id()) self.assertEqual('suddenly_secret', tests[0].key.parent().string_id()) self.assertTrue(tests[0].internal_only) self.assertEqual('dom', tests[1].key.string_id()) self.assertTrue(tests[1].internal_only) self.assertEqual('dromaeo', tests[2].key.string_id()) self.assertEqual('very_secret', tests[2].key.parent().string_id()) self.assertTrue(tests[2].internal_only) self.assertEqual('dom', tests[3].key.string_id()) self.assertTrue(tests[3].internal_only) self.assertEqual('dromaeo', tests[4].key.string_id()) self.assertEqual('win7', tests[4].key.parent().string_id()) self.assertFalse(tests[4].internal_only) self.assertEqual('dom', tests[5].key.string_id()) self.assertFalse(tests[5].internal_only) rows = graph_data.Row.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(3, len(rows)) self.assertTrue(rows[0].internal_only) self.assertTrue(rows[1].internal_only) self.assertFalse(rows[2].internal_only) @mock.patch.object( add_point_queue.find_anomalies, 'ProcessTest', mock.MagicMock()) def testPost_NewTest_SheriffPropertyIsAdded(self): """Tests that sheriffs are added to tests when Tests are created.""" sheriff1 = sheriff.Sheriff( id='sheriff1', email='[email protected]', patterns=['ChromiumPerf/*/*/jslib']).put() sheriff2 = sheriff.Sheriff( id='sheriff2', email='[email protected]', patterns=['*/*/image_benchmark/*', '*/*/scrolling_benchmark/*']).put() data_param = json.dumps([ { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'scrolling_benchmark/mean_frame_time', 'revision': 123456, 'value': 700, }, { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'dromaeo/jslib', 'revision': 123445, 'value': 200, }, { 'master': 'ChromiumWebkit', 'bot': 'win7', 'test': 'dromaeo/jslib', 'revision': 12345, 'value': 205.3, } ]) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) sheriff1_test = ndb.Key( 'Master', 'ChromiumPerf', 'Bot', 'win7', 'Test', 'dromaeo', 'Test', 
'jslib').get() self.assertEqual(sheriff1, sheriff1_test.sheriff) sheriff2_test = ndb.Key( 'Master', 'ChromiumPerf', 'Bot', 'win7', 'Test', 'scrolling_benchmark', 'Test', 'mean_frame_time').get() self.assertEqual(sheriff2, sheriff2_test.sheriff) no_sheriff_test = ndb.Key( 'Master', 'ChromiumWebkit', 'Bot', 'win7', 'Test', 'dromaeo', 'Test', 'jslib').get() self.assertIsNone(no_sheriff_test.sheriff) test_suite = ndb.Key( 'Master', 'ChromiumPerf', 'Bot', 'win7', 'Test', 'scrolling_benchmark').get() self.assertEqual(1, len(test_suite.monitored)) self.assertEqual('mean_frame_time', test_suite.monitored[0].string_id()) def testPost_NewTest_AnomalyConfigPropertyIsAdded(self): """Tests that AnomalyConfig keys are added to Tests upon creation. Like with sheriffs, AnomalyConfig keys are to Test when the Test is put if the Test matches the pattern of the AnomalyConfig. """ anomaly_config1 = anomaly_config.AnomalyConfig( id='modelset1', config='', patterns=['ChromiumPerf/*/dromaeo/jslib']).put() anomaly_config2 = anomaly_config.AnomalyConfig( id='modelset2', config='', patterns=['*/*image_benchmark/*', '*/*/scrolling_benchmark/*']).put() data_param = json.dumps([ { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'scrolling_benchmark/mean_frame_time', 'revision': 123456, 'value': 700, }, { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'dromaeo/jslib', 'revision': 123445, 'value': 200, }, { 'master': 'ChromiumWebkit', 'bot': 'win7', 'test': 'dromaeo/jslib', 'revision': 12345, 'value': 205.3, } ]) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) anomaly_config1_test = ndb.Key( 'Master', 'ChromiumPerf', 'Bot', 'win7', 'Test', 'dromaeo', 'Test', 'jslib').get() self.assertEqual( anomaly_config1, anomaly_config1_test.overridden_anomaly_config) anomaly_config2_test = ndb.Key( 'Master', 'ChromiumPerf', 'Bot', 'win7', 'Test', 'scrolling_benchmark', 'Test', 'mean_frame_time').get() self.assertEqual( anomaly_config2, anomaly_config2_test.overridden_anomaly_config) no_config_test = ndb.Key( 'Master', 'ChromiumWebkit', 'Bot', 'win7', 'Test', 'dromaeo', 'Test', 'jslib').get() self.assertIsNone(no_config_test.overridden_anomaly_config) def testPost_NewTest_AddsUnits(self): """Tests that units and improvement direction are added for new Tests.""" data_param = json.dumps([ { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'scrolling_benchmark/mean_frame_time', 'revision': 123456, 'value': 700, 'units': 'ms', } ]) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) tests = graph_data.Test.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(2, len(tests)) self.assertEqual('scrolling_benchmark', tests[0].key.string_id()) self.assertIsNone(tests[0].units) self.assertEqual(anomaly.UNKNOWN, tests[0].improvement_direction) self.assertEqual('mean_frame_time', tests[1].key.string_id()) self.assertEqual('ms', tests[1].units) self.assertEqual(anomaly.DOWN, tests[1].improvement_direction) def testPost_NewPointWithNewUnits_TestUnitsAreUpdated(self): parent = graph_data.Master(id='ChromiumPerf').put() parent = graph_data.Bot(id='win7', parent=parent).put() parent = graph_data.Test(id='scrolling_benchmark', parent=parent).put() graph_data.Test( id='mean_frame_time', parent=parent, units='ms', improvement_direction=anomaly.DOWN).put() data_param = json.dumps([ { 'master': 
'ChromiumPerf', 'bot': 'win7', 'test': 'scrolling_benchmark/mean_frame_time', 'revision': 123456, 'value': 700, 'units': 'fps', } ]) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) tests = graph_data.Test.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(2, len(tests)) self.assertEqual('scrolling_benchmark', tests[0].key.string_id()) self.assertIsNone(tests[0].units) self.assertEqual(anomaly.UNKNOWN, tests[0].improvement_direction) self.assertEqual('mean_frame_time', tests[1].key.string_id()) self.assertEqual('fps', tests[1].units) self.assertEqual(anomaly.UP, tests[1].improvement_direction) def testPost_NewPoint_UpdatesImprovementDirection(self): """Tests that adding a point updates units for an existing Test.""" parent = graph_data.Master(id='ChromiumPerf').put() parent = graph_data.Bot(id='win7', parent=parent).put() parent = graph_data.Test(id='scrolling_benchmark', parent=parent).put() frame_time_key = graph_data.Test( id='frame_time', parent=parent, units='ms', improvement_direction=anomaly.DOWN).put() # Before sending the new data point, the improvement direction is down. test = frame_time_key.get() self.assertEqual(anomaly.DOWN, test.improvement_direction) data_param = json.dumps([ { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'scrolling_benchmark/frame_time', 'revision': 123456, 'value': 700, 'units': 'ms', 'higher_is_better': True, } ]) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) # After sending the new data which explicitly specifies an improvement # direction, the improvement direction is changed even though the units # (ms) usually indicates an improvement direction of down. 
test = frame_time_key.get() self.assertEqual(anomaly.UP, test.improvement_direction) def testPost_DirectionUpdatesWithUnitMap(self): """Tests that adding a point updates units for an existing Test.""" parent = graph_data.Master(id='ChromiumPerf').put() parent = graph_data.Bot(id='win7', parent=parent).put() parent = graph_data.Test(id='scrolling_benchmark', parent=parent).put() graph_data.Test( id='mean_frame_time', parent=parent, units='ms', improvement_direction=anomaly.UNKNOWN).put() point = { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'scrolling_benchmark/mean_frame_time', 'revision': 123456, 'value': 700, 'units': 'ms', } self.testapp.post('/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': '123.45.67.89'}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) tests = graph_data.Test.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(2, len(tests)) self.assertEqual('scrolling_benchmark', tests[0].key.string_id()) self.assertIsNone(tests[0].units) self.assertEqual(anomaly.UNKNOWN, tests[0].improvement_direction) self.assertEqual('mean_frame_time', tests[1].key.string_id()) self.assertEqual('ms', tests[1].units) self.assertEqual(anomaly.DOWN, tests[1].improvement_direction) def testPost_AddNewPointToDeprecatedTest_ResetsDeprecated(self): """Tests that adding a point sets the test to be non-deprecated.""" parent = graph_data.Master(id='ChromiumPerf').put() parent = graph_data.Bot(id='win7', parent=parent).put() suite = graph_data.Test( id='scrolling_benchmark', parent=parent, deprecated=True).put() graph_data.Test(id='mean_frame_time', parent=suite, deprecated=True).put() point = { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'scrolling_benchmark/mean_frame_time', 'revision': 123456, 'value': 700, } self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) tests = graph_data.Test.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(2, len(tests)) # Note that the parent test is also marked as non-deprecated. self.assertEqual('scrolling_benchmark', tests[0].key.string_id()) self.assertFalse(tests[0].deprecated) self.assertEqual('mean_frame_time', tests[1].key.string_id()) self.assertFalse(tests[1].deprecated) def testPost_GitHashSupplementalRevision_Accepted(self): """Tests that git hashes can be added as supplemental revision columns.""" point = _SAMPLE_POINT.copy() point['revision'] = 123 point['supplemental_columns'] = { 'r_chromium_rev': '2eca27b067e3e57c70e40b8b95d0030c5d7c1a7f', 'r_webkit_rev': 'bf9aa8d62561bb2e4d7bc09e9d9e8c6a665ddc88', } self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) rows = graph_data.Row.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(1, len(rows)) self.assertEqual(123, rows[0].key.id()) self.assertEqual(123, rows[0].revision) self.assertEqual( '2eca27b067e3e57c70e40b8b95d0030c5d7c1a7f', rows[0].r_chromium_rev) self.assertEqual( 'bf9aa8d62561bb2e4d7bc09e9d9e8c6a665ddc88', rows[0].r_webkit_rev) def testPost_NewSuite_CachedSubTestsDeleted(self): """Tests that cached test lists are cleared as new test suites are added.""" # Set the cached test lists. Note that no actual Test entities are added # here, so when a new point is added, it will still count as a new Test. 
layered_cache.Set( graph_data.LIST_TESTS_SUBTEST_CACHE_KEY % ( 'ChromiumPerf', 'win7', 'scrolling_benchmark'), {'foo': 'bar'}) layered_cache.Set( graph_data.LIST_TESTS_SUBTEST_CACHE_KEY % ( 'ChromiumPerf', 'mac', 'scrolling_benchmark'), {'foo': 'bar'}) data_param = json.dumps([ { 'master': 'ChromiumPerf', 'bot': 'win7', 'test': 'scrolling_benchmark/mean_frame_time', 'revision': 123456, 'value': 700, } ]) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) # Subtests for ChromiumPerf/win7/scrolling_benchmark should be cleared. self.assertIsNone(layered_cache.Get( graph_data.LIST_TESTS_SUBTEST_CACHE_KEY % ( 'ChromiumPerf', 'win7', 'scrolling_benchmark'))) # Subtests for another bot should NOT be cleared. self.assertEqual({'foo': 'bar'}, layered_cache.Get( graph_data.LIST_TESTS_SUBTEST_CACHE_KEY % ( 'ChromiumPerf', 'mac', 'scrolling_benchmark'))) def testParseColumns(self): """Tests that the GetAndValidateRowProperties method handles valid data.""" expected = { 'value': 444.55, 'error': 12.3, 'r_webkit': '12345', 'r_skia': '43210', 'a_note': 'hello', 'd_run_1': 444.66, 'd_run_2': 444.44, } actual = add_point.GetAndValidateRowProperties( { 'revision': 12345, 'value': 444.55, 'error': 12.3, 'supplemental_columns': { 'r_webkit': 12345, 'r_skia': 43210, 'a_note': 'hello', 'd_run_1': 444.66, 'd_run_2': 444.44, }, } ) self.assertEqual(expected, actual) def testPost_NoValue_Rejected(self): """Tests the error returned when no "value" is given.""" point = _SAMPLE_POINT.copy() del point['value'] response = self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.assertEqual('No "value" given.\n', response.body) self.assertIsNone(graph_data.Row.query().get()) def testPost_WithBadValue_Rejected(self): """Tests the error returned when an invalid "value" is given.""" point = _SAMPLE_POINT.copy() point['value'] = 'hello' response = self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) self.assertEqual( 'Bad value for "value", should be numerical.\n', response.body) self.assertIsNone(graph_data.Row.query().get()) def testPost_WithBadPointErrorValue_ErrorValueDropped(self): point = _SAMPLE_POINT.copy() point['error'] = 'not a number' self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) row = graph_data.Row.query().get() self.assertIsNone(row.error) def testPost_TooManyColumns_SomeColumnsDropped(self): """Tests that some columns are dropped if there are too many.""" point = _SAMPLE_POINT.copy() supplemental_columns = {} for i in range(1, add_point._MAX_NUM_COLUMNS * 2): supplemental_columns['d_run_%d' % i] = i point['supplemental_columns'] = supplemental_columns self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) row = graph_data.Row.query().get() row_dict = row.to_dict() data_columns = [c for c in row_dict if c.startswith('d_')] self.assertGreater(len(data_columns), 1) self.assertLessEqual(len(data_columns), add_point._MAX_NUM_COLUMNS) def testPost_BadSupplementalColumnName_ColumnDropped(self): point = 
_SAMPLE_POINT.copy() point['supplemental_columns'] = {'q_foo': 'bar'} self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) # Supplemental columns with undefined prefixes should be dropped. row = graph_data.Row.query().get() self.assertFalse(hasattr(row, 'q_foo')) def testPost_LongSupplementalColumnName_ColumnDropped(self): point = _SAMPLE_POINT.copy() key = 'a_' + ('a' * add_point._MAX_COLUMN_NAME_LENGTH) point['supplemental_columns'] = { key: '1234', } self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) # Supplemental columns with super long names should be dropped. row = graph_data.Row.query().get() self.assertFalse(hasattr(row, key)) def testPost_LongSupplementalAnnotation_ColumnDropped(self): point = _SAMPLE_POINT.copy() point['supplemental_columns'] = { 'a_one': 'z' * (add_point._STRING_COLUMN_MAX_LENGTH + 1), 'a_two': 'hello', } self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) # Row properties with names that are too long are not added. row = graph_data.Row.query().get() self.assertFalse(hasattr(row, 'a_one')) self.assertEqual('hello', row.a_two) def testPost_BadSupplementalDataColumn_ColumnDropped(self): """Tests that bad supplemental data columns are dropped.""" point = _SAMPLE_POINT.copy() point['supplemental_columns'] = { 'd_run_1': 'hello', 'd_run_2': 42.5, } self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) # Row data properties that aren't numerical aren't added. row = graph_data.Row.query().get() self.assertFalse(hasattr(row, 'd_run_1')) self.assertEqual(42.5, row.d_run_2) def testPost_RevisionTooLow_Rejected(self): # If a point's ID is much lower than the last one, it should be rejected # because this indicates that the revision type was accidentally changed. # First add one point; it's accepted because it's the first in the series. point = _SAMPLE_POINT.copy() point['revision'] = 1408479179 self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) test_path = 'ChromiumPerf/win7/my_test_suite/my_test' last_added_revision = ndb.Key('LastAddedRevision', test_path).get() self.assertEqual(1408479179, last_added_revision.revision) point = _SAMPLE_POINT.copy() point['revision'] = 285000 self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) rows = graph_data.Row.query().fetch() self.assertEqual(1, len(rows)) def testPost_RevisionTooHigh_Rejected(self): # First add one point; it's accepted because it's the first in the series. 
point = _SAMPLE_POINT.copy() point['revision'] = 285000 self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) point = _SAMPLE_POINT.copy() point['revision'] = 1408479179 self.testapp.post( '/add_point', {'data': json.dumps([point])}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) rows = graph_data.Row.query().fetch() self.assertEqual(1, len(rows)) def testPost_MultiplePointsWithCloseRevisions_Accepted(self): point = _SAMPLE_POINT.copy() point['revision'] = 285000 self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) point = _SAMPLE_POINT.copy() point['revision'] = 285200 self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) point = _SAMPLE_POINT.copy() point['revision'] = 285100 self.testapp.post( '/add_point', {'data': json.dumps([point])}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) rows = graph_data.Row.query().fetch() self.assertEqual(3, len(rows)) def testPost_ValidRow_CorrectlyAdded(self): """Tests that adding a chart causes the correct row to be added.""" data_param = json.dumps(_SAMPLE_DASHBOARD_JSON) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) rows = graph_data.Row.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(1, len(rows)) self.assertEqual(12345, rows[0].revision) self.assertEqual(22.4, rows[0].value) self.assertEqual(0, rows[0].error) self.assertEqual('12.3.45.6', rows[0].r_chrome) self.assertEqual('234567', rows[0].r_blink) self.assertEqual('mavericks', rows[0].a_os) self.assertEqual('intel', rows[0].a_gpu_oem) test_suite = ndb.Key( 'Master', 'ChromiumPerf', 'Bot', 'win7', 'Test', 'my_test_suite').get() self.assertEqual('foo', test_suite.description) def testPost_WithBenchmarkRerunOptions_AddsTraceRerunOptions(self): sample_json = _SAMPLE_DASHBOARD_JSON.copy() sample_json['chart_data']['trace_rerun_options'] = [['foo', '--foo']] data_param = json.dumps(sample_json) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) rows = graph_data.Row.query().fetch(limit=_FETCH_LIMIT) self.assertEqual('--foo', rows[0].a_trace_rerun_options.foo) def testPost_FormatV1_CorrectlyAdded(self): """Tests that adding a chart causes the correct trace to be added.""" data_param = json.dumps(_SAMPLE_DASHBOARD_JSON_WITH_TRACE) self.testapp.post( '/add_point', {'data': data_param}, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) self.ExecuteTaskQueueTasks('/add_point_queue', add_point._TASK_QUEUE_NAME) rows = graph_data.Row.query().fetch(limit=_FETCH_LIMIT) self.assertEqual(2, len(rows)) self.assertEqual(12345, rows[0].revision) self.assertEqual(22.4, rows[0].value) self.assertEqual(0, rows[0].error) self.assertEqual('12.3.45.6', rows[0].r_chrome) self.assertFalse(hasattr(rows[0], 'a_tracing_uri')) self.assertEqual(33.2, rows[1].value) self.assertEqual('https://console.developer.google.com/m', rows[1].a_tracing_uri) def testPost_FormatV1_BadMaster_Rejected(self): """Tests that attempting to post with no master name will error.""" chart = _SAMPLE_DASHBOARD_JSON.copy() del chart['master'] self.testapp.post( 
'/add_point', {'data': json.dumps(chart)}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) def testPost_FormatV1_BadBot_Rejected(self): """Tests that attempting to post with no bot name will error.""" chart = _SAMPLE_DASHBOARD_JSON.copy() del chart['bot'] self.testapp.post( '/add_point', {'data': json.dumps(chart)}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) def testPost_FormatV1_BadPointId_Rejected(self): """Tests that attempting to post a chart no point id will error.""" chart = _SAMPLE_DASHBOARD_JSON.copy() del chart['point_id'] self.testapp.post( '/add_point', {'data': json.dumps(chart)}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) def testPost_GarbageDict_Rejected(self): """Tests that posting an ill-formatted dict will error.""" chart = {'foo': 'garbage'} self.testapp.post( '/add_point', {'data': json.dumps(chart)}, status=400, extra_environ={'REMOTE_ADDR': _WHITELISTED_IP}) class FlattenTraceTest(testing_common.TestCase): def testDashboardJsonToRawRows_WithIsRef(self): """Tests that rows from a chart from a ref build have the correct name.""" chart = _SAMPLE_DASHBOARD_JSON.copy() chart['is_ref'] = True rows = add_point.AddPointHandler()._DashboardJsonToRawRows(chart) self.assertEqual('my_test_suite/my_test/ref', rows[0]['test']) def testFlattenTrace_PreservesUnits(self): """Tests that _FlattenTrace preserves the units property.""" trace = { 'type': 'scalar', 'name': 'overall', 'units': 'ms', 'value': 42 } row = add_point._FlattenTrace('foo', 'bar', 'bar', trace) self.assertEqual(row['units'], 'ms') def testFlattenTrace_CoreTraceName(self): """Tests that chartname.summary will be flattened to chartname.""" trace = { 'type': 'scalar', 'name': 'bar', 'units': 'ms', 'value': 42 } row = add_point._FlattenTrace('foo', 'bar', 'summary', trace) self.assertEqual(row['test'], 'foo/bar') def testFlattenTrace_NonSummaryTraceName_SetCorrectly(self): """Tests that chart.trace will be flattened to chart/trace.""" trace = { 'type': 'scalar', 'name': 'bar.baz', 'units': 'ms', 'value': 42 } row = add_point._FlattenTrace('foo', 'bar', 'baz', trace) self.assertEqual(row['test'], 'foo/bar/baz') def testFlattenTraceAddsImprovementDirectionIfPresent(self): """Tests that improvement_direction will be respected if present.""" trace = { 'type': 'scalar', 'name': 'bar', 'units': 'ms', 'value': 42, 'improvement_direction': 'up' } row = add_point._FlattenTrace('foo', 'bar', 'summary', trace) self.assertIn('higher_is_better', row) self.assertEqual(row['higher_is_better'], True) def testFlattenTraceDoesNotAddImprovementDirectionIfAbsent(self): """Tests that no higher_is_better is added if no improvement_direction.""" trace = { 'type': 'scalar', 'name': 'bar', 'units': 'ms', 'value': 42 } row = add_point._FlattenTrace('foo', 'bar', 'summary', trace) self.assertNotIn('higher_is_better', row) def testFlattenTraceRejectsBadImprovementDirection(self): """Tests that passing a bad improvement_direction will cause an error.""" trace = { 'type': 'scalar', 'name': 'bar', 'units': 'ms', 'value': 42, 'improvement_direction': 'foo' } with self.assertRaises(add_point.BadRequestError): add_point._FlattenTrace('foo', 'bar', 'summary', trace) def testFlattenTrace_ScalarValue(self): """Tests that scalars are flattened to 0-error values.""" trace = { 'type': 'scalar', 'name': 'overall', 'units': 'ms', 'value': 42 } row = add_point._FlattenTrace('foo', 'bar', 'baz', trace) self.assertEqual(row['value'], 42) self.assertEqual(row['error'], 0) def testFlattenTraceScalarNoneValue(self): 
"""Tests that scalar NoneValue is flattened to NaN.""" trace = { 'type': 'scalar', 'name': 'overall', 'units': 'ms', 'value': None, 'none_value_reason': 'Reason for test' } row = add_point._FlattenTrace('foo', 'bar', 'baz', trace) self.assertTrue(math.isnan(row['value'])) self.assertEqual(row['error'], 0) def testFlattenTraceListValue(self): """Tests that lists are properly flattened to avg/stddev.""" trace = { 'type': 'list_of_scalar_values', 'name': 'bar.baz', 'units': 'ms', 'values': [5, 10, 25, 10, 15], } row = add_point._FlattenTrace('foo', 'bar', 'baz', trace) self.assertAlmostEqual(row['value'], 13) self.assertAlmostEqual(row['error'], 6.78232998) def testFlattenTraceListValueWithStd(self): """Tests that lists with reported std use std as error.""" trace = { 'type': 'list_of_scalar_values', 'name': 'bar.baz', 'units': 'ms', 'values': [5, 10, 25, 10, 15], 'std': 100, } row = add_point._FlattenTrace('foo', 'bar', 'baz', trace) self.assertNotAlmostEqual(row['error'], 6.78232998) self.assertEqual(row['error'], 100) def testFlattenTrace_ListNoneValue(self): """Tests that LoS NoneValue is flattened to NaN.""" trace = { 'type': 'list_of_scalar_values', 'name': 'overall', 'units': 'ms', 'value': None, 'none_value_reason': 'Reason for test' } row = add_point._FlattenTrace('foo', 'bar', 'baz', trace) self.assertTrue(math.isnan(row['value'])) self.assertTrue(math.isnan(row['error'])) def testFlattenTrace_HistogramValue(self): """Tests that histograms are yield geommean/stddev as value/error.""" trace = { 'type': 'histogram', 'name': 'bar.baz', 'units': 'ms', 'buckets': [{'low': 1, 'high': 5, 'count': 3}, {'low': 4, 'high': 6, 'count': 4}] } row = add_point._FlattenTrace('foo', 'bar', 'baz', trace) self.assertAlmostEqual(row['value'], 4.01690877) self.assertAlmostEqual(row['error'], 0.99772482) def testFlattenTrace_RespectsIsRefForSameTraceName(self): """Tests whether a ref trace that is a chart has the /ref suffix.""" trace = { 'type': 'scalar', 'name': 'bar', 'units': 'ms', 'value': 42 } row = add_point._FlattenTrace( 'foo', 'bar', 'summary', trace, is_ref=True) self.assertEqual(row['test'], 'foo/bar/ref') def testFlattenTrace_RespectsIsRefForDifferentTraceName(self): """Tests whether a ref trace that is not a chart has the _ref suffix.""" trace = { 'type': 'scalar', 'name': 'bar.baz', 'units': 'ms', 'value': 42 } row = add_point._FlattenTrace( 'foo', 'bar', 'baz', trace, is_ref=True) self.assertEqual(row['test'], 'foo/bar/baz_ref') def testFlattenTrace_SanitizesTraceName(self): """Tests whether a trace name with special characters is sanitized.""" trace = { 'type': 'scalar', 'name': 'bar.baz', 'page': 'http://example.com', 'units': 'ms', 'value': 42 } row = add_point._FlattenTrace( 'foo', 'bar', 'http://example.com', trace) self.assertEqual(row['test'], 'foo/bar/http___example.com') def testFlattenTrace_FlattensInteractionRecordLabelToFivePartName(self): """Tests whether a TIR label will appear between chart and trace name.""" trace = { 'type': 'scalar', 'name': 'bar', 'page': 'https://abc.xyz/', 'units': 'ms', 'value': 42, 'tir_label': 'baz' } row = add_point._FlattenTrace('foo', 'baz@@bar', 'https://abc.xyz/', trace) self.assertEqual(row['test'], 'foo/bar/baz/https___abc.xyz_') if __name__ == '__main__': unittest.main()
juanalfonsopr/odoo
refs/heads/8.0
openerp/addons/base/module/module.py
68
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from docutils import nodes from docutils.core import publish_string from docutils.transforms import Transform, writer_aux from docutils.writers.html4css1 import Writer import importlib import logging from operator import attrgetter import os import re import shutil import tempfile import urllib import urllib2 import urlparse import zipfile import zipimport import lxml.html try: from cStringIO import StringIO except ImportError: from StringIO import StringIO # NOQA import openerp import openerp.exceptions from openerp import modules, tools from openerp.modules.db import create_categories from openerp.modules import get_module_resource from openerp.tools.parse_version import parse_version from openerp.tools.translate import _ from openerp.osv import osv, orm, fields from openerp import api, fields as fields2 _logger = logging.getLogger(__name__) ACTION_DICT = { 'view_type': 'form', 'view_mode': 'form', 'res_model': 'base.module.upgrade', 'target': 'new', 'type': 'ir.actions.act_window', 'nodestroy': True, } def backup(path, raise_exception=True): path = os.path.normpath(path) if not os.path.exists(path): if not raise_exception: return None raise OSError('path does not exists') cnt = 1 while True: bck = '%s~%d' % (path, cnt) if not os.path.exists(bck): shutil.move(path, bck) return bck cnt += 1 class module_category(osv.osv): _name = "ir.module.category" _description = "Application" def _module_nbr(self, cr, uid, ids, prop, unknow_none, context): cr.execute('SELECT category_id, COUNT(*) \ FROM ir_module_module \ WHERE category_id IN %(ids)s \ OR category_id IN (SELECT id \ FROM ir_module_category \ WHERE parent_id IN %(ids)s) \ GROUP BY category_id', {'ids': tuple(ids)} ) result = dict(cr.fetchall()) for id in ids: cr.execute('select id from ir_module_category where parent_id=%s', (id,)) result[id] = sum([result.get(c, 0) for (c,) in cr.fetchall()], result.get(id, 0)) return result _columns = { 'name': fields.char("Name", required=True, translate=True, select=True), 'parent_id': fields.many2one('ir.module.category', 'Parent Application', select=True), 'child_ids': fields.one2many('ir.module.category', 'parent_id', 'Child Applications'), 'module_nr': fields.function(_module_nbr, string='Number of Modules', type='integer'), 'module_ids': fields.one2many('ir.module.module', 'category_id', 'Modules'), 'description': fields.text("Description", translate=True), 'sequence': fields.integer('Sequence'), 'visible': fields.boolean('Visible'), 'xml_id': fields.function(osv.osv.get_external_id, type='char', string="External ID"), } _order = 'name' _defaults = { 'visible': 1, } 
class MyFilterMessages(Transform): """ Custom docutils transform to remove `system message` for a document and generate warnings. (The standard filter removes them based on some `report_level` passed in the `settings_override` dictionary, but if we use it, we can't see them and generate warnings.) """ default_priority = 870 def apply(self): for node in self.document.traverse(nodes.system_message): _logger.warning("docutils' system message present: %s", str(node)) node.parent.remove(node) class MyWriter(Writer): """ Custom docutils html4ccs1 writer that doesn't add the warnings to the output document. """ def get_transforms(self): return [MyFilterMessages, writer_aux.Admonitions] class module(osv.osv): _name = "ir.module.module" _rec_name = "shortdesc" _description = "Module" def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False): res = super(module, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False) result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'action_server_module_immediate_install')[1] if view_type == 'form': if res.get('toolbar',False): list = [rec for rec in res['toolbar']['action'] if rec.get('id', False) != result] res['toolbar'] = {'action': list} return res @classmethod def get_module_info(cls, name): info = {} try: info = modules.load_information_from_description_file(name) except Exception: _logger.debug('Error when trying to fetch informations for ' 'module %s', name, exc_info=True) return info def _get_desc(self, cr, uid, ids, field_name=None, arg=None, context=None): res = dict.fromkeys(ids, '') for module in self.browse(cr, uid, ids, context=context): path = get_module_resource(module.name, 'static/description/index.html') if path: with tools.file_open(path, 'rb') as desc_file: doc = desc_file.read() html = lxml.html.document_fromstring(doc) for element, attribute, link, pos in html.iterlinks(): if element.get('src') and not '//' in element.get('src') and not 'static/' in element.get('src'): element.set('src', "/%s/static/description/%s" % (module.name, element.get('src'))) res[module.id] = lxml.html.tostring(html) else: overrides = { 'embed_stylesheet': False, 'doctitle_xform': False, 'output_encoding': 'unicode', 'xml_declaration': False, } output = publish_string(source=module.description or '', settings_overrides=overrides, writer=MyWriter()) res[module.id] = output return res def _get_latest_version(self, cr, uid, ids, field_name=None, arg=None, context=None): default_version = modules.adapt_version('1.0') res = dict.fromkeys(ids, default_version) for m in self.browse(cr, uid, ids): res[m.id] = self.get_module_info(m.name).get('version', default_version) return res def _get_views(self, cr, uid, ids, field_name=None, arg=None, context=None): res = {} model_data_obj = self.pool.get('ir.model.data') dmodels = [] if field_name is None or 'views_by_module' in field_name: dmodels.append('ir.ui.view') if field_name is None or 'reports_by_module' in field_name: dmodels.append('ir.actions.report.xml') if field_name is None or 'menus_by_module' in field_name: dmodels.append('ir.ui.menu') assert dmodels, "no models for %s" % field_name for module_rec in self.browse(cr, uid, ids, context=context): res_mod_dic = res[module_rec.id] = { 'menus_by_module': [], 'reports_by_module': [], 'views_by_module': [] } # Skip uninstalled modules below, no data to find anyway. 
if module_rec.state not in ('installed', 'to upgrade', 'to remove'): continue # then, search and group ir.model.data records imd_models = dict([(m, []) for m in dmodels]) imd_ids = model_data_obj.search(cr, uid, [ ('module', '=', module_rec.name), ('model', 'in', tuple(dmodels)) ]) for imd_res in model_data_obj.read(cr, uid, imd_ids, ['model', 'res_id'], context=context): imd_models[imd_res['model']].append(imd_res['res_id']) def browse(model): M = self.pool[model] # as this method is called before the module update, some xmlid may be invalid at this stage # explictly filter records before reading them ids = M.exists(cr, uid, imd_models.get(model, []), context) return M.browse(cr, uid, ids, context) def format_view(v): aa = v.inherit_id and '* INHERIT ' or '' return '%s%s (%s)' % (aa, v.name, v.type) res_mod_dic['views_by_module'] = map(format_view, browse('ir.ui.view')) res_mod_dic['reports_by_module'] = map(attrgetter('name'), browse('ir.actions.report.xml')) res_mod_dic['menus_by_module'] = map(attrgetter('complete_name'), browse('ir.ui.menu')) for key in res.iterkeys(): for k, v in res[key].iteritems(): res[key][k] = "\n".join(sorted(v)) return res def _get_icon_image(self, cr, uid, ids, field_name=None, arg=None, context=None): res = dict.fromkeys(ids, '') for module in self.browse(cr, uid, ids, context=context): path = get_module_resource(module.name, 'static', 'description', 'icon.png') if path: image_file = tools.file_open(path, 'rb') try: res[module.id] = image_file.read().encode('base64') finally: image_file.close() return res _columns = { 'name': fields.char("Technical Name", readonly=True, required=True, select=True), 'category_id': fields.many2one('ir.module.category', 'Category', readonly=True, select=True), 'shortdesc': fields.char('Module Name', readonly=True, translate=True), 'summary': fields.char('Summary', readonly=True, translate=True), 'description': fields.text("Description", readonly=True, translate=True), 'description_html': fields.function(_get_desc, string='Description HTML', type='html', method=True, readonly=True), 'author': fields.char("Author", readonly=True), 'maintainer': fields.char('Maintainer', readonly=True), 'contributors': fields.text('Contributors', readonly=True), 'website': fields.char("Website", readonly=True), # attention: Incorrect field names !! # installed_version refers the latest version (the one on disk) # latest_version refers the installed version (the one in database) # published_version refers the version available on the repository 'installed_version': fields.function(_get_latest_version, string='Latest Version', type='char'), 'latest_version': fields.char('Installed Version', readonly=True), 'published_version': fields.char('Published Version', readonly=True), 'url': fields.char('URL', readonly=True), 'sequence': fields.integer('Sequence'), 'dependencies_id': fields.one2many('ir.module.module.dependency', 'module_id', 'Dependencies', readonly=True), 'auto_install': fields.boolean('Automatic Installation', help='An auto-installable module is automatically installed by the ' 'system when all its dependencies are satisfied. 
' 'If the module has no dependency, it is always installed.'), 'state': fields.selection([ ('uninstallable', 'Not Installable'), ('uninstalled', 'Not Installed'), ('installed', 'Installed'), ('to upgrade', 'To be upgraded'), ('to remove', 'To be removed'), ('to install', 'To be installed') ], string='Status', readonly=True, select=True), 'demo': fields.boolean('Demo Data', readonly=True), 'license': fields.selection([ ('GPL-2', 'GPL Version 2'), ('GPL-2 or any later version', 'GPL-2 or later version'), ('GPL-3', 'GPL Version 3'), ('GPL-3 or any later version', 'GPL-3 or later version'), ('AGPL-3', 'Affero GPL-3'), ('LGPL-3', 'LGPL Version 3'), ('Other OSI approved licence', 'Other OSI Approved Licence'), ('Other proprietary', 'Other Proprietary') ], string='License', readonly=True), 'menus_by_module': fields.function(_get_views, string='Menus', type='text', multi="meta", store=True), 'reports_by_module': fields.function(_get_views, string='Reports', type='text', multi="meta", store=True), 'views_by_module': fields.function(_get_views, string='Views', type='text', multi="meta", store=True), 'application': fields.boolean('Application', readonly=True), 'icon': fields.char('Icon URL'), 'icon_image': fields.function(_get_icon_image, string='Icon', type="binary"), } _defaults = { 'state': 'uninstalled', 'sequence': 100, 'demo': False, 'license': 'AGPL-3', } _order = 'sequence,name' def _name_uniq_msg(self, cr, uid, ids, context=None): return _('The name of the module must be unique !') _sql_constraints = [ ('name_uniq', 'UNIQUE (name)', _name_uniq_msg), ] def unlink(self, cr, uid, ids, context=None): if not ids: return True if isinstance(ids, (int, long)): ids = [ids] mod_names = [] for mod in self.read(cr, uid, ids, ['state', 'name'], context): if mod['state'] in ('installed', 'to upgrade', 'to remove', 'to install'): raise orm.except_orm(_('Error'), _('You try to remove a module that is installed or will be installed')) mod_names.append(mod['name']) #Removing the entry from ir_model_data #ids_meta = self.pool.get('ir.model.data').search(cr, uid, [('name', '=', 'module_meta_information'), ('module', 'in', mod_names)]) #if ids_meta: # self.pool.get('ir.model.data').unlink(cr, uid, ids_meta, context) return super(module, self).unlink(cr, uid, ids, context=context) @staticmethod def _check_external_dependencies(terp): depends = terp.get('external_dependencies') if not depends: return for pydep in depends.get('python', []): try: importlib.import_module(pydep) except ImportError: raise ImportError('No module named %s' % (pydep,)) for binary in depends.get('bin', []): if tools.find_in_path(binary) is None: raise Exception('Unable to find %r in path' % (binary,)) @classmethod def check_external_dependencies(cls, module_name, newstate='to install'): terp = cls.get_module_info(module_name) try: cls._check_external_dependencies(terp) except Exception, e: if newstate == 'to install': msg = _('Unable to install module "%s" because an external dependency is not met: %s') elif newstate == 'to upgrade': msg = _('Unable to upgrade module "%s" because an external dependency is not met: %s') else: msg = _('Unable to process module "%s" because an external dependency is not met: %s') raise orm.except_orm(_('Error'), msg % (module_name, e.args[0])) @api.multi def state_update(self, newstate, states_to_update, level=100): if level < 1: raise orm.except_orm(_('Error'), _('Recursion error in modules dependencies !')) # whether some modules are installed with demo data demo = False for module in self: # determine 
dependency modules to update/others update_mods, ready_mods = self.browse(), self.browse() for dep in module.dependencies_id: if dep.state == 'unknown': raise orm.except_orm(_('Error'), _("You try to install module '%s' that depends on module '%s'.\nBut the latter module is not available in your system.") % (module.name, dep.name,)) if dep.depend_id.state == newstate: ready_mods += dep.depend_id else: update_mods += dep.depend_id # update dependency modules that require it, and determine demo for module update_demo = update_mods.state_update(newstate, states_to_update, level=level-1) module_demo = module.demo or update_demo or any(mod.demo for mod in ready_mods) demo = demo or module_demo # check dependencies and update module itself self.check_external_dependencies(module.name, newstate) if module.state in states_to_update: module.write({'state': newstate, 'demo': module_demo}) return demo def button_install(self, cr, uid, ids, context=None): # Mark the given modules to be installed. self.state_update(cr, uid, ids, 'to install', ['uninstalled'], context=context) # Mark (recursively) the newly satisfied modules to also be installed # Select all auto-installable (but not yet installed) modules. domain = [('state', '=', 'uninstalled'), ('auto_install', '=', True)] uninstalled_ids = self.search(cr, uid, domain, context=context) uninstalled_modules = self.browse(cr, uid, uninstalled_ids, context=context) # Keep those with: # - all dependencies satisfied (installed or to be installed), # - at least one dependency being 'to install' satisfied_states = frozenset(('installed', 'to install', 'to upgrade')) def all_depencies_satisfied(m): states = set(d.state for d in m.dependencies_id) return states.issubset(satisfied_states) and ('to install' in states) to_install_modules = filter(all_depencies_satisfied, uninstalled_modules) to_install_ids = map(lambda m: m.id, to_install_modules) # Mark them to be installed. 
if to_install_ids: self.button_install(cr, uid, to_install_ids, context=context) return dict(ACTION_DICT, name=_('Install')) def button_immediate_install(self, cr, uid, ids, context=None): """ Installs the selected module(s) immediately and fully, returns the next res.config action to execute :param ids: identifiers of the modules to install :returns: next res.config item to execute :rtype: dict[str, object] """ return self._button_immediate_function(cr, uid, ids, self.button_install, context=context) def button_install_cancel(self, cr, uid, ids, context=None): self.write(cr, uid, ids, {'state': 'uninstalled', 'demo': False}) return True def module_uninstall(self, cr, uid, ids, context=None): """Perform the various steps required to uninstall a module completely including the deletion of all database structures created by the module: tables, columns, constraints, etc.""" ir_model_data = self.pool.get('ir.model.data') modules_to_remove = [m.name for m in self.browse(cr, uid, ids, context)] ir_model_data._module_data_uninstall(cr, uid, modules_to_remove, context) self.write(cr, uid, ids, {'state': 'uninstalled', 'latest_version': False}) return True def downstream_dependencies(self, cr, uid, ids, known_dep_ids=None, exclude_states=['uninstalled', 'uninstallable', 'to remove'], context=None): """Return the ids of all modules that directly or indirectly depend on the given module `ids`, and that satisfy the `exclude_states` filter""" if not ids: return [] known_dep_ids = set(known_dep_ids or []) cr.execute('''SELECT DISTINCT m.id FROM ir_module_module_dependency d JOIN ir_module_module m ON (d.module_id=m.id) WHERE d.name IN (SELECT name from ir_module_module where id in %s) AND m.state NOT IN %s AND m.id NOT IN %s ''', (tuple(ids), tuple(exclude_states), tuple(known_dep_ids or ids))) new_dep_ids = set([m[0] for m in cr.fetchall()]) missing_mod_ids = new_dep_ids - known_dep_ids known_dep_ids |= new_dep_ids if missing_mod_ids: known_dep_ids |= set(self.downstream_dependencies(cr, uid, list(missing_mod_ids), known_dep_ids, exclude_states, context)) return list(known_dep_ids) def _button_immediate_function(self, cr, uid, ids, function, context=None): function(cr, uid, ids, context=context) cr.commit() api.Environment.reset() registry = openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True) config = registry['res.config'].next(cr, uid, [], context=context) or {} if config.get('type') not in ('ir.actions.act_window_close',): return config # reload the client; open the first available root menu menu_obj = registry['ir.ui.menu'] menu_ids = menu_obj.search(cr, uid, [('parent_id', '=', False)], context=context) return { 'type': 'ir.actions.client', 'tag': 'reload', 'params': {'menu_id': menu_ids and menu_ids[0] or False} } #TODO remove me in master, not called anymore def button_immediate_uninstall(self, cr, uid, ids, context=None): """ Uninstall the selected module(s) immediately and fully, returns the next res.config action to execute """ return self._button_immediate_function(cr, uid, ids, self.button_uninstall, context=context) def button_uninstall(self, cr, uid, ids, context=None): if any(m.name == 'base' for m in self.browse(cr, uid, ids, context=context)): raise orm.except_orm(_('Error'), _("The `base` module cannot be uninstalled")) dep_ids = self.downstream_dependencies(cr, uid, ids, context=context) self.write(cr, uid, ids + dep_ids, {'state': 'to remove'}) return dict(ACTION_DICT, name=_('Uninstall')) def button_uninstall_cancel(self, cr, uid, ids, context=None): 
self.write(cr, uid, ids, {'state': 'installed'}) return True def button_immediate_upgrade(self, cr, uid, ids, context=None): """ Upgrade the selected module(s) immediately and fully, return the next res.config action to execute """ return self._button_immediate_function(cr, uid, ids, self.button_upgrade, context=context) def button_upgrade(self, cr, uid, ids, context=None): depobj = self.pool.get('ir.module.module.dependency') todo = list(self.browse(cr, uid, ids, context=context)) self.update_list(cr, uid) i = 0 while i < len(todo): mod = todo[i] i += 1 if mod.state not in ('installed', 'to upgrade'): raise orm.except_orm(_('Error'), _("Can not upgrade module '%s'. It is not installed.") % (mod.name,)) self.check_external_dependencies(mod.name, 'to upgrade') iids = depobj.search(cr, uid, [('name', '=', mod.name)], context=context) for dep in depobj.browse(cr, uid, iids, context=context): if dep.module_id.state == 'installed' and dep.module_id not in todo: todo.append(dep.module_id) ids = map(lambda x: x.id, todo) self.write(cr, uid, ids, {'state': 'to upgrade'}, context=context) to_install = [] for mod in todo: for dep in mod.dependencies_id: if dep.state == 'unknown': raise orm.except_orm(_('Error'), _('You try to upgrade a module that depends on the module: %s.\nBut this module is not available in your system.') % (dep.name,)) if dep.state == 'uninstalled': ids2 = self.search(cr, uid, [('name', '=', dep.name)]) to_install.extend(ids2) self.button_install(cr, uid, to_install, context=context) return dict(ACTION_DICT, name=_('Apply Schedule Upgrade')) def button_upgrade_cancel(self, cr, uid, ids, context=None): self.write(cr, uid, ids, {'state': 'installed'}) return True def button_update_translations(self, cr, uid, ids, context=None): self.update_translations(cr, uid, ids) return True @staticmethod def get_values_from_terp(terp): return { 'description': terp.get('description', ''), 'shortdesc': terp.get('name', ''), 'author': terp.get('author', 'Unknown'), 'maintainer': terp.get('maintainer', False), 'contributors': ', '.join(terp.get('contributors', [])) or False, 'website': terp.get('website', ''), 'license': terp.get('license', 'AGPL-3'), 'sequence': terp.get('sequence', 100), 'application': terp.get('application', False), 'auto_install': terp.get('auto_install', False), 'icon': terp.get('icon', False), 'summary': terp.get('summary', ''), } def create(self, cr, uid, vals, context=None): new_id = super(module, self).create(cr, uid, vals, context=context) module_metadata = { 'name': 'module_%s' % vals['name'], 'model': 'ir.module.module', 'module': 'base', 'res_id': new_id, 'noupdate': True, } self.pool['ir.model.data'].create(cr, uid, module_metadata) return new_id # update the list of available packages def update_list(self, cr, uid, context=None): res = [0, 0] # [update, add] default_version = modules.adapt_version('1.0') known_mods = self.browse(cr, uid, self.search(cr, uid, [])) known_mods_names = dict([(m.name, m) for m in known_mods]) # iterate through detected modules and update/create them in db for mod_name in modules.get_modules(): mod = known_mods_names.get(mod_name) terp = self.get_module_info(mod_name) values = self.get_values_from_terp(terp) if mod: updated_values = {} for key in values: old = getattr(mod, key) updated = isinstance(values[key], basestring) and tools.ustr(values[key]) or values[key] if (old or updated) and updated != old: updated_values[key] = values[key] if terp.get('installable', True) and mod.state == 'uninstallable': updated_values['state'] = 
'uninstalled' if parse_version(terp.get('version', default_version)) > parse_version(mod.latest_version or default_version): res[0] += 1 if updated_values: self.write(cr, uid, mod.id, updated_values) else: mod_path = modules.get_module_path(mod_name) if not mod_path: continue if not terp or not terp.get('installable', True): continue id = self.create(cr, uid, dict(name=mod_name, state='uninstalled', **values)) mod = self.browse(cr, uid, id) res[1] += 1 self._update_dependencies(cr, uid, mod, terp.get('depends', [])) self._update_category(cr, uid, mod, terp.get('category', 'Uncategorized')) # Trigger load_addons if new module have been discovered it exists on # wsgi handlers, so they can react accordingly if tuple(res) != (0, 0): for handler in openerp.service.wsgi_server.module_handlers: if hasattr(handler, 'load_addons'): handler.load_addons() return res def download(self, cr, uid, ids, download=True, context=None): return [] def install_from_urls(self, cr, uid, urls, context=None): if not self.pool['res.users'].has_group(cr, uid, 'base.group_system'): raise openerp.exceptions.AccessDenied() apps_server = urlparse.urlparse(self.get_apps_server(cr, uid, context=context)) OPENERP = openerp.release.product_name.lower() tmp = tempfile.mkdtemp() _logger.debug('Install from url: %r', urls) try: # 1. Download & unzip missing modules for module_name, url in urls.items(): if not url: continue # nothing to download, local version is already the last one up = urlparse.urlparse(url) if up.scheme != apps_server.scheme or up.netloc != apps_server.netloc: raise openerp.exceptions.AccessDenied() try: _logger.info('Downloading module `%s` from OpenERP Apps', module_name) content = urllib2.urlopen(url).read() except Exception: _logger.exception('Failed to fetch module %s', module_name) raise osv.except_osv(_('Module not found'), _('The `%s` module appears to be unavailable at the moment, please try again later.') % module_name) else: zipfile.ZipFile(StringIO(content)).extractall(tmp) assert os.path.isdir(os.path.join(tmp, module_name)) # 2a. Copy/Replace module source in addons path for module_name, url in urls.items(): if module_name == OPENERP or not url: continue # OPENERP is special case, handled below, and no URL means local module module_path = modules.get_module_path(module_name, downloaded=True, display_warning=False) bck = backup(module_path, False) _logger.info('Copy downloaded module `%s` to `%s`', module_name, module_path) shutil.move(os.path.join(tmp, module_name), module_path) if bck: shutil.rmtree(bck) # 2b. Copy/Replace server+base module source if downloaded if urls.get(OPENERP, None): # special case. it contains the server and the base module. # extract path is not the same base_path = os.path.dirname(modules.get_module_path('base')) # copy all modules in the SERVER/openerp/addons directory to the new "openerp" module (except base itself) for d in os.listdir(base_path): if d != 'base' and os.path.isdir(os.path.join(base_path, d)): destdir = os.path.join(tmp, OPENERP, 'addons', d) # XXX 'openerp' subdirectory ? 
shutil.copytree(os.path.join(base_path, d), destdir) # then replace the server by the new "base" module server_dir = openerp.tools.config['root_path'] # XXX or dirname() bck = backup(server_dir) _logger.info('Copy downloaded module `openerp` to `%s`', server_dir) shutil.move(os.path.join(tmp, OPENERP), server_dir) #if bck: # shutil.rmtree(bck) self.update_list(cr, uid, context=context) with_urls = [m for m, u in urls.items() if u] downloaded_ids = self.search(cr, uid, [('name', 'in', with_urls)], context=context) already_installed = self.search(cr, uid, [('id', 'in', downloaded_ids), ('state', '=', 'installed')], context=context) to_install_ids = self.search(cr, uid, [('name', 'in', urls.keys()), ('state', '=', 'uninstalled')], context=context) post_install_action = self.button_immediate_install(cr, uid, to_install_ids, context=context) if already_installed: # in this case, force server restart to reload python code... cr.commit() openerp.service.server.restart() return { 'type': 'ir.actions.client', 'tag': 'home', 'params': {'wait': True}, } return post_install_action finally: shutil.rmtree(tmp) def get_apps_server(self, cr, uid, context=None): return tools.config.get('apps_server', 'https://apps.openerp.com/apps') def _update_dependencies(self, cr, uid, mod_browse, depends=None): if depends is None: depends = [] existing = set(x.name for x in mod_browse.dependencies_id) needed = set(depends) for dep in (needed - existing): cr.execute('INSERT INTO ir_module_module_dependency (module_id, name) values (%s, %s)', (mod_browse.id, dep)) for dep in (existing - needed): cr.execute('DELETE FROM ir_module_module_dependency WHERE module_id = %s and name = %s', (mod_browse.id, dep)) self.invalidate_cache(cr, uid, ['dependencies_id'], [mod_browse.id]) def _update_category(self, cr, uid, mod_browse, category='Uncategorized'): current_category = mod_browse.category_id current_category_path = [] while current_category: current_category_path.insert(0, current_category.name) current_category = current_category.parent_id categs = category.split('/') if categs != current_category_path: cat_id = create_categories(cr, categs) mod_browse.write({'category_id': cat_id}) def update_translations(self, cr, uid, ids, filter_lang=None, context=None): if not filter_lang: res_lang = self.pool.get('res.lang') lang_ids = res_lang.search(cr, uid, [('translatable', '=', True)]) filter_lang = [lang.code for lang in res_lang.browse(cr, uid, lang_ids)] elif not isinstance(filter_lang, (list, tuple)): filter_lang = [filter_lang] modules = [m.name for m in self.browse(cr, uid, ids) if m.state == 'installed'] self.pool.get('ir.translation').load_module_terms(cr, modules, filter_lang, context=context) def check(self, cr, uid, ids, context=None): for mod in self.browse(cr, uid, ids, context=context): if not mod.description: _logger.warning('module %s: description is empty !', mod.name) DEP_STATES = [ ('uninstallable', 'Uninstallable'), ('uninstalled', 'Not Installed'), ('installed', 'Installed'), ('to upgrade', 'To be upgraded'), ('to remove', 'To be removed'), ('to install', 'To be installed'), ('unknown', 'Unknown'), ] class module_dependency(osv.Model): _name = "ir.module.module.dependency" _description = "Module dependency" # the dependency name name = fields2.Char(index=True) # the module that depends on it module_id = fields2.Many2one('ir.module.module', 'Module', ondelete='cascade') # the module corresponding to the dependency, and its status depend_id = fields2.Many2one('ir.module.module', 'Dependency', 
compute='_compute_depend') state = fields2.Selection(DEP_STATES, string='Status', compute='_compute_state') @api.multi @api.depends('name') def _compute_depend(self): # retrieve all modules corresponding to the dependency names names = list(set(dep.name for dep in self)) mods = self.env['ir.module.module'].search([('name', 'in', names)]) # index modules by name, and assign dependencies name_mod = dict((mod.name, mod) for mod in mods) for dep in self: dep.depend_id = name_mod.get(dep.name) @api.one @api.depends('depend_id.state') def _compute_state(self): self.state = self.depend_id.state or 'unknown' # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
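
The backup() helper near the top of this module moves an existing path aside by appending the first unused '~N' suffix before copying new module sources in. A standalone sketch of just the name-picking step, with a hypothetical path for illustration:

import os

def next_backup_name(path):
    """Return the first 'path~N' that does not exist yet, as backup() checks before shutil.move."""
    path = os.path.normpath(path)
    cnt = 1
    while True:
        candidate = '%s~%d' % (path, cnt)
        if not os.path.exists(candidate):
            return candidate
        cnt += 1

# e.g. if '/tmp/addons/web~1' already exists, this returns '/tmp/addons/web~2'
print(next_backup_name('/tmp/addons/web'))
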
alheinecke/tensorflow-xsmm
refs/heads/master
tensorflow/contrib/learn/python/learn/dataframe/transforms/sparsify.py
76
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transforms Dense to Sparse Tensor."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops


class Sparsify(transform.TensorFlowTransform):
  """Transforms Dense to Sparse Tensor."""

  def __init__(self, strip_value):
    super(Sparsify, self).__init__()
    self._strip_value = strip_value

  @transform.parameter
  def strip_value(self):
    return self._strip_value

  @property
  def name(self):
    return "Sparsify"

  @property
  def input_valency(self):
    return 1

  @property
  def _output_names(self):
    return "output",

  def _apply_transform(self, input_tensors, **kwargs):
    """Applies the transformation to the `transform_input`.

    Args:
      input_tensors: a list of Tensors representing the input to the Transform.
      **kwargs: Additional keyword arguments, unused here.

    Returns:
      A namedtuple of Tensors representing the transformed output.
    """
    d = input_tensors[0]

    if self.strip_value is np.nan:
      strip_hot = math_ops.is_nan(d)
    else:
      strip_hot = math_ops.equal(d,
                                 array_ops.constant([self.strip_value],
                                                    dtype=d.dtype))
    keep_hot = math_ops.logical_not(strip_hot)
    length = array_ops.reshape(array_ops.shape(d), [])
    indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
    values = array_ops.boolean_mask(d, keep_hot)

    sparse_indices = array_ops.reshape(
        math_ops.cast(indices, dtypes.int64), [-1, 1])
    shape = math_ops.cast(array_ops.shape(d), dtypes.int64)

    # pylint: disable=not-callable
    return self.return_type(
        sparse_tensor.SparseTensor(sparse_indices, values, shape))
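
The same dense-to-sparse logic, restated with NumPy for a 1-D vector so the indices/values/shape triple is easy to inspect. This is only a sketch of what the transform computes; the class above builds the equivalent tensors with TensorFlow ops:

import numpy as np

def sparsify_dense(d, strip_value):
    """Return (indices, values, dense_shape) for entries of d not equal to strip_value."""
    d = np.asarray(d, dtype=np.float64)
    if np.isnan(strip_value):
        keep = ~np.isnan(d)
    else:
        keep = d != strip_value
    indices = np.flatnonzero(keep).reshape(-1, 1).astype(np.int64)
    values = d[keep]
    dense_shape = np.array(d.shape, dtype=np.int64)
    return indices, values, dense_shape

print(sparsify_dense([3.0, 0.0, 7.0, 0.0], strip_value=0.0))
# indices [[0], [2]], values [3., 7.], dense_shape [4]
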
jordanemedlock/psychtruths
refs/heads/master
temboo/Library/Google/Plus/Domains/Circles/Get.py
5
# -*- coding: utf-8 -*-

###############################################################################
#
# Get
# Retrieves a specific circle.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################

from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution

import json


class Get(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the Get Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Get, self).__init__(temboo_session, '/Library/Google/Plus/Domains/Circles/Get')

    def new_input_set(self):
        return GetInputSet()

    def _make_result_set(self, result, path):
        return GetResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return GetChoreographyExecution(session, exec_id, path)


class GetInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Get
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
        """
        super(GetInputSet, self)._set_input('AccessToken', value)

    def set_Callback(self, value):
        """
        Set the value of the Callback input for this Choreo. ((optional, string) Specifies a JavaScript function that will be passed the response data for using the API with JSONP.)
        """
        super(GetInputSet, self)._set_input('Callback', value)

    def set_CircleID(self, value):
        """
        Set the value of the CircleID input for this Choreo. ((required, string) The ID of the circle to get.)
        """
        super(GetInputSet, self)._set_input('CircleID', value)

    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
        """
        super(GetInputSet, self)._set_input('ClientID', value)

    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
        """
        super(GetInputSet, self)._set_input('ClientSecret', value)

    def set_Fields(self, value):
        """
        Set the value of the Fields input for this Choreo. ((optional, string) Used to specify fields to include in a partial response. This can be used to reduce the amount of data returned. See documentation for syntax rules.)
        """
        super(GetInputSet, self)._set_input('Fields', value)

    def set_PrettyPrint(self, value):
        """
        Set the value of the PrettyPrint input for this Choreo. ((optional, boolean) A flag used to pretty print the JSON response to make it more readable. Defaults to "true".)
        """
        super(GetInputSet, self)._set_input('PrettyPrint', value)

    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth Refresh Token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(GetInputSet, self)._set_input('RefreshToken', value)

    def set_UserIP(self, value):
        """
        Set the value of the UserIP input for this Choreo. ((optional, string) Identifies the IP address of the end user for whom the API call is being made. Used to enforce per-user quotas.)
        """
        super(GetInputSet, self)._set_input('UserIP', value)


class GetResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Get Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
        """
        return self._output.get('Response', None)

    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)


class GetChoreographyExecution(ChoreographyExecution):

    def _make_result_set(self, response, path):
        return GetResultSet(response, path)
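
A typical way to drive a generated Choreo such as this one with the Temboo Python SDK, sketched with placeholder credentials and IDs; the session constructor arguments and the execute_with_results call follow the usual Temboo SDK pattern assumed here and may need adjusting for your account:

from temboo.core.session import TembooSession
from temboo.Library.Google.Plus.Domains.Circles.Get import Get

# Placeholders: substitute your own Temboo account, app key name/value, and OAuth values.
session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
choreo = Get(session)
inputs = choreo.new_input_set()
inputs.set_ClientID('YOUR_CLIENT_ID')
inputs.set_ClientSecret('YOUR_CLIENT_SECRET')
inputs.set_RefreshToken('YOUR_REFRESH_TOKEN')
inputs.set_CircleID('your-circle-id')

results = choreo.execute_with_results(inputs)
print(results.get_Response())
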
warmspringwinds/scikit-image
refs/heads/master
skimage/color/tests/test_colorconv.py
8
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for color conversion functions. Authors ------- - the rgb2hsv test was written by Nicolas Pinto, 2009 - other tests written by Ralf Gommers, 2009 :license: modified BSD """ from __future__ import division import os.path import numpy as np from numpy.testing import (assert_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, TestCase, ) from skimage import img_as_float, img_as_ubyte from skimage.io import imread from skimage.color import (rgb2hsv, hsv2rgb, rgb2xyz, xyz2rgb, rgb2hed, hed2rgb, separate_stains, combine_stains, rgb2rgbcie, rgbcie2rgb, convert_colorspace, rgb2grey, gray2rgb, xyz2lab, lab2xyz, lab2rgb, rgb2lab, xyz2luv, luv2xyz, luv2rgb, rgb2luv, lab2lch, lch2lab, guess_spatial_dimensions ) from skimage import data_dir from skimage._shared._warnings import expected_warnings import colorsys def test_guess_spatial_dimensions(): im1 = np.zeros((5, 5)) im2 = np.zeros((5, 5, 5)) im3 = np.zeros((5, 5, 3)) im4 = np.zeros((5, 5, 5, 3)) im5 = np.zeros((5,)) assert_equal(guess_spatial_dimensions(im1), 2) assert_equal(guess_spatial_dimensions(im2), 3) assert_equal(guess_spatial_dimensions(im3), None) assert_equal(guess_spatial_dimensions(im4), 3) assert_raises(ValueError, guess_spatial_dimensions, im5) class TestColorconv(TestCase): img_rgb = imread(os.path.join(data_dir, 'color.png')) img_grayscale = imread(os.path.join(data_dir, 'camera.png')) colbars = np.array([[1, 1, 0, 0, 1, 1, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0, 1, 0]]).astype(np.float) colbars_array = np.swapaxes(colbars.reshape(3, 4, 2), 0, 2) colbars_point75 = colbars * 0.75 colbars_point75_array = np.swapaxes(colbars_point75.reshape(3, 4, 2), 0, 2) xyz_array = np.array([[[0.4124, 0.21260, 0.01930]], # red [[0, 0, 0]], # black [[.9505, 1., 1.089]], # white [[.1805, .0722, .9505]], # blue [[.07719, .15438, .02573]], # green ]) lab_array = np.array([[[53.233, 80.109, 67.220]], # red [[0., 0., 0.]], # black [[100.0, 0.005, -0.010]], # white [[32.303, 79.197, -107.864]], # blue [[46.229, -51.7, 49.898]], # green ]) luv_array = np.array([[[53.233, 175.053, 37.751]], # red [[0., 0., 0.]], # black [[100., 0.001, -0.017]], # white [[32.303, -9.400, -130.358]], # blue [[46.228, -43.774, 56.589]], # green ]) # RGB to HSV def test_rgb2hsv_conversion(self): rgb = img_as_float(self.img_rgb)[::16, ::16] hsv = rgb2hsv(rgb).reshape(-1, 3) # ground truth from colorsys gt = np.array([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) for pt in rgb.reshape(-1, 3)] ) assert_almost_equal(hsv, gt) def test_rgb2hsv_error_grayscale(self): self.assertRaises(ValueError, rgb2hsv, self.img_grayscale) def test_rgb2hsv_error_one_element(self): self.assertRaises(ValueError, rgb2hsv, self.img_rgb[0, 0]) # HSV to RGB def test_hsv2rgb_conversion(self): rgb = self.img_rgb.astype("float32")[::16, ::16] # create HSV image with colorsys hsv = np.array([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) for pt in rgb.reshape(-1, 3)]).reshape(rgb.shape) # convert back to RGB and compare with original. # relative precision for RGB -> HSV roundtrip is about 1e-6 assert_almost_equal(rgb, hsv2rgb(hsv), decimal=4) def test_hsv2rgb_error_grayscale(self): self.assertRaises(ValueError, hsv2rgb, self.img_grayscale) def test_hsv2rgb_error_one_element(self): self.assertRaises(ValueError, hsv2rgb, self.img_rgb[0, 0]) # RGB to XYZ def test_rgb2xyz_conversion(self): gt = np.array([[[0.950456, 1. 
, 1.088754], [0.538003, 0.787329, 1.06942 ], [0.592876, 0.28484 , 0.969561], [0.180423, 0.072169, 0.950227]], [[0.770033, 0.927831, 0.138527], [0.35758 , 0.71516 , 0.119193], [0.412453, 0.212671, 0.019334], [0. , 0. , 0. ]]]) assert_almost_equal(rgb2xyz(self.colbars_array), gt) # stop repeating the "raises" checks for all other functions that are # implemented with color._convert() def test_rgb2xyz_error_grayscale(self): self.assertRaises(ValueError, rgb2xyz, self.img_grayscale) def test_rgb2xyz_error_one_element(self): self.assertRaises(ValueError, rgb2xyz, self.img_rgb[0, 0]) # XYZ to RGB def test_xyz2rgb_conversion(self): assert_almost_equal(xyz2rgb(rgb2xyz(self.colbars_array)), self.colbars_array) # RGB<->XYZ roundtrip on another image def test_xyz_rgb_roundtrip(self): img_rgb = img_as_float(self.img_rgb) assert_array_almost_equal(xyz2rgb(rgb2xyz(img_rgb)), img_rgb) # RGB<->HED roundtrip with ubyte image def test_hed_rgb_roundtrip(self): img_rgb = img_as_ubyte(self.img_rgb) with expected_warnings(['precision loss']): new = img_as_ubyte(hed2rgb(rgb2hed(img_rgb))) assert_equal(new, img_rgb) # RGB<->HED roundtrip with float image def test_hed_rgb_float_roundtrip(self): img_rgb = img_as_float(self.img_rgb) assert_array_almost_equal(hed2rgb(rgb2hed(img_rgb)), img_rgb) # RGB<->HDX roundtrip with ubyte image def test_hdx_rgb_roundtrip(self): from skimage.color.colorconv import hdx_from_rgb, rgb_from_hdx img_rgb = self.img_rgb conv = combine_stains(separate_stains(img_rgb, hdx_from_rgb), rgb_from_hdx) assert_equal(img_as_ubyte(conv), img_rgb) # RGB<->HDX roundtrip with ubyte image def test_hdx_rgb_roundtrip(self): from skimage.color.colorconv import hdx_from_rgb, rgb_from_hdx img_rgb = img_as_float(self.img_rgb) conv = combine_stains(separate_stains(img_rgb, hdx_from_rgb), rgb_from_hdx) assert_array_almost_equal(conv, img_rgb) # RGB to RGB CIE def test_rgb2rgbcie_conversion(self): gt = np.array([[[ 0.1488856 , 0.18288098, 0.19277574], [ 0.01163224, 0.16649536, 0.18948516], [ 0.12259182, 0.03308008, 0.17298223], [-0.01466154, 0.01669446, 0.16969164]], [[ 0.16354714, 0.16618652, 0.0230841 ], [ 0.02629378, 0.1498009 , 0.01979351], [ 0.13725336, 0.01638562, 0.00329059], [ 0. , 0. , 0. 
]]]) assert_almost_equal(rgb2rgbcie(self.colbars_array), gt) # RGB CIE to RGB def test_rgbcie2rgb_conversion(self): # only roundtrip test, we checked rgb2rgbcie above already assert_almost_equal(rgbcie2rgb(rgb2rgbcie(self.colbars_array)), self.colbars_array) def test_convert_colorspace(self): colspaces = ['HSV', 'RGB CIE', 'XYZ'] colfuncs_from = [hsv2rgb, rgbcie2rgb, xyz2rgb] colfuncs_to = [rgb2hsv, rgb2rgbcie, rgb2xyz] assert_almost_equal(convert_colorspace(self.colbars_array, 'RGB', 'RGB'), self.colbars_array) for i, space in enumerate(colspaces): gt = colfuncs_from[i](self.colbars_array) assert_almost_equal(convert_colorspace(self.colbars_array, space, 'RGB'), gt) gt = colfuncs_to[i](self.colbars_array) assert_almost_equal(convert_colorspace(self.colbars_array, 'RGB', space), gt) self.assertRaises(ValueError, convert_colorspace, self.colbars_array, 'nokey', 'XYZ') self.assertRaises(ValueError, convert_colorspace, self.colbars_array, 'RGB', 'nokey') def test_rgb2grey(self): x = np.array([1, 1, 1]).reshape((1, 1, 3)).astype(np.float) g = rgb2grey(x) assert_array_almost_equal(g, 1) assert_equal(g.shape, (1, 1)) def test_rgb2grey_on_grey(self): rgb2grey(np.random.rand(5, 5)) # test matrices for xyz2lab and lab2xyz generated using # http://www.easyrgb.com/index.php?X=CALC # Note: easyrgb website displays xyz*100 def test_xyz2lab(self): assert_array_almost_equal(xyz2lab(self.xyz_array), self.lab_array, decimal=3) # Test the conversion with the rest of the illuminants. for I in ["d50", "d55", "d65", "d75"]: for obs in ["2", "10"]: fname = "lab_array_{0}_{1}.npy".format(I, obs) lab_array_I_obs = np.load( os.path.join(os.path.dirname(__file__), 'data', fname)) assert_array_almost_equal(lab_array_I_obs, xyz2lab(self.xyz_array, I, obs), decimal=2) for I in ["a", "e"]: fname = "lab_array_{0}_2.npy".format(I) lab_array_I_obs = np.load( os.path.join(os.path.dirname(__file__), 'data', fname)) assert_array_almost_equal(lab_array_I_obs, xyz2lab(self.xyz_array, I, "2"), decimal=2) def test_lab2xyz(self): assert_array_almost_equal(lab2xyz(self.lab_array), self.xyz_array, decimal=3) # Test the conversion with the rest of the illuminants. for I in ["d50", "d55", "d65", "d75"]: for obs in ["2", "10"]: fname = "lab_array_{0}_{1}.npy".format(I, obs) lab_array_I_obs = np.load( os.path.join(os.path.dirname(__file__), 'data', fname)) assert_array_almost_equal(lab2xyz(lab_array_I_obs, I, obs), self.xyz_array, decimal=3) for I in ["a", "e"]: fname = "lab_array_{0}_2.npy".format(I, obs) lab_array_I_obs = np.load( os.path.join(os.path.dirname(__file__), 'data', fname)) assert_array_almost_equal(lab2xyz(lab_array_I_obs, I, "2"), self.xyz_array, decimal=3) # And we include a call to test the exception handling in the code. try: xs = lab2xyz(lab_array_I_obs, "NaI", "2") # Not an illuminant except ValueError: pass try: xs = lab2xyz(lab_array_I_obs, "d50", "42") # Not a degree except ValueError: pass def test_rgb2lab_brucelindbloom(self): """ Test the RGB->Lab conversion by comparing to the calculator on the authoritative Bruce Lindbloom [website](http://brucelindbloom.com/index.html?ColorCalculator.html). 
""" # Obtained with D65 white point, sRGB model and gamma gt_for_colbars = np.array([ [100,0,0], [97.1393, -21.5537, 94.4780], [91.1132, -48.0875, -14.1312], [87.7347, -86.1827, 83.1793], [60.3242, 98.2343, -60.8249], [53.2408, 80.0925, 67.2032], [32.2970, 79.1875, -107.8602], [0,0,0]]).T gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2) assert_array_almost_equal(rgb2lab(self.colbars_array), gt_array, decimal=2) def test_lab_rgb_roundtrip(self): img_rgb = img_as_float(self.img_rgb) assert_array_almost_equal(lab2rgb(rgb2lab(img_rgb)), img_rgb) # test matrices for xyz2luv and luv2xyz generated using # http://www.easyrgb.com/index.php?X=CALC # Note: easyrgb website displays xyz*100 def test_xyz2luv(self): assert_array_almost_equal(xyz2luv(self.xyz_array), self.luv_array, decimal=3) # Test the conversion with the rest of the illuminants. for I in ["d50", "d55", "d65", "d75"]: for obs in ["2", "10"]: fname = "luv_array_{0}_{1}.npy".format(I, obs) luv_array_I_obs = np.load( os.path.join(os.path.dirname(__file__), 'data', fname)) assert_array_almost_equal(luv_array_I_obs, xyz2luv(self.xyz_array, I, obs), decimal=2) for I in ["a", "e"]: fname = "luv_array_{0}_2.npy".format(I) luv_array_I_obs = np.load( os.path.join(os.path.dirname(__file__), 'data', fname)) assert_array_almost_equal(luv_array_I_obs, xyz2luv(self.xyz_array, I, "2"), decimal=2) def test_luv2xyz(self): assert_array_almost_equal(luv2xyz(self.luv_array), self.xyz_array, decimal=3) # Test the conversion with the rest of the illuminants. for I in ["d50", "d55", "d65", "d75"]: for obs in ["2", "10"]: fname = "luv_array_{0}_{1}.npy".format(I, obs) luv_array_I_obs = np.load( os.path.join(os.path.dirname(__file__), 'data', fname)) assert_array_almost_equal(luv2xyz(luv_array_I_obs, I, obs), self.xyz_array, decimal=3) for I in ["a", "e"]: fname = "luv_array_{0}_2.npy".format(I, obs) luv_array_I_obs = np.load( os.path.join(os.path.dirname(__file__), 'data', fname)) assert_array_almost_equal(luv2xyz(luv_array_I_obs, I, "2"), self.xyz_array, decimal=3) def test_rgb2luv_brucelindbloom(self): """ Test the RGB->Lab conversion by comparing to the calculator on the authoritative Bruce Lindbloom [website](http://brucelindbloom.com/index.html?ColorCalculator.html). 
""" # Obtained with D65 white point, sRGB model and gamma gt_for_colbars = np.array([ [100, 0, 0], [97.1393, 7.7056, 106.7866], [91.1132, -70.4773, -15.2042], [87.7347, -83.0776, 107.3985], [60.3242, 84.0714, -108.6834], [53.2408, 175.0151, 37.7564], [32.2970, -9.4054, -130.3423], [0, 0, 0]]).T gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2) assert_array_almost_equal(rgb2luv(self.colbars_array), gt_array, decimal=2) def test_luv_rgb_roundtrip(self): img_rgb = img_as_float(self.img_rgb) assert_array_almost_equal(luv2rgb(rgb2luv(img_rgb)), img_rgb) def test_lab_rgb_outlier(self): lab_array = np.ones((3, 1, 3)) lab_array[0] = [50, -12, 85] lab_array[1] = [50, 12, -85] lab_array[2] = [90, -4, -47] rgb_array = np.array([[[0.501, 0.481, 0]], [[0, 0.482, 1.]], [[0.578, 0.914, 1.]], ]) assert_almost_equal(lab2rgb(lab_array), rgb_array, decimal=3) def test_lab_full_gamut(self): a, b = np.meshgrid(np.arange(-100, 100), np.arange(-100, 100)) L = np.ones(a.shape) lab = np.dstack((L, a, b)) for value in [0, 10, 20]: lab[:, :, 0] = value with expected_warnings(['Color data out of range']): lab2xyz(lab) def test_lab_lch_roundtrip(self): rgb = img_as_float(self.img_rgb) lab = rgb2lab(rgb) lab2 = lch2lab(lab2lch(lab)) assert_array_almost_equal(lab2, lab) def test_rgb_lch_roundtrip(self): rgb = img_as_float(self.img_rgb) lab = rgb2lab(rgb) lch = lab2lch(lab) lab2 = lch2lab(lch) rgb2 = lab2rgb(lab2) assert_array_almost_equal(rgb, rgb2) def test_lab_lch_0d(self): lab0 = self._get_lab0() lch0 = lab2lch(lab0) lch2 = lab2lch(lab0[None, None, :]) assert_array_almost_equal(lch0, lch2[0, 0, :]) def test_lab_lch_1d(self): lab0 = self._get_lab0() lch0 = lab2lch(lab0) lch1 = lab2lch(lab0[None, :]) assert_array_almost_equal(lch0, lch1[0, :]) def test_lab_lch_3d(self): lab0 = self._get_lab0() lch0 = lab2lch(lab0) lch3 = lab2lch(lab0[None, None, None, :]) assert_array_almost_equal(lch0, lch3[0, 0, 0, :]) def _get_lab0(self): rgb = img_as_float(self.img_rgb[:1, :1, :]) return rgb2lab(rgb)[0, 0, :] def test_gray2rgb(): x = np.array([0, 0.5, 1]) assert_raises(ValueError, gray2rgb, x) x = x.reshape((3, 1)) y = gray2rgb(x) assert_equal(y.shape, (3, 1, 3)) assert_equal(y.dtype, x.dtype) x = np.array([[0, 128, 255]], dtype=np.uint8) z = gray2rgb(x) assert_equal(z.shape, (1, 3, 3)) assert_equal(z[..., 0], x) assert_equal(z[0, 1, :], [128, 128, 128]) def test_gray2rgb_rgb(): x = np.random.rand(5, 5, 4) y = gray2rgb(x) assert_equal(x, y) if __name__ == "__main__": from numpy.testing import run_module_suite run_module_suite()
CingHu/neutron-ustack
refs/heads/master
neutron/db/migration/alembic_migrations/versions/e28dc49e9e4_add_lbaas_pool_subnet_network_id.py
1
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""empty message

Revision ID: e28dc49e9e4
Revises: 130e35166f9d
Create Date: 2014-12-24 19:11:43.368566

"""

# revision identifiers, used by Alembic.
revision = 'e28dc49e9e4'
down_revision = '130e35166f9d'

from alembic import op
import sqlalchemy as sa

migration_for_plugins = [
    '*',
]

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.add_column('lbaas_pools',
                  sa.Column(u'network_id', sa.String(36), nullable=False))
    op.add_column('lbaas_pools',
                  sa.Column(u'subnet_id', sa.String(36), nullable=True))


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_column('lbaas_pools', 'network_id')
    op.drop_column('lbaas_pools', 'subnet_id')
kalaspuff/tomodachi
refs/heads/master
tests/services/test/service.py
5
import tomodachi
from tomodachi.discovery.dummy_registry import DummyRegistry
from tomodachi.envelope.json_base import JsonBase

from .code import test_func


@tomodachi.service
class DummyService(tomodachi.Service):
    name = "test_dummy"
    discovery = [DummyRegistry]
    message_envelope = JsonBase
    options = {
        "aws_sns_sqs": {
            "region_name": "eu-west-1",
            "aws_access_key_id": "XXXXXXXXX",
            "aws_secret_access_key": "XXXXXXXXX",
        },
        "amqp": {"port": 54321, "login": "invalid", "password": "invalid"},
    }
    start = False
    started = False
    stop = False

    async def _start_service(self) -> None:
        test_func()
        self.start = True

    async def _started_service(self) -> None:
        self.started = True

    async def _stop_service(self) -> None:
        self.stop = True
matmutant/sl4a
refs/heads/master
python-build/python-libs/python-twitter/twitter_test.py
90
#!/usr/bin/python2.4 # -*- coding: utf-8 -*-# # # Copyright 2007 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''Unit tests for the twitter.py library''' __author__ = '[email protected]' import os import simplejson import time import calendar import unittest import twitter class StatusTest(unittest.TestCase): SAMPLE_JSON = '''{"created_at": "Fri Jan 26 23:17:14 +0000 2007", "id": 4391023, "text": "A l\u00e9gp\u00e1rn\u00e1s haj\u00f3m tele van angoln\u00e1kkal.", "user": {"description": "Canvas. JC Penny. Three ninety-eight.", "id": 718443, "location": "Okinawa, Japan", "name": "Kesuke Miyagi", "profile_image_url": "http://twitter.com/system/user/profile_image/718443/normal/kesuke.png", "screen_name": "kesuke", "url": "http://twitter.com/kesuke"}}''' def _GetSampleUser(self): return twitter.User(id=718443, name='Kesuke Miyagi', screen_name='kesuke', description=u'Canvas. JC Penny. Three ninety-eight.', location='Okinawa, Japan', url='http://twitter.com/kesuke', profile_image_url='http://twitter.com/system/user/pro' 'file_image/718443/normal/kesuke.pn' 'g') def _GetSampleStatus(self): return twitter.Status(created_at='Fri Jan 26 23:17:14 +0000 2007', id=4391023, text=u'A légpárnás hajóm tele van angolnákkal.', user=self._GetSampleUser()) def testInit(self): '''Test the twitter.Status constructor''' status = twitter.Status(created_at='Fri Jan 26 23:17:14 +0000 2007', id=4391023, text=u'A légpárnás hajóm tele van angolnákkal.', user=self._GetSampleUser()) def testGettersAndSetters(self): '''Test all of the twitter.Status getters and setters''' status = twitter.Status() status.SetId(4391023) self.assertEqual(4391023, status.GetId()) created_at = calendar.timegm((2007, 1, 26, 23, 17, 14, -1, -1, -1)) status.SetCreatedAt('Fri Jan 26 23:17:14 +0000 2007') self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', status.GetCreatedAt()) self.assertEqual(created_at, status.GetCreatedAtInSeconds()) status.SetNow(created_at + 10) self.assertEqual("about 10 seconds ago", status.GetRelativeCreatedAt()) status.SetText(u'A légpárnás hajóm tele van angolnákkal.') self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.', status.GetText()) status.SetUser(self._GetSampleUser()) self.assertEqual(718443, status.GetUser().id) def testProperties(self): '''Test all of the twitter.Status properties''' status = twitter.Status() status.id = 1 self.assertEqual(1, status.id) created_at = calendar.timegm((2007, 1, 26, 23, 17, 14, -1, -1, -1)) status.created_at = 'Fri Jan 26 23:17:14 +0000 2007' self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', status.created_at) self.assertEqual(created_at, status.created_at_in_seconds) status.now = created_at + 10 self.assertEqual('about 10 seconds ago', status.relative_created_at) status.user = self._GetSampleUser() self.assertEqual(718443, status.user.id) def _ParseDate(self, string): return calendar.timegm(time.strptime(string, '%b %d %H:%M:%S %Y')) def testRelativeCreatedAt(self): '''Test various permutations of Status relative_created_at''' status = 
twitter.Status(created_at='Fri Jan 01 12:00:00 +0000 2007') status.now = self._ParseDate('Jan 01 12:00:00 2007') self.assertEqual('about a second ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 12:00:01 2007') self.assertEqual('about a second ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 12:00:02 2007') self.assertEqual('about 2 seconds ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 12:00:05 2007') self.assertEqual('about 5 seconds ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 12:00:50 2007') self.assertEqual('about a minute ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 12:01:00 2007') self.assertEqual('about a minute ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 12:01:10 2007') self.assertEqual('about a minute ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 12:02:00 2007') self.assertEqual('about 2 minutes ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 12:31:50 2007') self.assertEqual('about 31 minutes ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 12:50:00 2007') self.assertEqual('about an hour ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 13:00:00 2007') self.assertEqual('about an hour ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 13:10:00 2007') self.assertEqual('about an hour ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 14:00:00 2007') self.assertEqual('about 2 hours ago', status.relative_created_at) status.now = self._ParseDate('Jan 01 19:00:00 2007') self.assertEqual('about 7 hours ago', status.relative_created_at) status.now = self._ParseDate('Jan 02 11:30:00 2007') self.assertEqual('about a day ago', status.relative_created_at) status.now = self._ParseDate('Jan 04 12:00:00 2007') self.assertEqual('about 3 days ago', status.relative_created_at) status.now = self._ParseDate('Feb 04 12:00:00 2007') self.assertEqual('about 34 days ago', status.relative_created_at) def testAsJsonString(self): '''Test the twitter.Status AsJsonString method''' self.assertEqual(StatusTest.SAMPLE_JSON, self._GetSampleStatus().AsJsonString()) def testAsDict(self): '''Test the twitter.Status AsDict method''' status = self._GetSampleStatus() data = status.AsDict() self.assertEqual(4391023, data['id']) self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', data['created_at']) self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.', data['text']) self.assertEqual(718443, data['user']['id']) def testEq(self): '''Test the twitter.Status __eq__ method''' status = twitter.Status() status.created_at = 'Fri Jan 26 23:17:14 +0000 2007' status.id = 4391023 status.text = u'A légpárnás hajóm tele van angolnákkal.' status.user = self._GetSampleUser() self.assertEqual(status, self._GetSampleStatus()) def testNewFromJsonDict(self): '''Test the twitter.Status NewFromJsonDict method''' data = simplejson.loads(StatusTest.SAMPLE_JSON) status = twitter.Status.NewFromJsonDict(data) self.assertEqual(self._GetSampleStatus(), status) class UserTest(unittest.TestCase): SAMPLE_JSON = '''{"description": "Indeterminate things", "id": 673483, "location": "San Francisco, CA", "name": "DeWitt", "profile_image_url": "http://twitter.com/system/user/profile_image/673483/normal/me.jpg", "screen_name": "dewitt", "status": {"created_at": "Fri Jan 26 17:28:19 +0000 2007", "id": 4212713, "text": "\\"Select all\\" and archive your Gmail inbox. 
The page loads so much faster!"}, "url": "http://unto.net/"}''' def _GetSampleStatus(self): return twitter.Status(created_at='Fri Jan 26 17:28:19 +0000 2007', id=4212713, text='"Select all" and archive your Gmail inbox. ' ' The page loads so much faster!') def _GetSampleUser(self): return twitter.User(id=673483, name='DeWitt', screen_name='dewitt', description=u'Indeterminate things', location='San Francisco, CA', url='http://unto.net/', profile_image_url='http://twitter.com/system/user/prof' 'ile_image/673483/normal/me.jpg', status=self._GetSampleStatus()) def testInit(self): '''Test the twitter.User constructor''' user = twitter.User(id=673483, name='DeWitt', screen_name='dewitt', description=u'Indeterminate things', url='http://twitter.com/dewitt', profile_image_url='http://twitter.com/system/user/prof' 'ile_image/673483/normal/me.jpg', status=self._GetSampleStatus()) def testGettersAndSetters(self): '''Test all of the twitter.User getters and setters''' user = twitter.User() user.SetId(673483) self.assertEqual(673483, user.GetId()) user.SetName('DeWitt') self.assertEqual('DeWitt', user.GetName()) user.SetScreenName('dewitt') self.assertEqual('dewitt', user.GetScreenName()) user.SetDescription('Indeterminate things') self.assertEqual('Indeterminate things', user.GetDescription()) user.SetLocation('San Francisco, CA') self.assertEqual('San Francisco, CA', user.GetLocation()) user.SetProfileImageUrl('http://twitter.com/system/user/profile_im' 'age/673483/normal/me.jpg') self.assertEqual('http://twitter.com/system/user/profile_image/673' '483/normal/me.jpg', user.GetProfileImageUrl()) user.SetStatus(self._GetSampleStatus()) self.assertEqual(4212713, user.GetStatus().id) def testProperties(self): '''Test all of the twitter.User properties''' user = twitter.User() user.id = 673483 self.assertEqual(673483, user.id) user.name = 'DeWitt' self.assertEqual('DeWitt', user.name) user.screen_name = 'dewitt' self.assertEqual('dewitt', user.screen_name) user.description = 'Indeterminate things' self.assertEqual('Indeterminate things', user.description) user.location = 'San Francisco, CA' self.assertEqual('San Francisco, CA', user.location) user.profile_image_url = 'http://twitter.com/system/user/profile_i' \ 'mage/673483/normal/me.jpg' self.assertEqual('http://twitter.com/system/user/profile_image/6734' '83/normal/me.jpg', user.profile_image_url) self.status = self._GetSampleStatus() self.assertEqual(4212713, self.status.id) def testAsJsonString(self): '''Test the twitter.User AsJsonString method''' self.assertEqual(UserTest.SAMPLE_JSON, self._GetSampleUser().AsJsonString()) def testAsDict(self): '''Test the twitter.User AsDict method''' user = self._GetSampleUser() data = user.AsDict() self.assertEqual(673483, data['id']) self.assertEqual('DeWitt', data['name']) self.assertEqual('dewitt', data['screen_name']) self.assertEqual('Indeterminate things', data['description']) self.assertEqual('San Francisco, CA', data['location']) self.assertEqual('http://twitter.com/system/user/profile_image/6734' '83/normal/me.jpg', data['profile_image_url']) self.assertEqual('http://unto.net/', data['url']) self.assertEqual(4212713, data['status']['id']) def testEq(self): '''Test the twitter.User __eq__ method''' user = twitter.User() user.id = 673483 user.name = 'DeWitt' user.screen_name = 'dewitt' user.description = 'Indeterminate things' user.location = 'San Francisco, CA' user.profile_image_url = 'http://twitter.com/system/user/profile_image/67' \ '3483/normal/me.jpg' user.url = 'http://unto.net/' user.status = 
self._GetSampleStatus() self.assertEqual(user, self._GetSampleUser()) def testNewFromJsonDict(self): '''Test the twitter.User NewFromJsonDict method''' data = simplejson.loads(UserTest.SAMPLE_JSON) user = twitter.User.NewFromJsonDict(data) self.assertEqual(self._GetSampleUser(), user) class FileCacheTest(unittest.TestCase): def testInit(self): """Test the twitter._FileCache constructor""" cache = twitter._FileCache() self.assert_(cache is not None, 'cache is None') def testSet(self): """Test the twitter._FileCache.Set method""" cache = twitter._FileCache() cache.Set("foo",'Hello World!') cache.Remove("foo") def testRemove(self): """Test the twitter._FileCache.Remove method""" cache = twitter._FileCache() cache.Set("foo",'Hello World!') cache.Remove("foo") data = cache.Get("foo") self.assertEqual(data, None, 'data is not None') def testGet(self): """Test the twitter._FileCache.Get method""" cache = twitter._FileCache() cache.Set("foo",'Hello World!') data = cache.Get("foo") self.assertEqual('Hello World!', data) cache.Remove("foo") def testGetCachedTime(self): """Test the twitter._FileCache.GetCachedTime method""" now = time.time() cache = twitter._FileCache() cache.Set("foo",'Hello World!') cached_time = cache.GetCachedTime("foo") delta = cached_time - now self.assert_(delta <= 1, 'Cached time differs from clock time by more than 1 second.') cache.Remove("foo") class ApiTest(unittest.TestCase): def setUp(self): self._urllib = MockUrllib() api = twitter.Api(username='test', password='test', cache=None) api.SetUrllib(self._urllib) self._api = api def testTwitterError(self): '''Test that twitter responses containing an error message are wrapped.''' self._AddHandler('http://twitter.com/statuses/public_timeline.json', curry(self._OpenTestData, 'public_timeline_error.json')) # Manually try/catch so we can check the exception's value try: statuses = self._api.GetPublicTimeline() except twitter.TwitterError, error: # If the error message matches, the test passes self.assertEqual('test error', error.message) else: self.fail('TwitterError expected') def testGetPublicTimeline(self): '''Test the twitter.Api GetPublicTimeline method''' self._AddHandler('http://twitter.com/statuses/public_timeline.json?since_id=12345', curry(self._OpenTestData, 'public_timeline.json')) statuses = self._api.GetPublicTimeline(since_id=12345) # This is rather arbitrary, but spot checking is better than nothing self.assertEqual(20, len(statuses)) self.assertEqual(89497702, statuses[0].id) def testGetUserTimeline(self): '''Test the twitter.Api GetUserTimeline method''' self._AddHandler('http://twitter.com/statuses/user_timeline/kesuke.json?count=1', curry(self._OpenTestData, 'user_timeline-kesuke.json')) statuses = self._api.GetUserTimeline('kesuke', count=1) # This is rather arbitrary, but spot checking is better than nothing self.assertEqual(89512102, statuses[0].id) self.assertEqual(718443, statuses[0].user.id) def testGetFriendsTimeline(self): '''Test the twitter.Api GetFriendsTimeline method''' self._AddHandler('http://twitter.com/statuses/friends_timeline/kesuke.json', curry(self._OpenTestData, 'friends_timeline-kesuke.json')) statuses = self._api.GetFriendsTimeline('kesuke') # This is rather arbitrary, but spot checking is better than nothing self.assertEqual(20, len(statuses)) self.assertEqual(718443, statuses[0].user.id) def testGetStatus(self): '''Test the twitter.Api GetStatus method''' self._AddHandler('http://twitter.com/statuses/show/89512102.json', curry(self._OpenTestData, 'show-89512102.json')) status = 
self._api.GetStatus(89512102) self.assertEqual(89512102, status.id) self.assertEqual(718443, status.user.id) def testDestroyStatus(self): '''Test the twitter.Api DestroyStatus method''' self._AddHandler('http://twitter.com/statuses/destroy/103208352.json', curry(self._OpenTestData, 'status-destroy.json')) status = self._api.DestroyStatus(103208352) self.assertEqual(103208352, status.id) def testPostUpdate(self): '''Test the twitter.Api PostUpdate method''' self._AddHandler('http://twitter.com/statuses/update.json', curry(self._OpenTestData, 'update.json')) status = self._api.PostUpdate(u'Моё судно на воздушной подушке полно угрей') # This is rather arbitrary, but spot checking is better than nothing self.assertEqual(u'Моё судно на воздушной подушке полно угрей', status.text) def testGetReplies(self): '''Test the twitter.Api GetReplies method''' self._AddHandler('http://twitter.com/statuses/replies.json?page=1', curry(self._OpenTestData, 'replies.json')) statuses = self._api.GetReplies(page=1) self.assertEqual(36657062, statuses[0].id) def testGetFriends(self): '''Test the twitter.Api GetFriends method''' self._AddHandler('http://twitter.com/statuses/friends.json?page=1', curry(self._OpenTestData, 'friends.json')) users = self._api.GetFriends(page=1) buzz = [u.status for u in users if u.screen_name == 'buzz'] self.assertEqual(89543882, buzz[0].id) def testGetFollowers(self): '''Test the twitter.Api GetFollowers method''' self._AddHandler('http://twitter.com/statuses/followers.json?page=1', curry(self._OpenTestData, 'followers.json')) users = self._api.GetFollowers(page=1) # This is rather arbitrary, but spot checking is better than nothing alexkingorg = [u.status for u in users if u.screen_name == 'alexkingorg'] self.assertEqual(89554432, alexkingorg[0].id) def testGetFeatured(self): '''Test the twitter.Api GetFeatured method''' self._AddHandler('http://twitter.com/statuses/featured.json', curry(self._OpenTestData, 'featured.json')) users = self._api.GetFeatured() # This is rather arbitrary, but spot checking is better than nothing stevenwright = [u.status for u in users if u.screen_name == 'stevenwright'] self.assertEqual(86991742, stevenwright[0].id) def testGetDirectMessages(self): '''Test the twitter.Api GetDirectMessages method''' self._AddHandler('http://twitter.com/direct_messages.json?page=1', curry(self._OpenTestData, 'direct_messages.json')) statuses = self._api.GetDirectMessages(page=1) self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.', statuses[0].text) def testPostDirectMessage(self): '''Test the twitter.Api PostDirectMessage method''' self._AddHandler('http://twitter.com/direct_messages/new.json', curry(self._OpenTestData, 'direct_messages-new.json')) status = self._api.PostDirectMessage('test', u'Моё судно на воздушной подушке полно угрей') # This is rather arbitrary, but spot checking is better than nothing self.assertEqual(u'Моё судно на воздушной подушке полно угрей', status.text) def testDestroyDirectMessage(self): '''Test the twitter.Api DestroyDirectMessage method''' self._AddHandler('http://twitter.com/direct_messages/destroy/3496342.json', curry(self._OpenTestData, 'direct_message-destroy.json')) status = self._api.DestroyDirectMessage(3496342) # This is rather arbitrary, but spot checking is better than nothing self.assertEqual(673483, status.sender_id) def testCreateFriendship(self): '''Test the twitter.Api CreateFriendship method''' self._AddHandler('http://twitter.com/friendships/create/dewitt.json', curry(self._OpenTestData, 'friendship-create.json')) 
user = self._api.CreateFriendship('dewitt') # This is rather arbitrary, but spot checking is better than nothing self.assertEqual(673483, user.id) def testDestroyFriendship(self): '''Test the twitter.Api DestroyFriendship method''' self._AddHandler('http://twitter.com/friendships/destroy/dewitt.json', curry(self._OpenTestData, 'friendship-destroy.json')) user = self._api.DestroyFriendship('dewitt') # This is rather arbitrary, but spot checking is better than nothing self.assertEqual(673483, user.id) def testGetUser(self): '''Test the twitter.Api GetUser method''' self._AddHandler('http://twitter.com/users/show/dewitt.json', curry(self._OpenTestData, 'show-dewitt.json')) user = self._api.GetUser('dewitt') self.assertEqual('dewitt', user.screen_name) self.assertEqual(89586072, user.status.id) def _AddHandler(self, url, callback): self._urllib.AddHandler(url, callback) def _GetTestDataPath(self, filename): directory = os.path.dirname(os.path.abspath(__file__)) test_data_dir = os.path.join(directory, 'testdata') return os.path.join(test_data_dir, filename) def _OpenTestData(self, filename): return open(self._GetTestDataPath(filename)) class MockUrllib(object): '''A mock replacement for urllib that hardcodes specific responses.''' def __init__(self): self._handlers = {} self.HTTPBasicAuthHandler = MockHTTPBasicAuthHandler def AddHandler(self, url, callback): self._handlers[url] = callback def build_opener(self, *handlers): return MockOpener(self._handlers) class MockOpener(object): '''A mock opener for urllib''' def __init__(self, handlers): self._handlers = handlers self._opened = False def open(self, url, data=None): if self._opened: raise Exception('MockOpener already opened.') if url in self._handlers: self._opened = True return self._handlers[url]() else: raise Exception('Unexpected URL %s' % url) def close(self): if not self._opened: raise Exception('MockOpener closed before it was opened.') self._opened = False class MockHTTPBasicAuthHandler(object): '''A mock replacement for HTTPBasicAuthHandler''' def add_password(self, realm, uri, user, passwd): # TODO(dewitt): Add verification that the proper args are passed pass class curry: # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549 def __init__(self, fun, *args, **kwargs): self.fun = fun self.pending = args[:] self.kwargs = kwargs.copy() def __call__(self, *args, **kwargs): if kwargs and self.kwargs: kw = self.kwargs.copy() kw.update(kwargs) else: kw = kwargs or self.kwargs return self.fun(*(self.pending + args), **kw) def suite(): suite = unittest.TestSuite() suite.addTests(unittest.makeSuite(FileCacheTest)) suite.addTests(unittest.makeSuite(StatusTest)) suite.addTests(unittest.makeSuite(UserTest)) suite.addTests(unittest.makeSuite(ApiTest)) return suite if __name__ == '__main__': unittest.main()
Venturi/cms
refs/heads/master
env/lib/python2.7/site-packages/unidecode/x0b3.py
253
data = ( 'dae', # 0x00 'daeg', # 0x01 'daegg', # 0x02 'daegs', # 0x03 'daen', # 0x04 'daenj', # 0x05 'daenh', # 0x06 'daed', # 0x07 'dael', # 0x08 'daelg', # 0x09 'daelm', # 0x0a 'daelb', # 0x0b 'daels', # 0x0c 'daelt', # 0x0d 'daelp', # 0x0e 'daelh', # 0x0f 'daem', # 0x10 'daeb', # 0x11 'daebs', # 0x12 'daes', # 0x13 'daess', # 0x14 'daeng', # 0x15 'daej', # 0x16 'daec', # 0x17 'daek', # 0x18 'daet', # 0x19 'daep', # 0x1a 'daeh', # 0x1b 'dya', # 0x1c 'dyag', # 0x1d 'dyagg', # 0x1e 'dyags', # 0x1f 'dyan', # 0x20 'dyanj', # 0x21 'dyanh', # 0x22 'dyad', # 0x23 'dyal', # 0x24 'dyalg', # 0x25 'dyalm', # 0x26 'dyalb', # 0x27 'dyals', # 0x28 'dyalt', # 0x29 'dyalp', # 0x2a 'dyalh', # 0x2b 'dyam', # 0x2c 'dyab', # 0x2d 'dyabs', # 0x2e 'dyas', # 0x2f 'dyass', # 0x30 'dyang', # 0x31 'dyaj', # 0x32 'dyac', # 0x33 'dyak', # 0x34 'dyat', # 0x35 'dyap', # 0x36 'dyah', # 0x37 'dyae', # 0x38 'dyaeg', # 0x39 'dyaegg', # 0x3a 'dyaegs', # 0x3b 'dyaen', # 0x3c 'dyaenj', # 0x3d 'dyaenh', # 0x3e 'dyaed', # 0x3f 'dyael', # 0x40 'dyaelg', # 0x41 'dyaelm', # 0x42 'dyaelb', # 0x43 'dyaels', # 0x44 'dyaelt', # 0x45 'dyaelp', # 0x46 'dyaelh', # 0x47 'dyaem', # 0x48 'dyaeb', # 0x49 'dyaebs', # 0x4a 'dyaes', # 0x4b 'dyaess', # 0x4c 'dyaeng', # 0x4d 'dyaej', # 0x4e 'dyaec', # 0x4f 'dyaek', # 0x50 'dyaet', # 0x51 'dyaep', # 0x52 'dyaeh', # 0x53 'deo', # 0x54 'deog', # 0x55 'deogg', # 0x56 'deogs', # 0x57 'deon', # 0x58 'deonj', # 0x59 'deonh', # 0x5a 'deod', # 0x5b 'deol', # 0x5c 'deolg', # 0x5d 'deolm', # 0x5e 'deolb', # 0x5f 'deols', # 0x60 'deolt', # 0x61 'deolp', # 0x62 'deolh', # 0x63 'deom', # 0x64 'deob', # 0x65 'deobs', # 0x66 'deos', # 0x67 'deoss', # 0x68 'deong', # 0x69 'deoj', # 0x6a 'deoc', # 0x6b 'deok', # 0x6c 'deot', # 0x6d 'deop', # 0x6e 'deoh', # 0x6f 'de', # 0x70 'deg', # 0x71 'degg', # 0x72 'degs', # 0x73 'den', # 0x74 'denj', # 0x75 'denh', # 0x76 'ded', # 0x77 'del', # 0x78 'delg', # 0x79 'delm', # 0x7a 'delb', # 0x7b 'dels', # 0x7c 'delt', # 0x7d 'delp', # 0x7e 'delh', # 0x7f 'dem', # 0x80 'deb', # 0x81 'debs', # 0x82 'des', # 0x83 'dess', # 0x84 'deng', # 0x85 'dej', # 0x86 'dec', # 0x87 'dek', # 0x88 'det', # 0x89 'dep', # 0x8a 'deh', # 0x8b 'dyeo', # 0x8c 'dyeog', # 0x8d 'dyeogg', # 0x8e 'dyeogs', # 0x8f 'dyeon', # 0x90 'dyeonj', # 0x91 'dyeonh', # 0x92 'dyeod', # 0x93 'dyeol', # 0x94 'dyeolg', # 0x95 'dyeolm', # 0x96 'dyeolb', # 0x97 'dyeols', # 0x98 'dyeolt', # 0x99 'dyeolp', # 0x9a 'dyeolh', # 0x9b 'dyeom', # 0x9c 'dyeob', # 0x9d 'dyeobs', # 0x9e 'dyeos', # 0x9f 'dyeoss', # 0xa0 'dyeong', # 0xa1 'dyeoj', # 0xa2 'dyeoc', # 0xa3 'dyeok', # 0xa4 'dyeot', # 0xa5 'dyeop', # 0xa6 'dyeoh', # 0xa7 'dye', # 0xa8 'dyeg', # 0xa9 'dyegg', # 0xaa 'dyegs', # 0xab 'dyen', # 0xac 'dyenj', # 0xad 'dyenh', # 0xae 'dyed', # 0xaf 'dyel', # 0xb0 'dyelg', # 0xb1 'dyelm', # 0xb2 'dyelb', # 0xb3 'dyels', # 0xb4 'dyelt', # 0xb5 'dyelp', # 0xb6 'dyelh', # 0xb7 'dyem', # 0xb8 'dyeb', # 0xb9 'dyebs', # 0xba 'dyes', # 0xbb 'dyess', # 0xbc 'dyeng', # 0xbd 'dyej', # 0xbe 'dyec', # 0xbf 'dyek', # 0xc0 'dyet', # 0xc1 'dyep', # 0xc2 'dyeh', # 0xc3 'do', # 0xc4 'dog', # 0xc5 'dogg', # 0xc6 'dogs', # 0xc7 'don', # 0xc8 'donj', # 0xc9 'donh', # 0xca 'dod', # 0xcb 'dol', # 0xcc 'dolg', # 0xcd 'dolm', # 0xce 'dolb', # 0xcf 'dols', # 0xd0 'dolt', # 0xd1 'dolp', # 0xd2 'dolh', # 0xd3 'dom', # 0xd4 'dob', # 0xd5 'dobs', # 0xd6 'dos', # 0xd7 'doss', # 0xd8 'dong', # 0xd9 'doj', # 0xda 'doc', # 0xdb 'dok', # 0xdc 'dot', # 0xdd 'dop', # 0xde 'doh', # 0xdf 'dwa', # 0xe0 'dwag', # 0xe1 'dwagg', # 0xe2 'dwags', # 0xe3 'dwan', # 0xe4 
'dwanj', # 0xe5 'dwanh', # 0xe6 'dwad', # 0xe7 'dwal', # 0xe8 'dwalg', # 0xe9 'dwalm', # 0xea 'dwalb', # 0xeb 'dwals', # 0xec 'dwalt', # 0xed 'dwalp', # 0xee 'dwalh', # 0xef 'dwam', # 0xf0 'dwab', # 0xf1 'dwabs', # 0xf2 'dwas', # 0xf3 'dwass', # 0xf4 'dwang', # 0xf5 'dwaj', # 0xf6 'dwac', # 0xf7 'dwak', # 0xf8 'dwat', # 0xf9 'dwap', # 0xfa 'dwah', # 0xfb 'dwae', # 0xfc 'dwaeg', # 0xfd 'dwaegg', # 0xfe 'dwaegs', # 0xff )
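An aside on how a block table like x0b3.py above is consumed: unidecode selects the table module from the high byte of a character's code point and indexes it with the low byte. A minimal standalone sketch of that lookup, using two entries copied from the tuple above (the real package stores a 256-entry tuple, not a dict):

# Sketch of the table lookup; entries 0x00 ('dae') and 0xff ('dwaegs') are
# taken from the x0b3 data tuple above.
data = {0x00: 'dae', 0xff: 'dwaegs'}

def transliterate(char):
    codepoint = ord(char)
    assert codepoint >> 8 == 0xb3  # this table only covers U+B300-U+B3FF
    return data[codepoint & 0xff]

print(transliterate('\ub300'))  # 'dae'    -> the Hangul syllable at U+B300
print(transliterate('\ub3ff'))  # 'dwaegs' -> the last entry, index 0xff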
traff/dtcov
refs/heads/master
test/test_parser.py
1
import unittest import textwrap from dtcov.dt_report import DjangoTemplateCodeParser class ParserTest(unittest.TestCase): def parse_source(self, text): text = textwrap.dedent(text) cp = DjangoTemplateCodeParser(text=text, exclude="nocover") ret = cp.parse_source() return cp, ret def test_else_if(self): (cp, ret) = self.parse_source("""\ Test {% if True %} True {% else %} False {% endif %} """) self.assertEquals((2, 4, 6), ret[0]) def test_text_with_braces(self): (cp, ret) = self.parse_source("""\ Test {% if True %} { { } } {% else %} { % { {% endif %} % }} """) self.assertEquals((2, 4, 6), ret[0]) def test_expressions(self): (cp, ret) = self.parse_source("""\ {{ value}} text }} {% if True %} {{ true }} {% endif %} """) self.assertEquals((1, 3, 4, 5), ret[0]) def test_comments(self): (cp, ret) = self.parse_source("""\ {% comment %} Comment {{ comment }} {% endcomment %} {{ value }} {% if True %} True {# {{ true }} and {{ false }} #} {% endif %} """) self.assertEquals((4, 5, 8), ret[0])
jorgealmerio/QEsg
refs/heads/master
core/ezdxf/tools/codepage.py
1
# Purpose: constant values
# Created: 10.03.2011
# Copyright (C) 2011, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <[email protected]>"

codepage_to_encoding = {
    '874': 'cp874',  # Thai,
    '932': 'cp932',  # Japanese
    '936': 'gbk',  # UnifiedChinese
    '949': 'cp949',  # Korean
    '950': 'cp950',  # TradChinese
    '1250': 'cp1250',  # CentralEurope
    '1251': 'cp1251',  # Cyrillic
    '1252': 'cp1252',  # WesternEurope
    '1253': 'cp1253',  # Greek
    '1254': 'cp1254',  # Turkish
    '1255': 'cp1255',  # Hebrew
    '1256': 'cp1256',  # Arabic
    '1257': 'cp1257',  # Baltic
    '1258': 'cp1258',  # Vietnam
}

encoding_to_codepage = {
    codec: ansi for ansi, codec in codepage_to_encoding.items()
}


def is_supported_encoding(encoding='cp1252'):
    return encoding in encoding_to_codepage


def toencoding(dxfcodepage):
    for codepage, encoding in codepage_to_encoding.items():
        if dxfcodepage.endswith(codepage):
            return encoding
    return 'cp1252'


def tocodepage(encoding):
    return 'ANSI_' + encoding_to_codepage.get(encoding, '1252')
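A self-contained illustration of the two conversion helpers in codepage.py above; the functions and the fallback value are copied from the file, with the table trimmed to three rows so the snippet runs on its own:

# Trimmed copy of the mapping plus the two helpers shown above.
codepage_to_encoding = {'932': 'cp932', '1250': 'cp1250', '1252': 'cp1252'}
encoding_to_codepage = {codec: ansi for ansi, codec in codepage_to_encoding.items()}

def toencoding(dxfcodepage):
    # DXF headers carry values such as 'ANSI_1250'; match on the numeric suffix.
    for codepage, encoding in codepage_to_encoding.items():
        if dxfcodepage.endswith(codepage):
            return encoding
    return 'cp1252'  # documented fallback

def tocodepage(encoding):
    return 'ANSI_' + encoding_to_codepage.get(encoding, '1252')

assert toencoding('ANSI_1250') == 'cp1250'
assert toencoding('ANSI_9999') == 'cp1252'   # unknown codepage falls back
assert tocodepage('cp932') == 'ANSI_932'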
KevinShawn/WinObjC
refs/heads/master
deps/3rdparty/cairolegacy/perf/make-html.py
169
#!/usr/bin/python from string import strip from sys import stdin targets = {} smilies = {'slowdown': '&#9785;' , 'speedup': '&#9786;'} for line in stdin: line = map(strip, filter(None, line.split(' '))) if 9 == len(line): target, name = line[0:2] factor, dir = line[-2:] name = name.split('-') name, size = '-'.join(name[:-1]), name[-1] target_tests = targets.get(target, {}) name_tests = target_tests.get(name, {}) name_tests[int(size)] = (factor, dir) target_tests[name] = name_tests targets[target] = target_tests print '''\ <html><head> <title>Performance Changes</title> <style type="text/css">/*<![CDATA[*/ body { background: white; color: black; } table { border-collapse: collapse; } th, td { border: 1px solid silver; padding: 0.2em; } td { text-align: center; } th:first-child { text-align: left; } th { background: #eee; } /* those colors also should work for color blinds */ td.slowdown { background: #f93; } td.speedup { background: #6f9; } /*]]>*/</style> </head><body> <h1>Performance Changes</h1>''' targets = targets.items() targets.sort(lambda a, b: cmp(a[0], b[0])) for target, names in targets: sizes = {} for tests in names.values(): for size in tests.keys(): sizes[size] = True sizes = sizes.keys() sizes.sort() names = names.items() names.sort(lambda a, b: cmp(a[0], b[0])) print '<h2><a name="%s">%s</a></h2>' % (target, target) print '<table><thead><tr><th>&nbsp;</th>' for size in sizes: print '<th>%s</th>' % size print '</tr></thead><tbody>' for name, tests in names: print '<tr><th>%s</th>' % name for size in sizes: result = tests.get(size) if result: factor, dir = result print '<td class="%s">%s %s</td>' % ( dir, factor, smilies[dir]) else: print '<td>&nbsp;</td>' print '</tr>' print '</tbody></table>' print '</body></html>'
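make-html.py above expects whitespace-separated report lines with nine fields: target, test-size, five columns it ignores, then the change factor and direction ('speedup' or 'slowdown'). A small sketch of that parsing step on a hypothetical input line (the numbers are made up):

# Hypothetical performance-diff line; only fields 0-1 and the last two matter
# to the script above.
line = 'xlib  paint-256  10.21  0.12  8.65  0.10  1234  1.18  slowdown'

fields = [f.strip() for f in line.split(' ') if f]
assert len(fields) == 9
target, name = fields[0:2]
factor, direction = fields[-2:]
name, size = '-'.join(name.split('-')[:-1]), name.split('-')[-1]
print(target, name, int(size), factor, direction)
# -> xlib paint 256 1.18 slowdown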
rvlm/rvlm-paraform
refs/heads/init
src/rvlm/paraform/utils.py
1
r""" Helper functions ================ This document describes functions from RVLM Paraform project, which get available after the following module import: .. code-block:: python import rvlm.paraform.utils Despite this module isn't for internal use only, it's recommended for library user to rely on its content with extra caution, as some helper functions may get eventually removed. This module is a helper in nature, so it's not devoted to a narrow specific topic. Instead, many topics comes here, each having it's own section in this documentation. Vector operations ----------------- This section contains operations on vectors in tri-dimensional space. Supposing that an orthonormal Cartesian coordinate system is present, each vector is essentially an ordered triplet of real numbers: .. math:: \vec{v} = (v_x, v_y, v_z). In Python this triplet can be represented as tuple of list. The library allows both of them to be passed as parameters, but the tuple form is preferred for its immutability. All vectors the library calculates by itself and return to user are always tuples. .. autofunction:: cross_product .. autofunction:: unit_vector Parameter validation -------------------- .. autofunction:: is_number .. autofunction:: is_vector_2d .. autofunction:: is_vector_3d Miscellaneous ------------- .. autofunction:: expand_function """ import math as _math import rvlm.paraform.autogen as _autogen def cross_product(a, b): r""" Calculates a cross product (or vector product) of two vectors in tri-dimensional Cartesian coordinates. :type a: (float, float, float) | list[float] :parameter a: Vector :math:`\vec{a}` coordinates :math:`(a_x, a_y, a_z)` as tuple or list. :type b: (float, float, float) | list[float] :parameter b: Vector :math:`\vec{b}` coordinates :math:`(b_x, b_y, b_z)` as tuple or list. :rtype: (float, float, float) :returns: Vector product :math:`[\vec{a} \times \vec{b}]`, calculated using the following formula: .. math:: [\vec{a} \times \vec{b}] = \left| \begin{array}{ccc} \vec{i} & \vec{j} & \vec{k} \\ a_x & a_y & a_z \\ b_x & b_y & b_z \end{array} \right|. """ return (a[1]*b[2] - a[2]*b[0], a[2]*b[0] - a[0]*b[2], a[0]*b[1] - a[1]*b[0]) def unit_vector(v): r""" Returns vector, codirectional to :math:`\vec{v}`, but length of 1. :type v: (float, float, float) | list[float] :parameter v: Vector :math:`\vec{v}` coordinates :math:`(v_x, v_y, v_z)` as tuple or list. :rtype: (float, float, float) :returns: Unit vector (or *orth* vector) :math:`\vec{e}_\vec{v}`, calculated as the following: .. math:: \vec{e}_\vec{a} = \frac{1}{|\vec{v}|} \vec{v}, \quad |\vec{v}| = \sqrt{v_x^2 + v_y^2 + v_z^2}. .. seealso:: https://en.wikipedia.org/wiki/Unit_vector """ (vx, vy, vz) = v norm = _math.sqrt(vx*vx + vy*vy + vz*vz) return (vx/norm, vy/norm, vz/norm) def is_number(x): """ """ return type(x) == float or type(x) == int def is_vector_2d(v): """ Checks whether `v` is two-component vector. Returns :const:`True` if `v` is list or tuple or 2 numbers and :const:`False` if it isn't. """ return (v is not None and (type(v) == list or type(v) == tuple) and len(v) == 2 and is_number(v[0]) and is_number(v[1])) def is_vector_3d(v): """ Checks whether `v` is a three-component vector. Returns :const:`True` when `v` is a list or tuple with exactly three number items. 
>>> is_vector_3d(None) False >>> is_vector_3d([1.0, 2.0]) False >>> is_vector_3d([1, 2, 3.0]) True """ return (v is not None and (type(v) == list or type(v) == tuple) and len(v) == 3 and is_number(v[0]) and is_number(v[1]) and is_number(v[2])) def expand_function(f, argspec): """ Expands function `f` to take exactly three arguments and return exactly three results. Original `f` may take any non-empty subset of (x, y, z) arguments in any order, and return from one to three results. The exact parameter layout of `f` is prescribed by `argspec` argument, which is just a plain string, formatted like "<RESULTS>_<ARGUMENTS>". For example, if `argspec` equals to "xz_yx", it means that `x, z = f(y, x)` or, more specifically: .. code-block:: python def expand_function(f, "xz_yx"): def result(x, y, z): x, z = f(y, z) return x, y, z return result All possible values for RESULTS (or ARGUMENTS) are: "x", "y", "z", "xy", "xz", "yz", "yz", "zx", "zy", "xyz", "xzy", "yxz", "yzx", "zxy", and "zyx", total 15. Thus, the overall number of possible `argspec` variants is 225. This function internally implemented through Python code generation in order to support every possible variant of `argspec`. """ return _autogen.expand_function_autogen(f, argspec)
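The docstrings above give the determinant formula for the cross product and the normalization used by unit_vector; a worked numeric example of those formulas, written independently of the module (nothing is imported from rvlm.paraform):

import math

# Cross product per the determinant expansion quoted in the docstring, and
# unit_vector as normalization by |v|.
def cross_product(a, b):
    return (a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0])

def unit_vector(v):
    norm = math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2)
    return (v[0] / norm, v[1] / norm, v[2] / norm)

print(cross_product((1, 0, 0), (0, 1, 0)))  # (0, 0, 1): x cross y = z
print(unit_vector((3.0, 0.0, 4.0)))         # (0.6, 0.0, 0.8), length 1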
malept/youtube-dl
refs/heads/master
youtube_dl/extractor/rds.py
57
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( parse_duration, parse_iso8601, js_to_json, ) from ..compat import compat_str class RDSIE(InfoExtractor): IE_DESC = 'RDS.ca' _VALID_URL = r'https?://(?:www\.)?rds\.ca/vid(?:[eé]|%C3%A9)os/(?:[^/]+/)*(?P<id>[^/]+)-\d+\.\d+' _TESTS = [{ 'url': 'http://www.rds.ca/videos/football/nfl/fowler-jr-prend-la-direction-de-jacksonville-3.1132799', 'info_dict': { 'id': '604333', 'display_id': 'fowler-jr-prend-la-direction-de-jacksonville', 'ext': 'mp4', 'title': 'Fowler Jr. prend la direction de Jacksonville', 'description': 'Dante Fowler Jr. est le troisième choix du repêchage 2015 de la NFL. ', 'timestamp': 1430397346, 'upload_date': '20150430', 'duration': 154.354, 'age_limit': 0, } }, { 'url': 'http://www.rds.ca/vid%C3%A9os/un-voyage-positif-3.877934', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) item = self._parse_json(self._search_regex(r'(?s)itemToPush\s*=\s*({.+?});', webpage, 'item'), display_id, js_to_json) video_id = compat_str(item['id']) title = item.get('title') or self._og_search_title(webpage) or self._html_search_meta( 'title', webpage, 'title', fatal=True) description = self._og_search_description(webpage) or self._html_search_meta( 'description', webpage, 'description') thumbnail = item.get('urlImageBig') or self._og_search_thumbnail(webpage) or self._search_regex( [r'<link[^>]+itemprop="thumbnailUrl"[^>]+href="([^"]+)"', r'<span[^>]+itemprop="thumbnailUrl"[^>]+content="([^"]+)"'], webpage, 'thumbnail', fatal=False) timestamp = parse_iso8601(self._search_regex( r'<span[^>]+itemprop="uploadDate"[^>]+content="([^"]+)"', webpage, 'upload date', fatal=False)) duration = parse_duration(self._search_regex( r'<span[^>]+itemprop="duration"[^>]+content="([^"]+)"', webpage, 'duration', fatal=False)) age_limit = self._family_friendly_search(webpage) return { '_type': 'url_transparent', 'id': video_id, 'display_id': display_id, 'url': '9c9media:rds_web:%s' % video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'duration': duration, 'age_limit': age_limit, 'ie_key': 'NineCNineMedia', }
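The _VALID_URL pattern above can be exercised in isolation against the two URLs listed in _TESTS (pattern copied verbatim; Python 3 assumed so the 'é' in the character class needs no coding header):

import re

_VALID_URL = r'https?://(?:www\.)?rds\.ca/vid(?:[eé]|%C3%A9)os/(?:[^/]+/)*(?P<id>[^/]+)-\d+\.\d+'

for url in (
    'http://www.rds.ca/videos/football/nfl/fowler-jr-prend-la-direction-de-jacksonville-3.1132799',
    'http://www.rds.ca/vid%C3%A9os/un-voyage-positif-3.877934',
):
    print(re.match(_VALID_URL, url).group('id'))
# -> fowler-jr-prend-la-direction-de-jacksonville
# -> un-voyage-positif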
shadowcoin/lightcoin
refs/heads/master
share/qt/extract_strings_qt.py
2945
#!/usr/bin/python ''' Extract _("...") strings for translation and convert to Qt4 stringdefs so that they can be picked up by Qt linguist. ''' from subprocess import Popen, PIPE import glob import operator OUT_CPP="src/qt/bitcoinstrings.cpp" EMPTY=['""'] def parse_po(text): """ Parse 'po' format produced by xgettext. Return a list of (msgid,msgstr) tuples. """ messages = [] msgid = [] msgstr = [] in_msgid = False in_msgstr = False for line in text.split('\n'): line = line.rstrip('\r') if line.startswith('msgid '): if in_msgstr: messages.append((msgid, msgstr)) in_msgstr = False # message start in_msgid = True msgid = [line[6:]] elif line.startswith('msgstr '): in_msgid = False in_msgstr = True msgstr = [line[7:]] elif line.startswith('"'): if in_msgid: msgid.append(line) if in_msgstr: msgstr.append(line) if in_msgstr: messages.append((msgid, msgstr)) return messages files = glob.glob('src/*.cpp') + glob.glob('src/*.h') # xgettext -n --keyword=_ $FILES child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE) (out, err) = child.communicate() messages = parse_po(out) f = open(OUT_CPP, 'w') f.write("""#include <QtGlobal> // Automatically generated by extract_strings.py #ifdef __GNUC__ #define UNUSED __attribute__((unused)) #else #define UNUSED #endif """) f.write('static const char UNUSED *bitcoin_strings[] = {\n') messages.sort(key=operator.itemgetter(0)) for (msgid, msgstr) in messages: if msgid != EMPTY: f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid))) f.write('};') f.close()
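parse_po() above returns (msgid, msgstr) pairs whose msgid is a list of quoted fragments, starting with an empty '""' when xgettext wraps a long string; the write loop then joins those fragments straight into a QT_TRANSLATE_NOOP entry. A small sketch of that final formatting step with a hypothetical msgid:

# Hypothetical msgid as parse_po() would return it for a wrapped string.
msgid = ['""',
         '"Warning: -paytxfee is set very high! "',
         '"This is the fee you will pay if you send a transaction."']

entry = 'QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid))
print(entry, end='')
# QT_TRANSLATE_NOOP("bitcoin-core", ""
# "Warning: -paytxfee is set very high! "
# "This is the fee you will pay if you send a transaction."),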
SoftwareLiteracyFoundation/Python-Programs
refs/heads/master
program_8_List_Functions.py
1
# Create a list of state names
stateList1 = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California']
stateList2 = ['Colorado', 'Conneticut', 'Delaware', 'Florida', 'Georgia']
stateList3 = ['Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas']

# a function to print the functions of a list
def PrintListFunctions():
    listFunctions = ['append', 'count', 'extend', 'index', 'insert',
                     'pop', 'remove', 'reverse', 'sort']

    print('The functions that belong to a list are:')

    for function in listFunctions :
        print( function, ' ', end='' )

    print()

def ShowListFunctions():
    # tell Python that we want to be able to change these
    # lists, even though they are not in this function
    global stateList1, stateList2, stateList3

    print('The first list is:', stateList1)

    # Add a state to the first list
    stateList1.append('Wyoming')
    # remove a state
    stateList1.remove('Alabama')
    print('The list was changed:', stateList1)

    print('The second list is:', stateList2)
    # reverse a list
    stateList2.reverse()
    print('This list was reversed:', stateList2)

def main():
    PrintListFunctions()
    ShowListFunctions()

# This tells Python to run the function called main()
if __name__ == "__main__":
    main()
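The teaching program above lists nine list functions but only demonstrates append, remove and reverse; a companion snippet in the same style covering sort, extend and count:

# Demonstrate a few more of the listed functions on fresh copies of the lists
stateList2 = ['Colorado', 'Conneticut', 'Delaware', 'Florida', 'Georgia']
stateList3 = ['Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas']

# sort a list in reverse alphabetical order
stateList3.sort(reverse=True)
print('This list was sorted in reverse order:', stateList3)

# extend one list with another
stateList2.extend(stateList3)
print('The third list was added to the second:', stateList2)

# count how many times a value appears
print('Kansas appears', stateList2.count('Kansas'), 'time(s)')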
buguelos/odoo
refs/heads/master
openerp/addons/base/ir/ir_rule.py
312
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp import SUPERUSER_ID from openerp import tools from openerp.osv import fields, osv, expression from openerp.tools.safe_eval import safe_eval as eval from openerp.tools.misc import unquote as unquote class ir_rule(osv.osv): _name = 'ir.rule' _order = 'name' _MODES = ['read', 'write', 'create', 'unlink'] def _eval_context_for_combinations(self): """Returns a dictionary to use as evaluation context for ir.rule domains, when the goal is to obtain python lists that are easier to parse and combine, but not to actually execute them.""" return {'user': unquote('user'), 'time': unquote('time')} def _eval_context(self, cr, uid): """Returns a dictionary to use as evaluation context for ir.rule domains.""" return {'user': self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid), 'time':time} def _domain_force_get(self, cr, uid, ids, field_name, arg, context=None): res = {} eval_context = self._eval_context(cr, uid) for rule in self.browse(cr, uid, ids, context): if rule.domain_force: res[rule.id] = expression.normalize_domain(eval(rule.domain_force, eval_context)) else: res[rule.id] = [] return res def _get_value(self, cr, uid, ids, field_name, arg, context=None): res = {} for rule in self.browse(cr, uid, ids, context): if not rule.groups: res[rule.id] = True else: res[rule.id] = False return res def _check_model_obj(self, cr, uid, ids, context=None): return not any(self.pool[rule.model_id.model].is_transient() for rule in self.browse(cr, uid, ids, context)) def _check_model_name(self, cr, uid, ids, context=None): # Don't allow rules on rules records (this model). 
return not any(rule.model_id.model == self._name for rule in self.browse(cr, uid, ids, context)) _columns = { 'name': fields.char('Name', select=1), 'active': fields.boolean('Active', help="If you uncheck the active field, it will disable the record rule without deleting it (if you delete a native record rule, it may be re-created when you reload the module."), 'model_id': fields.many2one('ir.model', 'Object',select=1, required=True, ondelete="cascade"), 'global': fields.function(_get_value, string='Global', type='boolean', store=True, help="If no group is specified the rule is global and applied to everyone"), 'groups': fields.many2many('res.groups', 'rule_group_rel', 'rule_group_id', 'group_id', 'Groups'), 'domain_force': fields.text('Domain'), 'domain': fields.function(_domain_force_get, string='Domain', type='binary'), 'perm_read': fields.boolean('Apply for Read'), 'perm_write': fields.boolean('Apply for Write'), 'perm_create': fields.boolean('Apply for Create'), 'perm_unlink': fields.boolean('Apply for Delete') } _order = 'model_id DESC' _defaults = { 'active': True, 'perm_read': True, 'perm_write': True, 'perm_create': True, 'perm_unlink': True, 'global': True, } _sql_constraints = [ ('no_access_rights', 'CHECK (perm_read!=False or perm_write!=False or perm_create!=False or perm_unlink!=False)', 'Rule must have at least one checked access right !'), ] _constraints = [ (_check_model_obj, 'Rules can not be applied on Transient models.', ['model_id']), (_check_model_name, 'Rules can not be applied on the Record Rules model.', ['model_id']), ] @tools.ormcache() def _compute_domain(self, cr, uid, model_name, mode="read"): if mode not in self._MODES: raise ValueError('Invalid mode: %r' % (mode,)) if uid == SUPERUSER_ID: return None cr.execute("""SELECT r.id FROM ir_rule r JOIN ir_model m ON (r.model_id = m.id) WHERE m.model = %s AND r.active is True AND r.perm_""" + mode + """ AND (r.id IN (SELECT rule_group_id FROM rule_group_rel g_rel JOIN res_groups_users_rel u_rel ON (g_rel.group_id = u_rel.gid) WHERE u_rel.uid = %s) OR r.global)""", (model_name, uid)) rule_ids = [x[0] for x in cr.fetchall()] if rule_ids: # browse user as super-admin root to avoid access errors! user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid) global_domains = [] # list of domains group_domains = {} # map: group -> list of domains for rule in self.browse(cr, SUPERUSER_ID, rule_ids): # read 'domain' as UID to have the correct eval context for the rule. rule_domain = self.read(cr, uid, [rule.id], ['domain'])[0]['domain'] dom = expression.normalize_domain(rule_domain) for group in rule.groups: if group in user.groups_id: group_domains.setdefault(group, []).append(dom) if not rule.groups: global_domains.append(dom) # combine global domains and group domains if group_domains: group_domain = expression.OR(map(expression.OR, group_domains.values())) else: group_domain = [] domain = expression.AND(global_domains + [group_domain]) return domain return [] def clear_cache(self, cr, uid): self._compute_domain.clear_cache(self) def domain_get(self, cr, uid, model_name, mode='read', context=None): dom = self._compute_domain(cr, uid, model_name, mode) if dom: # _where_calc is called as superuser. This means that rules can # involve objects on which the real uid has no acces rights. # This means also there is no implicit restriction (e.g. an object # references another object the user can't see). 
query = self.pool[model_name]._where_calc(cr, SUPERUSER_ID, dom, active_test=False) return query.where_clause, query.where_clause_params, query.tables return [], [], ['"' + self.pool[model_name]._table + '"'] def unlink(self, cr, uid, ids, context=None): res = super(ir_rule, self).unlink(cr, uid, ids, context=context) self.clear_cache(cr, uid) return res def create(self, cr, uid, vals, context=None): res = super(ir_rule, self).create(cr, uid, vals, context=context) self.clear_cache(cr, uid) return res def write(self, cr, uid, ids, vals, context=None): res = super(ir_rule, self).write(cr, uid, ids, vals, context=context) self.clear_cache(cr,uid) return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
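For context on how these rules are written: domain_force stores a domain expression as text, and _domain_force_get() above evaluates it with 'user' and 'time' in scope before normalizing it. A hypothetical rule limiting records to their creator would carry values along these lines (the model id and field name are illustrative only):

# Hypothetical ir.rule values; the domain string is eval'd server-side with
# 'user' bound to the current res.users record (see _eval_context above).
rule_vals = {
    'name': 'Own documents only',
    'model_id': 42,                                  # illustrative ir.model id
    'domain_force': "[('create_uid', '=', user.id)]",
    'perm_read': True,
    'perm_write': True,
    'perm_create': True,
    'perm_unlink': False,
}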
digrich/bubblechart-panel
refs/heads/master
node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_sln.py
1831
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Prints the information in a sln file in a diffable way. It first outputs each projects in alphabetical order with their dependencies. Then it outputs a possible build order. """ __author__ = 'nsylvain (Nicolas Sylvain)' import os import re import sys import pretty_vcproj def BuildProject(project, built, projects, deps): # if all dependencies are done, we can build it, otherwise we try to build the # dependency. # This is not infinite-recursion proof. for dep in deps[project]: if dep not in built: BuildProject(dep, built, projects, deps) print project built.append(project) def ParseSolution(solution_file): # All projects, their clsid and paths. projects = dict() # A list of dependencies associated with a project. dependencies = dict() # Regular expressions that matches the SLN format. # The first line of a project definition. begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942' r'}"\) = "(.*)", "(.*)", "(.*)"$') # The last line of a project definition. end_project = re.compile('^EndProject$') # The first line of a dependency list. begin_dep = re.compile( r'ProjectSection\(ProjectDependencies\) = postProject$') # The last line of a dependency list. end_dep = re.compile('EndProjectSection$') # A line describing a dependency. dep_line = re.compile(' *({.*}) = ({.*})$') in_deps = False solution = open(solution_file) for line in solution: results = begin_project.search(line) if results: # Hack to remove icu because the diff is too different. if results.group(1).find('icu') != -1: continue # We remove "_gyp" from the names because it helps to diff them. current_project = results.group(1).replace('_gyp', '') projects[current_project] = [results.group(2).replace('_gyp', ''), results.group(3), results.group(2)] dependencies[current_project] = [] continue results = end_project.search(line) if results: current_project = None continue results = begin_dep.search(line) if results: in_deps = True continue results = end_dep.search(line) if results: in_deps = False continue results = dep_line.search(line) if results and in_deps and current_project: dependencies[current_project].append(results.group(1)) continue # Change all dependencies clsid to name instead. 
for project in dependencies: # For each dependencies in this project new_dep_array = [] for dep in dependencies[project]: # Look for the project name matching this cldis for project_info in projects: if projects[project_info][1] == dep: new_dep_array.append(project_info) dependencies[project] = sorted(new_dep_array) return (projects, dependencies) def PrintDependencies(projects, deps): print "---------------------------------------" print "Dependencies for all projects" print "---------------------------------------" print "-- --" for (project, dep_list) in sorted(deps.items()): print "Project : %s" % project print "Path : %s" % projects[project][0] if dep_list: for dep in dep_list: print " - %s" % dep print "" print "-- --" def PrintBuildOrder(projects, deps): print "---------------------------------------" print "Build order " print "---------------------------------------" print "-- --" built = [] for (project, _) in sorted(deps.items()): if project not in built: BuildProject(project, built, projects, deps) print "-- --" def PrintVCProj(projects): for project in projects: print "-------------------------------------" print "-------------------------------------" print project print project print project print "-------------------------------------" print "-------------------------------------" project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]), projects[project][2])) pretty = pretty_vcproj argv = [ '', project_path, '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]), ] argv.extend(sys.argv[3:]) pretty.main(argv) def main(): # check if we have exactly 1 parameter. if len(sys.argv) < 2: print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0] return 1 (projects, deps) = ParseSolution(sys.argv[1]) PrintDependencies(projects, deps) PrintBuildOrder(projects, deps) if '--recursive' in sys.argv: PrintVCProj(projects) return 0 if __name__ == '__main__': sys.exit(main())
mthssdrbrg/mopidy-scrobbler
refs/heads/master
tests/test_frontend.py
1
import unittest import mock from mopidy import models import pylast from mopidy_scrobbler import frontend as frontend_lib @mock.patch('mopidy_scrobbler.frontend.pylast', spec=pylast) class FrontendTest(unittest.TestCase): def setUp(self): self.config = { 'scrobbler': { 'username': 'alice', 'password': 'secret', } } self.frontend = frontend_lib.ScrobblerFrontend( self.config, mock.sentinel.core) def test_on_start_creates_lastfm_network(self, pylast_mock): pylast_mock.md5.return_value = mock.sentinel.password_hash self.frontend.on_start() pylast_mock.LastFMNetwork.assert_called_with( api_key=frontend_lib.API_KEY, api_secret=frontend_lib.API_SECRET, username='alice', password_hash=mock.sentinel.password_hash) def test_on_start_stops_actor_on_error(self, pylast_mock): pylast_mock.NetworkError = pylast.NetworkError pylast_mock.LastFMNetwork.side_effect = pylast.NetworkError( None, 'foo') self.frontend.stop = mock.Mock() self.frontend.on_start() self.frontend.stop.assert_called_with() def test_track_playback_started_updates_now_playing(self, pylast_mock): self.frontend.lastfm = mock.Mock(spec=pylast.LastFMNetwork) artists = [models.Artist(name='ABC'), models.Artist(name='XYZ')] album = models.Album(name='The Collection') track = models.Track( name='One Two Three', artists=artists, album=album, track_no=3, length=180432, musicbrainz_id='123-456') tl_track = models.TlTrack(track=track, tlid=17) self.frontend.track_playback_started(tl_track) self.frontend.lastfm.update_now_playing.assert_called_with( 'ABC, XYZ', 'One Two Three', duration='180', album='The Collection', track_number='3', mbid='123-456') def test_track_playback_started_has_default_values(self, pylast_mock): self.frontend.lastfm = mock.Mock(spec=pylast.LastFMNetwork) track = models.Track() tl_track = models.TlTrack(track=track, tlid=17) self.frontend.track_playback_started(tl_track) self.frontend.lastfm.update_now_playing.assert_called_with( '', '', duration='0', album='', track_number='0', mbid='') def test_track_playback_started_catches_pylast_error(self, pylast_mock): self.frontend.lastfm = mock.Mock(spec=pylast.LastFMNetwork) pylast_mock.ScrobblingError = pylast.ScrobblingError self.frontend.lastfm.update_now_playing.side_effect = ( pylast.ScrobblingError('foo')) track = models.Track() tl_track = models.TlTrack(track=track, tlid=17) self.frontend.track_playback_started(tl_track) def test_track_playback_ended_scrobbles_played_track(self, pylast_mock): self.frontend.last_start_time = 123 self.frontend.lastfm = mock.Mock(spec=pylast.LastFMNetwork) artists = [models.Artist(name='ABC'), models.Artist(name='XYZ')] album = models.Album(name='The Collection') track = models.Track( name='One Two Three', artists=artists, album=album, track_no=3, length=180432, musicbrainz_id='123-456') tl_track = models.TlTrack(track=track, tlid=17) self.frontend.track_playback_ended(tl_track, 150000) self.frontend.lastfm.scrobble.assert_called_with( 'ABC, XYZ', 'One Two Three', '123', duration='180', album='The Collection', track_number='3', mbid='123-456') def test_track_playback_ended_has_default_values(self, pylast_mock): self.frontend.last_start_time = 123 self.frontend.lastfm = mock.Mock(spec=pylast.LastFMNetwork) track = models.Track(length=180432) tl_track = models.TlTrack(track=track, tlid=17) self.frontend.track_playback_ended(tl_track, 150000) self.frontend.lastfm.scrobble.assert_called_with( '', '', '123', duration='180', album='', track_number='0', mbid='') def test_does_not_scrobble_tracks_shorter_than_30_sec(self, pylast_mock): 
self.frontend.lastfm = mock.Mock(spec=pylast.LastFMNetwork) track = models.Track(length=20432) tl_track = models.TlTrack(track=track, tlid=17) self.frontend.track_playback_ended(tl_track, 20432) self.assertEqual(self.frontend.lastfm.scrobble.call_count, 0) def test_does_not_scrobble_if_played_less_than_half(self, pylast_mock): self.frontend.lastfm = mock.Mock(spec=pylast.LastFMNetwork) track = models.Track(length=180432) tl_track = models.TlTrack(track=track, tlid=17) self.frontend.track_playback_ended(tl_track, 60432) self.assertEqual(self.frontend.lastfm.scrobble.call_count, 0) def test_does_scrobble_if_played_not_half_but_240_sec(self, pylast_mock): self.frontend.lastfm = mock.Mock(spec=pylast.LastFMNetwork) track = models.Track(length=880432) tl_track = models.TlTrack(track=track, tlid=17) self.frontend.track_playback_ended(tl_track, 241432) self.assertEqual(self.frontend.lastfm.scrobble.call_count, 1) def test_track_playback_ended_catches_pylast_error(self, pylast_mock): self.frontend.lastfm = mock.Mock(spec=pylast.LastFMNetwork) pylast_mock.ScrobblingError = pylast.ScrobblingError self.frontend.lastfm.scrobble.side_effect = ( pylast.ScrobblingError('foo')) track = models.Track(length=180432) tl_track = models.TlTrack(track=track, tlid=17) self.frontend.track_playback_ended(tl_track, 150000)
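The playback-ended tests above encode the usual Last.fm scrobbling rule. A condensed sketch of that rule, assuming times are in milliseconds as in the tests (this is not the frontend's actual code):

def should_scrobble(track_length_ms, time_position_ms):
    # Only scrobble tracks of at least 30 seconds that were played for at
    # least half their length or for at least four minutes.
    if track_length_ms < 30 * 1000:
        return False
    return (time_position_ms >= track_length_ms // 2 or
            time_position_ms >= 240 * 1000)

assert not should_scrobble(20432, 20432)    # too short
assert not should_scrobble(180432, 60432)   # less than half played
assert should_scrobble(880432, 241432)      # over four minutes played
assert should_scrobble(180432, 150000)      # more than half played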
golden1232004/webrtc_new
refs/heads/master
chromium/src/build/android/pylib/utils/zip_utils.py
24
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=unused-wildcard-import # pylint: disable=wildcard-import from devil.utils.zip_utils import *
Tokyo-Buffalo/tokyosouth
refs/heads/master
env/lib/python3.6/site-packages/twisted/words/xish/utility.py
13
# -*- test-case-name: twisted.words.test.test_xishutil -*- # # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Event Dispatching and Callback utilities. """ from __future__ import absolute_import, division from twisted.python import log from twisted.python.compat import iteritems from twisted.words.xish import xpath class _MethodWrapper(object): """ Internal class for tracking method calls. """ def __init__(self, method, *args, **kwargs): self.method = method self.args = args self.kwargs = kwargs def __call__(self, *args, **kwargs): nargs = self.args + args nkwargs = self.kwargs.copy() nkwargs.update(kwargs) self.method(*nargs, **nkwargs) class CallbackList: """ Container for callbacks. Event queries are linked to lists of callables. When a matching event occurs, these callables are called in sequence. One-time callbacks are removed from the list after the first time the event was triggered. Arguments to callbacks are split spread across two sets. The first set, callback specific, is passed to C{addCallback} and is used for all subsequent event triggers. The second set is passed to C{callback} and is event specific. Positional arguments in the second set come after the positional arguments of the first set. Keyword arguments in the second set override those in the first set. @ivar callbacks: The registered callbacks as mapping from the callable to a tuple of a wrapper for that callable that keeps the callback specific arguments and a boolean that signifies if it is to be called only once. @type callbacks: C{dict} """ def __init__(self): self.callbacks = {} def addCallback(self, onetime, method, *args, **kwargs): """ Add callback. The arguments passed are used as callback specific arguments. @param onetime: If C{True}, this callback is called at most once. @type onetime: C{bool} @param method: The callback callable to be added. @param args: Positional arguments to the callable. @type args: C{list} @param kwargs: Keyword arguments to the callable. @type kwargs: C{dict} """ if not method in self.callbacks: self.callbacks[method] = (_MethodWrapper(method, *args, **kwargs), onetime) def removeCallback(self, method): """ Remove callback. @param method: The callable to be removed. """ if method in self.callbacks: del self.callbacks[method] def callback(self, *args, **kwargs): """ Call all registered callbacks. The passed arguments are event specific and augment and override the callback specific arguments as described above. @note: Exceptions raised by callbacks are trapped and logged. They will not propagate up to make sure other callbacks will still be called, and the event dispatching always succeeds. @param args: Positional arguments to the callable. @type args: C{list} @param kwargs: Keyword arguments to the callable. @type kwargs: C{dict} """ for key, (methodwrapper, onetime) in list(self.callbacks.items()): try: methodwrapper(*args, **kwargs) except: log.err() if onetime: del self.callbacks[key] def isEmpty(self): """ Return if list of registered callbacks is empty. @rtype: C{bool} """ return len(self.callbacks) == 0 class EventDispatcher: """ Event dispatching service. The C{EventDispatcher} allows observers to be registered for certain events that are dispatched. There are two types of events: XPath events and Named events. Every dispatch is triggered by calling L{dispatch} with a data object and, for named events, the name of the event. 
When an XPath type event is dispatched, the associated object is assumed to be an L{Element<twisted.words.xish.domish.Element>} instance, which is matched against all registered XPath queries. For every match, the respective observer will be called with the data object. A named event will simply call each registered observer for that particular event name, with the data object. Unlike XPath type events, the data object is not restricted to L{Element<twisted.words.xish.domish.Element>}, but can be anything. When registering observers, the event that is to be observed is specified using an L{xpath.XPathQuery} instance or a string. In the latter case, the string can also contain the string representation of an XPath expression. To distinguish these from named events, each named event should start with a special prefix that is stored in C{self.prefix}. It defaults to C{//event/}. Observers registered using L{addObserver} are persistent: after the observer has been triggered by a dispatch, it remains registered for a possible next dispatch. If instead L{addOnetimeObserver} was used to observe an event, the observer is removed from the list of observers after the first observed event. Observers can also be prioritized, by providing an optional C{priority} parameter to the L{addObserver} and L{addOnetimeObserver} methods. Higher priority observers are then called before lower priority observers. Finally, observers can be unregistered by using L{removeObserver}. """ def __init__(self, eventprefix="//event/"): self.prefix = eventprefix self._eventObservers = {} self._xpathObservers = {} self._dispatchDepth = 0 # Flag indicating levels of dispatching # in progress self._updateQueue = [] # Queued updates for observer ops def _getEventAndObservers(self, event): if isinstance(event, xpath.XPathQuery): # Treat as xpath observers = self._xpathObservers else: if self.prefix == event[:len(self.prefix)]: # Treat as event observers = self._eventObservers else: # Treat as xpath event = xpath.internQuery(event) observers = self._xpathObservers return event, observers def addOnetimeObserver(self, event, observerfn, priority=0, *args, **kwargs): """ Register a one-time observer for an event. Like L{addObserver}, but is only triggered at most once. See there for a description of the parameters. """ self._addObserver(True, event, observerfn, priority, *args, **kwargs) def addObserver(self, event, observerfn, priority=0, *args, **kwargs): """ Register an observer for an event. Each observer will be registered with a certain priority. Higher priority observers get called before lower priority observers. @param event: Name or XPath query for the event to be monitored. @type event: C{str} or L{xpath.XPathQuery}. @param observerfn: Function to be called when the specified event has been triggered. This callable takes one parameter: the data object that triggered the event. When specified, the C{*args} and C{**kwargs} parameters to addObserver are being used as additional parameters to the registered observer callable. @param priority: (Optional) priority of this observer in relation to other observer that match the same event. Defaults to C{0}. 
@type priority: C{int} """ self._addObserver(False, event, observerfn, priority, *args, **kwargs) def _addObserver(self, onetime, event, observerfn, priority, *args, **kwargs): # If this is happening in the middle of the dispatch, queue # it up for processing after the dispatch completes if self._dispatchDepth > 0: self._updateQueue.append(lambda:self._addObserver(onetime, event, observerfn, priority, *args, **kwargs)) return event, observers = self._getEventAndObservers(event) if priority not in observers: cbl = CallbackList() observers[priority] = {event: cbl} else: priorityObservers = observers[priority] if event not in priorityObservers: cbl = CallbackList() observers[priority][event] = cbl else: cbl = priorityObservers[event] cbl.addCallback(onetime, observerfn, *args, **kwargs) def removeObserver(self, event, observerfn): """ Remove callable as observer for an event. The observer callable is removed for all priority levels for the specified event. @param event: Event for which the observer callable was registered. @type event: C{str} or L{xpath.XPathQuery} @param observerfn: Observer callable to be unregistered. """ # If this is happening in the middle of the dispatch, queue # it up for processing after the dispatch completes if self._dispatchDepth > 0: self._updateQueue.append(lambda:self.removeObserver(event, observerfn)) return event, observers = self._getEventAndObservers(event) emptyLists = [] for priority, priorityObservers in iteritems(observers): for query, callbacklist in iteritems(priorityObservers): if event == query: callbacklist.removeCallback(observerfn) if callbacklist.isEmpty(): emptyLists.append((priority, query)) for priority, query in emptyLists: del observers[priority][query] def dispatch(self, obj, event=None): """ Dispatch an event. When C{event} is L{None}, an XPath type event is triggered, and C{obj} is assumed to be an instance of L{Element<twisted.words.xish.domish.Element>}. Otherwise, C{event} holds the name of the named event being triggered. In the latter case, C{obj} can be anything. @param obj: The object to be dispatched. @param event: Optional event name. @type event: C{str} """ foundTarget = False self._dispatchDepth += 1 if event != None: # Named event observers = self._eventObservers match = lambda query, obj: query == event else: # XPath event observers = self._xpathObservers match = lambda query, obj: query.matches(obj) priorities = list(observers.keys()) priorities.sort() priorities.reverse() emptyLists = [] for priority in priorities: for query, callbacklist in iteritems(observers[priority]): if match(query, obj): callbacklist.callback(obj) foundTarget = True if callbacklist.isEmpty(): emptyLists.append((priority, query)) for priority, query in emptyLists: del observers[priority][query] self._dispatchDepth -= 1 # If this is a dispatch within a dispatch, don't # do anything with the updateQueue -- it needs to # wait until we've back all the way out of the stack if self._dispatchDepth == 0: # Deal with pending update operations for f in self._updateQueue: f() self._updateQueue = [] return foundTarget class XmlPipe(object): """ XML stream pipe. Connects two objects that communicate stanzas through an XML stream like interface. Each of the ends of the pipe (sink and source) can be used to send XML stanzas to the other side, or add observers to process XML stanzas that were sent from the other side. XML pipes are usually used in place of regular XML streams that are transported over TCP. 
This is the reason for the use of the names source and sink for both ends of the pipe. The source side corresponds with the entity that initiated the TCP connection, whereas the sink corresponds with the entity that accepts that connection. In this object, though, the source and sink are treated equally. Unlike Jabber L{XmlStream<twisted.words.protocols.jabber.xmlstream.XmlStream>}s, the sink and source objects are assumed to represent an eternally connected and initialized XML stream. As such, events corresponding to connection, disconnection, initialization and stream errors are not dispatched or processed. @since: 8.2 @ivar source: Source XML stream. @ivar sink: Sink XML stream. """ def __init__(self): self.source = EventDispatcher() self.sink = EventDispatcher() self.source.send = lambda obj: self.sink.dispatch(obj) self.sink.send = lambda obj: self.source.dispatch(obj)
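A small usage sketch of the EventDispatcher defined above, using a named event (the configured //event/ prefix distinguishes it from an XPath query); observer arguments and priorities are left at their defaults:

from twisted.words.xish.utility import EventDispatcher

seen = []
dispatcher = EventDispatcher()
dispatcher.addObserver('//event/myapp/ping', seen.append)

# Dispatch a named event; the data object is passed to each matching observer.
found = dispatcher.dispatch('payload', '//event/myapp/ping')
assert found and seen == ['payload']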
webmull/phantomjs
refs/heads/master
src/breakpad/src/tools/gyp/pylib/gyp/MSVSToolFile.py
138
#!/usr/bin/python2.4 # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio project reader/writer.""" import common import xml.dom import xml.dom.minidom #------------------------------------------------------------------------------ class Writer(object): """Visual Studio XML tool file writer.""" def __init__(self, tool_file_path): """Initializes the tool file. Args: tool_file_path: Path to the tool file. """ self.tool_file_path = tool_file_path self.doc = None def Create(self, name): """Creates the tool file document. Args: name: Name of the tool file. """ self.name = name # Create XML doc xml_impl = xml.dom.getDOMImplementation() self.doc = xml_impl.createDocument(None, 'VisualStudioToolFile', None) # Add attributes to root element self.n_root = self.doc.documentElement self.n_root.setAttribute('Version', '8.00') self.n_root.setAttribute('Name', self.name) # Add rules section self.n_rules = self.doc.createElement('Rules') self.n_root.appendChild(self.n_rules) def AddCustomBuildRule(self, name, cmd, description, additional_dependencies, outputs, extensions): """Adds a rule to the tool file. Args: name: Name of the rule. description: Description of the rule. cmd: Command line of the rule. additional_dependencies: other files which may trigger the rule. outputs: outputs of the rule. extensions: extensions handled by the rule. """ n_rule = self.doc.createElement('CustomBuildRule') n_rule.setAttribute('Name', name) n_rule.setAttribute('ExecutionDescription', description) n_rule.setAttribute('CommandLine', cmd) n_rule.setAttribute('Outputs', ';'.join(outputs)) n_rule.setAttribute('FileExtensions', ';'.join(extensions)) n_rule.setAttribute('AdditionalDependencies', ';'.join(additional_dependencies)) self.n_rules.appendChild(n_rule) def Write(self, writer=common.WriteOnDiff): """Writes the tool file.""" f = writer(self.tool_file_path) self.doc.writexml(f, encoding='Windows-1252', addindent=' ', newl='\r\n') f.close() #------------------------------------------------------------------------------
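A hedged usage sketch of the Writer above. It assumes the module's own directory is on sys.path (so that its `common` helper imports), and it passes a plain file writer instead of common.WriteOnDiff; the rule contents are made up:

from MSVSToolFile import Writer  # assumption: gyp's pylib/gyp directory is on sys.path

w = Writer('my_rules.rules')
w.Create('my_rules')
w.AddCustomBuildRule(name='Protoc',
                     cmd='protoc --cpp_out=. $(InputPath)',
                     description='Compiling $(InputName).proto',
                     additional_dependencies=[],
                     outputs=['$(InputName).pb.cc', '$(InputName).pb.h'],
                     extensions=['.proto'])
w.Write(writer=lambda path: open(path, 'w'))  # plain writer instead of common.WriteOnDiff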
tianyang-li/de-novo-rna-seq-quant-1
refs/heads/master
boost_1_51_0/libs/python/test/bienstman2.py
46
# Copyright David Abrahams 2004. Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ''' >>> import bienstman2_ext ''' def run(args = None): import sys import doctest if args is not None: sys.argv = args return doctest.testmod(sys.modules.get(__name__)) if __name__ == '__main__': print "running..." import sys status = run()[0] if (status == 0): print "Done." sys.exit(status)
ake-koomsin/mapnik_nvpr
refs/heads/master
scons/scons-local-2.2.0/SCons/Tool/filesystem.py
14
"""SCons.Tool.filesystem Tool-specific initialization for the filesystem tools. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/filesystem.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo" import SCons from SCons.Tool.install import copyFunc copyToBuilder, copyAsBuilder = None, None def copyto_emitter(target, source, env): """ changes the path of the source to be under the target (which are assumed to be directories. """ n_target = [] for t in target: n_target = n_target + [t.File( str( s ) ) for s in source] return (n_target, source) def copy_action_func(target, source, env): assert( len(target) == len(source) ), "\ntarget: %s\nsource: %s" %(list(map(str, target)),list(map(str, source))) for t, s in zip(target, source): if copyFunc(t.get_path(), s.get_path(), env): return 1 return 0 def copy_action_str(target, source, env): return env.subst_target_source(env['COPYSTR'], 0, target, source) copy_action = SCons.Action.Action( copy_action_func, copy_action_str ) def generate(env): try: env['BUILDERS']['CopyTo'] env['BUILDERS']['CopyAs'] except KeyError, e: global copyToBuilder if copyToBuilder is None: copyToBuilder = SCons.Builder.Builder( action = copy_action, target_factory = env.fs.Dir, source_factory = env.fs.Entry, multi = 1, emitter = [ copyto_emitter, ] ) global copyAsBuilder if copyAsBuilder is None: copyAsBuilder = SCons.Builder.Builder( action = copy_action, target_factory = env.fs.Entry, source_factory = env.fs.Entry ) env['BUILDERS']['CopyTo'] = copyToBuilder env['BUILDERS']['CopyAs'] = copyAsBuilder env['COPYSTR'] = 'Copy file(s): "$SOURCES" to "$TARGETS"' def exists(env): return 1 # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
commercehub-oss/kerb-sts
refs/heads/master
tests/test_awsrole.py
1
# Copyright 2016 Commerce Technologies, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from kerb_sts.awsrole import AWSRole class TestAWSRoleCreation(unittest.TestCase): def test_with_role_as_none(self): is_valid = AWSRole.is_valid(None) self.assertFalse(is_valid) def test_with_empty_string(self): is_valid = AWSRole.is_valid('') self.assertFalse(is_valid) def test_with_malformed_role_string(self): is_valid = AWSRole.is_valid('arn_noseparator_provider') self.assertFalse(is_valid) def test_with_missing_arn_string(self): is_valid = AWSRole.is_valid(',provider') self.assertFalse(is_valid) def test_with_missing_provider_string(self): is_valid = AWSRole.is_valid('arn/role,') self.assertFalse(is_valid) def test_with_valid_strings(self): is_valid = AWSRole.is_valid('arn/role,provider') self.assertTrue(is_valid) def test_parsed_role(self): role = AWSRole('arn/role,provider') self.assertEqual(role.arn, 'arn/role') self.assertEqual(role.provider, 'provider') def test_parsed_role_name(self): role = AWSRole('arn/role,provider') self.assertEqual(role.name, 'role') if __name__ == '__main__': unittest.main()
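The tests above pin down the role-string format: an ARN and a credential provider separated by a comma, with the role name taken from the last path segment of the ARN. A rough standalone sketch of that parsing (not the library's implementation):

def parse_role(role_str):
    arn, provider = role_str.split(',', 1)
    return {'arn': arn, 'provider': provider, 'name': arn.split('/')[-1]}

assert parse_role('arn/role,provider') == {
    'arn': 'arn/role', 'provider': 'provider', 'name': 'role'}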
rosmo/ansible
refs/heads/devel
test/units/modules/network/netscaler/test_netscaler_cs_vserver.py
68
# Copyright (c) 2017 Citrix Systems # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from units.compat.mock import patch, Mock, MagicMock, call from units.modules.utils import set_module_args from .netscaler_module import TestModule, nitro_base_patcher import sys if sys.version_info[:2] != (2, 6): import requests class TestNetscalerCSVserverModule(TestModule): @classmethod def setUpClass(cls): class MockException(Exception): pass cls.MockException = MockException m = MagicMock() cls.cs_vserver_mock = MagicMock() cls.cs_vserver_mock.__class__ = MagicMock(add=Mock()) nssrc_modules_mock = { 'nssrc.com.citrix.netscaler.nitro.resource.config.cs': m, 'nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver': m, 'nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver.csvserver': m, 'nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver_cspolicy_binding': m, 'nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver_cspolicy_binding.csvserver_cspolicy_binding': m, 'nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver_lbvserver_binding': m, 'nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver_lbvserver_binding.csvserver_lbvserver_binding': m, 'nssrc.com.citrix.netscaler.nitro.resource.config.ssl': m, 'nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding': m, 'nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding.sslvserver_sslcertkey_binding': m, } cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock) cls.nitro_base_patcher = nitro_base_patcher @classmethod def tearDownClass(cls): cls.nitro_base_patcher.stop() cls.nitro_specific_patcher.stop() def setUp(self): super(TestNetscalerCSVserverModule, self).setUp() self.nitro_base_patcher.start() self.nitro_specific_patcher.start() # Setup minimal required arguments to pass AnsibleModule argument parsing def tearDown(self): super(TestNetscalerCSVserverModule, self).tearDown() self.nitro_base_patcher.stop() self.nitro_specific_patcher.stop() def test_graceful_nitro_api_import_error(self): # Stop nitro api patching to cause ImportError set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) self.nitro_base_patcher.stop() self.nitro_specific_patcher.stop() from ansible.modules.network.netscaler import netscaler_cs_vserver self.module = netscaler_cs_vserver result = self.failed() self.assertEqual(result['msg'], 'Could not load nitro python sdk') def test_graceful_nitro_error_on_login(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver class MockException(Exception): def __init__(self, *args, **kwargs): self.errorcode = 0 self.message = '' client_mock = Mock() client_mock.login = Mock(side_effect=MockException) m = Mock(return_value=client_mock) with 
patch('ansible.modules.network.netscaler.netscaler_cs_vserver.get_nitro_client', m): with patch('ansible.modules.network.netscaler.netscaler_cs_vserver.nitro_exception', MockException): self.module = netscaler_cs_vserver result = self.failed() self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly') def test_graceful_no_connection_error(self): if sys.version_info[:2] == (2, 6): self.skipTest('requests library not available under python2.6') set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver class MockException(Exception): pass client_mock = Mock() attrs = {'login.side_effect': requests.exceptions.ConnectionError} client_mock.configure_mock(**attrs) m = Mock(return_value=client_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, nitro_exception=MockException, ): self.module = netscaler_cs_vserver result = self.failed() self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully') def test_graceful_login_error(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver if sys.version_info[:2] == (2, 6): self.skipTest('requests library not available under python2.6') class MockException(Exception): pass client_mock = Mock() attrs = {'login.side_effect': requests.exceptions.SSLError} client_mock.configure_mock(**attrs) m = Mock(return_value=client_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, nitro_exception=MockException, ): self.module = netscaler_cs_vserver result = self.failed() self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully') def test_save_config_called_on_state_present(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) cs_vserver_proxy_mock = Mock() with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, cs_vserver_exists=Mock(side_effect=[False, True]), cs_vserver_identical=Mock(side_effect=[True]), ensure_feature_is_enabled=Mock(return_value=True), diff_list=Mock(return_value={}), nitro_exception=self.MockException, do_state_change=Mock(return_value=Mock(errorcode=0)), ConfigProxy=Mock(return_value=cs_vserver_proxy_mock), ): self.module = netscaler_cs_vserver self.exited() self.assertIn(call.save_config(), client_mock.mock_calls) def test_save_config_called_on_state_absent(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='absent', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) cs_vserver_proxy_mock = Mock() with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, cs_vserver_exists=Mock(side_effect=[True, False]), ensure_feature_is_enabled=Mock(return_value=True), ConfigProxy=Mock(return_value=cs_vserver_proxy_mock), ): self.module = netscaler_cs_vserver self.exited() self.assertIn(call.save_config(), client_mock.mock_calls) def test_save_config_not_called_on_state_present(self): set_module_args(dict( nitro_user='user', 
nitro_pass='pass', nsip='192.0.2.1', state='present', save_config=False, )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) cs_vserver_proxy_mock = Mock() with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, cs_vserver_exists=Mock(side_effect=[False, True]), cs_vserver_identical=Mock(side_effect=[True]), diff_list=Mock(return_value={}), ensure_feature_is_enabled=Mock(return_value=True), do_state_change=Mock(return_value=Mock(errorcode=0)), nitro_exception=self.MockException, ConfigProxy=Mock(return_value=cs_vserver_proxy_mock), ): self.module = netscaler_cs_vserver self.exited() self.assertNotIn(call.save_config(), client_mock.mock_calls) def test_save_config_not_called_on_state_absent(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='absent', save_config=False, )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) cs_vserver_proxy_mock = Mock() with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, cs_vserver_exists=Mock(side_effect=[True, False]), ensure_feature_is_enabled=Mock(return_value=True), ConfigProxy=Mock(return_value=cs_vserver_proxy_mock), ): self.module = netscaler_cs_vserver self.exited() self.assertNotIn(call.save_config(), client_mock.mock_calls) def test_new_cs_vserver_execution_flow(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, cs_vserver_exists=Mock(side_effect=[False, True]), cs_vserver_identical=Mock(side_effect=[True]), ensure_feature_is_enabled=Mock(return_value=True), ConfigProxy=config_proxy_mock, nitro_exception=self.MockException, do_state_change=Mock(return_value=Mock(errorcode=0)), ): self.module = netscaler_cs_vserver self.exited() cs_vserver_proxy_mock.assert_has_calls([call.add()]) def test_modified_cs_vserver_execution_flow(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, diff_list=Mock(return_value={}), get_immutables_intersection=Mock(return_value=[]), cs_vserver_exists=Mock(side_effect=[True, True]), cs_vserver_identical=Mock(side_effect=[False, True]), ensure_feature_is_enabled=Mock(return_value=True), nitro_exception=self.MockException, do_state_change=Mock(return_value=Mock(errorcode=0)), ConfigProxy=config_proxy_mock, ): self.module = netscaler_cs_vserver self.exited() cs_vserver_proxy_mock.assert_has_calls([call.update()]) def test_absent_cs_vserver_execution_flow(self): set_module_args(dict( nitro_user='user', 
nitro_pass='pass', nsip='192.0.2.1', state='absent', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, diff_list=Mock(return_value={}), get_immutables_intersection=Mock(return_value=[]), cs_vserver_exists=Mock(side_effect=[True, False]), cs_vserver_identical=Mock(side_effect=[False, True]), ensure_feature_is_enabled=Mock(return_value=True), ConfigProxy=config_proxy_mock, ): self.module = netscaler_cs_vserver self.exited() cs_vserver_proxy_mock.assert_has_calls([call.delete()]) def test_present_cs_vserver_identical_flow(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, diff_list=Mock(return_value={}), get_immutables_intersection=Mock(return_value=[]), cs_vserver_exists=Mock(side_effect=[True, True]), cs_vserver_identical=Mock(side_effect=[True, True]), ensure_feature_is_enabled=Mock(return_value=True), do_state_change=Mock(return_value=Mock(errorcode=0)), ConfigProxy=config_proxy_mock, ): self.module = netscaler_cs_vserver self.exited() cs_vserver_proxy_mock.assert_not_called() def test_absent_cs_vserver_noop_flow(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='absent', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=m, diff_list=Mock(return_value={}), get_immutables_intersection=Mock(return_value=[]), cs_vserver_exists=Mock(side_effect=[False, False]), cs_vserver_identical=Mock(side_effect=[False, False]), ensure_feature_is_enabled=Mock(return_value=True), ConfigProxy=config_proxy_mock, ): self.module = netscaler_cs_vserver self.exited() cs_vserver_proxy_mock.assert_not_called() def test_present_cs_vserver_failed_update(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', nitro_exception=self.MockException, get_nitro_client=m, diff_list=Mock(return_value={}), get_immutables_intersection=Mock(return_value=[]), cs_vserver_exists=Mock(side_effect=[True, True]), 
cs_vserver_identical=Mock(side_effect=[False, False]), ensure_feature_is_enabled=Mock(return_value=True), do_state_change=Mock(return_value=Mock(errorcode=0)), ConfigProxy=config_proxy_mock, ): self.module = netscaler_cs_vserver result = self.failed() self.assertEqual(result['msg'], 'CS vserver differs from configured') self.assertTrue(result['failed']) def test_present_cs_vserver_failed_create(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', nitro_exception=self.MockException, get_nitro_client=m, diff_list=Mock(return_value={}), get_immutables_intersection=Mock(return_value=[]), cs_vserver_exists=Mock(side_effect=[False, False]), cs_vserver_identical=Mock(side_effect=[False, False]), ensure_feature_is_enabled=Mock(return_value=True), do_state_change=Mock(return_value=Mock(errorcode=0)), ConfigProxy=config_proxy_mock, ): self.module = netscaler_cs_vserver result = self.failed() self.assertEqual(result['msg'], 'CS vserver does not exist') self.assertTrue(result['failed']) def test_present_cs_vserver_update_immutable_attribute(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', nitro_exception=self.MockException, get_nitro_client=m, diff_list=Mock(return_value={}), get_immutables_intersection=Mock(return_value=['domain']), cs_vserver_exists=Mock(side_effect=[True, True]), cs_vserver_identical=Mock(side_effect=[False, False]), ensure_feature_is_enabled=Mock(return_value=True), ConfigProxy=config_proxy_mock, ): self.module = netscaler_cs_vserver result = self.failed() self.assertEqual(result['msg'], 'Cannot update immutable attributes [\'domain\']') self.assertTrue(result['failed']) def test_absent_cs_vserver_failed_delete(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='absent', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', nitro_exception=self.MockException, get_nitro_client=m, diff_list=Mock(return_value={}), get_immutables_intersection=Mock(return_value=[]), cs_vserver_exists=Mock(side_effect=[True, True]), cs_vserver_identical=Mock(side_effect=[False, False]), ensure_feature_is_enabled=Mock(return_value=True), ConfigProxy=config_proxy_mock, ): self.module = netscaler_cs_vserver result = self.failed() self.assertEqual(result['msg'], 'CS vserver still exists') 
self.assertTrue(result['failed']) def test_graceful_nitro_exception_state_present(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver class MockException(Exception): def __init__(self, *args, **kwargs): self.errorcode = 0 self.message = '' m = Mock(side_effect=MockException) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', cs_vserver_exists=m, ensure_feature_is_enabled=Mock(return_value=True), nitro_exception=MockException ): self.module = netscaler_cs_vserver result = self.failed() self.assertTrue( result['msg'].startswith('nitro exception'), msg='Nitro exception not caught on operation absent' ) def test_graceful_nitro_exception_state_absent(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='absent', )) from ansible.modules.network.netscaler import netscaler_cs_vserver class MockException(Exception): def __init__(self, *args, **kwargs): self.errorcode = 0 self.message = '' m = Mock(side_effect=MockException) with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', cs_vserver_exists=m, ensure_feature_is_enabled=Mock(return_value=True), nitro_exception=MockException ): self.module = netscaler_cs_vserver result = self.failed() self.assertTrue( result['msg'].startswith('nitro exception'), msg='Nitro exception not caught on operation absent' ) def test_disabled_state_change_called(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver cs_vserver_proxy_mock = Mock() do_state_change_mock = Mock(return_value=Mock(errorcode=0)) client_mock = Mock() with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', get_nitro_client=Mock(return_value=client_mock), ConfigProxy=Mock(return_value=cs_vserver_proxy_mock), ensure_feature_is_enabled=Mock(return_value=True), cs_vserver_exists=Mock(side_effect=[True, True]), cs_vserver_identical=Mock(side_effect=[True, True]), nitro_exception=self.MockException, do_state_change=do_state_change_mock, ): self.module = netscaler_cs_vserver self.exited() self.assertTrue(len(do_state_change_mock.mock_calls) > 0, msg='Did not call state change') def test_cs_vserver_ssl_called(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', servicetype='SSL', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) ssl_certkey_bindings_sync_mock = Mock() with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', nitro_exception=self.MockException, get_nitro_client=m, diff_list=Mock(return_value={}), get_immutables_intersection=Mock(return_value=[]), cs_vserver_exists=Mock(side_effect=[True, True]), cs_vserver_identical=Mock(side_effect=[False, True]), ensure_feature_is_enabled=Mock(return_value=True), ssl_certkey_bindings_identical=Mock(side_effect=[False, True]), ssl_certkey_bindings_sync=ssl_certkey_bindings_sync_mock, do_state_change=Mock(return_value=Mock(errorcode=0)), ConfigProxy=config_proxy_mock, ): self.module = netscaler_cs_vserver result = self.exited() self.assertTrue(result['changed']) 
self.assertTrue(ssl_certkey_bindings_sync_mock.called) def test_cs_vserver_ssl_not_called(self): set_module_args(dict( nitro_user='user', nitro_pass='pass', nsip='192.0.2.1', state='present', )) from ansible.modules.network.netscaler import netscaler_cs_vserver client_mock = Mock() m = Mock(return_value=client_mock) server_proxy_attrs = { 'diff_object.return_value': {}, } cs_vserver_proxy_mock = Mock() cs_vserver_proxy_mock.configure_mock(**server_proxy_attrs) config_proxy_mock = Mock(return_value=cs_vserver_proxy_mock) ssl_certkey_bindings_sync_mock = Mock() with patch.multiple( 'ansible.modules.network.netscaler.netscaler_cs_vserver', nitro_exception=self.MockException, get_nitro_client=m, diff_list=Mock(return_value={}), get_immutables_intersection=Mock(return_value=[]), cs_vserver_exists=Mock(side_effect=[True, True]), cs_vserver_identical=Mock(side_effect=[False, True]), ensure_feature_is_enabled=Mock(return_value=True), ssl_certkey_bindings_identical=Mock(side_effect=[False, True]), ssl_certkey_bindings_sync=ssl_certkey_bindings_sync_mock, do_state_change=Mock(return_value=Mock(errorcode=0)), ConfigProxy=config_proxy_mock, ): self.module = netscaler_cs_vserver result = self.exited() self.assertTrue(result['changed']) self.assertFalse(ssl_certkey_bindings_sync_mock.called)
kalahbrown/HueBigSQL
refs/heads/master
desktop/core/ext-py/Django-1.6.10/django/utils/unittest/collector.py
572
import os import sys from django.utils.unittest.loader import defaultTestLoader def collector(): # import __main__ triggers code re-execution __main__ = sys.modules['__main__'] setupDir = os.path.abspath(os.path.dirname(__main__.__file__)) return defaultTestLoader.discover(setupDir)
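The collector above simply points unittest discovery at the directory of the running __main__ module. A minimal sketch of the same discovery call used directly, here with the standard library's unittest rather than Django's bundled copy:

import unittest

suite = unittest.defaultTestLoader.discover('.')  # discover test*.py under the current directory
unittest.TextTestRunner(verbosity=1).run(suite)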
karstenw/nodebox-pyobjc
refs/heads/master
examples/Extended Application/sklearn/examples/cluster/plot_kmeans_silhouette_analysis.py
1
""" =============================================================================== Selecting the number of clusters with silhouette analysis on KMeans clustering =============================================================================== Silhouette analysis can be used to study the separation distance between the resulting clusters. The silhouette plot displays a measure of how close each point in one cluster is to points in the neighboring clusters and thus provides a way to assess parameters like number of clusters visually. This measure has a range of [-1, 1]. Silhouette coefficients (as these values are referred to as) near +1 indicate that the sample is far away from the neighboring clusters. A value of 0 indicates that the sample is on or very close to the decision boundary between two neighboring clusters and negative values indicate that those samples might have been assigned to the wrong cluster. In this example the silhouette analysis is used to choose an optimal value for ``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5 and 6 are a bad pick for the given data due to the presence of clusters with below average silhouette scores and also due to wide fluctuations in the size of the silhouette plots. Silhouette analysis is more ambivalent in deciding between 2 and 4. Also from the thickness of the silhouette plot the cluster size can be visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to 2, is bigger in size owing to the grouping of the 3 sub clusters into one big cluster. However when the ``n_clusters`` is equal to 4, all the plots are more or less of similar thickness and hence are of similar sizes as can be also verified from the labelled scatter plot on the right. """ from __future__ import print_function from sklearn.datasets import make_blobs from sklearn.cluster import KMeans from sklearn.metrics import silhouette_samples, silhouette_score import matplotlib.pyplot as plt import matplotlib.cm as cm import numpy as np # nodebox section if __name__ == '__builtin__': # were in nodebox import os import tempfile W = 800 inset = 20 size(W, 600) plt.cla() plt.clf() plt.close('all') def tempimage(): fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False) fname = fob.name fob.close() return fname imgx = 20 imgy = 0 def pltshow(plt, dpi=150): global imgx, imgy temppath = tempimage() plt.savefig(temppath, dpi=dpi) dx,dy = imagesize(temppath) w = min(W,dx) image(temppath,imgx,imgy,width=w) imgy = imgy + dy + 20 os.remove(temppath) size(W, HEIGHT+dy+40) else: def pltshow(mplpyplot): mplpyplot.show() # nodebox section end print(__doc__) # Generating the sample data from make_blobs # This particular setting has one distinct cluster and 3 clusters placed close # together. X, y = make_blobs(n_samples=500, n_features=2, centers=4, cluster_std=1, center_box=(-10.0, 10.0), shuffle=True, random_state=1) # For reproducibility range_n_clusters = [2, 3, 4, 5, 6] for n_clusters in range_n_clusters: # Create a subplot with 1 row and 2 columns fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(18, 7) # The 1st subplot is the silhouette plot # The silhouette coefficient can range from -1, 1 but in this example all # lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for inserting blank space between silhouette # plots of individual clusters, to demarcate them clearly. 
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10]) # Initialize the clusterer with n_clusters value and a random generator # seed of 10 for reproducibility. clusterer = KMeans(n_clusters=n_clusters, random_state=10) cluster_labels = clusterer.fit_predict(X) # The silhouette_score gives the average value for all the samples. # This gives a perspective into the density and separation of the formed # clusters silhouette_avg = silhouette_score(X, cluster_labels) print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg) # Compute the silhouette scores for each sample sample_silhouette_values = silhouette_samples(X, cluster_labels) y_lower = 10 for i in range(n_clusters): # Aggregate the silhouette scores for samples belonging to # cluster i, and sort them ith_cluster_silhouette_values = \ sample_silhouette_values[cluster_labels == i] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.spectral(float(i) / n_clusters) ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=0.7) # Label the silhouette plots with their cluster numbers at the middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i)) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples ax1.set_title("The silhouette plot for the various clusters.") ax1.set_xlabel("The silhouette coefficient values") ax1.set_ylabel("Cluster label") # The vertical line for average silhouette score of all the values ax1.axvline(x=silhouette_avg, color="red", linestyle="--") ax1.set_yticks([]) # Clear the yaxis labels / ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1]) # 2nd Plot showing the actual clusters formed colors = cm.spectral(cluster_labels.astype(float) / n_clusters) ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=colors, edgecolor='k') # Labeling the clusters centers = clusterer.cluster_centers_ # Draw white circles at cluster centers ax2.scatter(centers[:, 0], centers[:, 1], marker='o', c="white", alpha=1, s=200, edgecolor='k') for i, c in enumerate(centers): ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50, edgecolor='k') ax2.set_title("The visualization of the clustered data.") ax2.set_xlabel("Feature space for the 1st feature") ax2.set_ylabel("Feature space for the 2nd feature") plt.suptitle(("Silhouette analysis for KMeans clustering on sample data " "with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold') # plt.show() pltshow(plt)
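A compact sketch of the core measurement in the example above: fit KMeans for a single candidate number of clusters and report the mean silhouette coefficient over all samples:

from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

X, _ = make_blobs(n_samples=300, centers=4, random_state=1)
labels = KMeans(n_clusters=4, random_state=10).fit_predict(X)
print("mean silhouette: %.3f" % silhouette_score(X, labels))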
ubiar/odoo
refs/heads/8.0
addons/document/wizard/__init__.py
444
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import document_configuration # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
yantrabuddhi/opencog
refs/heads/master
opencog/nlp/sentiment/basic_sentiment_analysis.py
11
# coding: utf-8 """ basic_sentiment_analysis ~~~~~~~~~~~~~~~~~~~~~~~~ This module contains the code and examples described in http://fjavieralba.com/basic-sentiment-analysis-with-python.html Modified by Ruiting Lian, 2016/7 """ import nltk import yaml import sys import os import re class Splitter(object): def __init__(self): self.nltk_splitter = nltk.data.load('tokenizers/punkt/english.pickle') self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer() def split(self, text): """ input format: a paragraph of text output format: a list of lists of words. e.g.: [['this', 'is', 'a', 'sentence'], ['this', 'is', 'another', 'one']] """ sentences = self.nltk_splitter.tokenize(text) tokenized_sentences = [self.nltk_tokenizer.tokenize(sent) for sent in sentences] return tokenized_sentences class POSTagger(object): def __init__(self): pass def pos_tag(self, sentences): """ input format: list of lists of words e.g.: [['this', 'is', 'a', 'sentence'], ['this', 'is', 'another', 'one']] output format: list of lists of tagged tokens. Each tagged tokens has a form, a lemma, and a list of tags e.g: [[('this', 'this', ['DT']), ('is', 'be', ['VB']), ('a', 'a', ['DT']), ('sentence', 'sentence', ['NN'])], [('this', 'this', ['DT']), ('is', 'be', ['VB']), ('another', 'another', ['DT']), ('one', 'one', ['CARD'])]] """ pos = [nltk.pos_tag(sentence) for sentence in sentences] #adapt format pos = [[(word, word, [postag]) for (word, postag) in sentence] for sentence in pos] return pos class DictionaryTagger(object): def __init__(self, dictionary_paths): files = [open(path, 'r') for path in dictionary_paths] dictionaries = [yaml.safe_load(dict_file) for dict_file in files] map(lambda x: x.close(), files) self.dictionary = {} self.max_key_size = 0 for curr_dict in dictionaries: for key in curr_dict: if key in self.dictionary: self.dictionary[key].extend(curr_dict[key]) elif key is not False and key is not True: self.dictionary[key] = curr_dict[key] self.max_key_size = max(self.max_key_size, len(key)) elif key is False: # print curr_dict[key] key = "false" self.dictionary[key] = curr_dict [False] self.max_key_size = max(self.max_key_size, len(key)) else: key = "true" self.dictionary[key] = curr_dict [True] self.max_key_size = max(self.max_key_size, len(key)) def tag(self, postagged_sentences): return [self.tag_sentence(sentence) for sentence in postagged_sentences] def tag_sentence(self, sentence, tag_with_lemmas=False): """ the result is only one tagging of all the possible ones. 
The resulting tagging is determined by these two priority rules: - longest matches have higher priority - search is made from left to right """ tag_sentence = [] N = len(sentence) if self.max_key_size == 0: self.max_key_size = N i = 0 while (i < N): j = min(i + self.max_key_size, N) #avoid overflow tagged = False while (j > i): expression_form = ' '.join([word[0] for word in sentence[i:j]]).lower() expression_lemma = ' '.join([word[1] for word in sentence[i:j]]).lower() if tag_with_lemmas: literal = expression_lemma else: literal = expression_form if literal in self.dictionary: #self.logger.debug("found: %s" % literal) is_single_token = j - i == 1 original_position = i i = j taggings = [tag for tag in self.dictionary[literal]] tagged_expression = (expression_form, expression_lemma, taggings) if is_single_token: #if the tagged literal is a single token, conserve its previous taggings: original_token_tagging = sentence[original_position][2] tagged_expression[2].extend(original_token_tagging) tag_sentence.append(tagged_expression) tagged = True else: j = j - 1 if not tagged: tag_sentence.append(sentence[i]) i += 1 return tag_sentence def value_of(sentiment): if sentiment == 'positive': return 1 if sentiment == 'negative': return -1 return 0 def sentence_score(sentence_tokens, previous_token, acum_score, neg_num): if not sentence_tokens: if(neg_num % 2 == 0): return acum_score else: acum_score *= -1.0 return acum_score else: current_token = sentence_tokens[0] tags = current_token[2] token_score = sum([value_of(tag) for tag in tags]) if previous_token is not None: previous_tags = previous_token[2] if 'inc' in previous_tags: token_score *= 2.0 elif 'dec' in previous_tags: token_score /= 2.0 elif 'inv' in previous_tags: neg_num += 1 return sentence_score(sentence_tokens[1:], current_token, acum_score + token_score, neg_num) def sentiment_score(review): return sum([sentence_score(sentence, None, 0.0, 0) for sentence in review]) configpath = '/usr/local/etc/' path = os.path.join(configpath, 'opencog/dicts'); dictfilenames = ['positive.yml', 'negative.yml', 'inc.yml', 'dec.yml', 'inv.yml'] dicttagger = DictionaryTagger([os.path.join(path, d) for d in dictfilenames]) def sentiment_parse(plain_text): splitter = Splitter() postagger = POSTagger() splitted_sentences = splitter.split(plain_text) pos_tagged_sentences = postagger.pos_tag(splitted_sentences) dict_tagged_sentences = dicttagger.tag(pos_tagged_sentences) score = sentiment_score(dict_tagged_sentences) return score if __name__ == "__main__": #text = """What can I say about this place. The staff of the restaurant is #nice and the eggplant is not bad. Apart from that, very uninspired food, #lack of atmosphere and too expensive. I am a staunch vegetarian and was #sorely dissapointed with the veggie options on the menu. Will be the last #time I visit, I recommend others to avoid.""" text = """His statement is false. So he is a dishonest guy.""" score = sentiment_parse(text) print(score)
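A minimal usage sketch for the module above (not part of the original file): it assumes the NLTK punkt model and the YAML sentiment dictionaries loaded into `dicttagger` are installed at the hard-coded paths, and simply chains the classes the same way `sentiment_parse` does.

def _example_sentiment_pipeline():
    # Hypothetical helper, shown only to illustrate how the classes compose.
    splitter = Splitter()                      # sentence + word tokenization
    postagger = POSTagger()                    # (form, lemma, [tags]) triples
    sentences = splitter.split("The staff is nice but the food is not good.")
    tagged = dicttagger.tag(postagger.pos_tag(sentences))
    # Positive totals lean positive, negative totals lean negative.
    return sentiment_score(tagged)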
pinterest/pymemcache
refs/heads/master
pymemcache/test/conftest.py
1
import os.path

import pytest
import socket
import ssl


def pytest_addoption(parser):
    parser.addoption('--server', action='store', default='localhost',
                     help='memcached server')

    parser.addoption('--port', action='store', default='11211',
                     help='memcached server port')

    parser.addoption('--tls-server', action='store', default='localhost',
                     help='TLS memcached server')

    parser.addoption('--tls-port', action='store', default='11212',
                     help='TLS memcached server port')

    parser.addoption('--size', action='store', default=1024,
                     help='size of data in benchmarks')

    parser.addoption('--count', action='store', default=10000,
                     help='number of iterations to run each benchmark')

    parser.addoption('--keys', action='store', default=20,
                     help='number of keys to use for multi benchmarks')


@pytest.fixture(scope='session')
def host(request):
    return request.config.option.server


@pytest.fixture(scope='session')
def port(request):
    return int(request.config.option.port)


@pytest.fixture(scope='session')
def tls_host(request):
    return request.config.option.tls_server


@pytest.fixture(scope='session')
def tls_port(request):
    return int(request.config.option.tls_port)


@pytest.fixture(scope='session')
def size(request):
    return int(request.config.option.size)


@pytest.fixture(scope='session')
def count(request):
    return int(request.config.option.count)


@pytest.fixture(scope='session')
def keys(request):
    return int(request.config.option.keys)


@pytest.fixture(scope='session')
def pairs(size, keys):
    return {'pymemcache_test:%d' % i: 'X' * size for i in range(keys)}


@pytest.fixture(scope='session')
def tls_context():
    return ssl.create_default_context(
        cafile=os.path.join(os.path.dirname(__file__), "certs/ca-root.crt")
    )


def pytest_generate_tests(metafunc):
    if 'socket_module' in metafunc.fixturenames:
        socket_modules = [socket]
        try:
            from gevent import socket as gevent_socket
        except ImportError:
            print("Skipping gevent (not installed)")
        else:
            socket_modules.append(gevent_socket)

        metafunc.parametrize("socket_module", socket_modules)

    if 'client_class' in metafunc.fixturenames:
        from pymemcache.client.base import PooledClient, Client
        from pymemcache.client.hash import HashClient

        class HashClientSingle(HashClient):
            def __init__(self, server, *args, **kwargs):
                super(HashClientSingle, self).__init__(
                    [server], *args, **kwargs
                )

        metafunc.parametrize(
            "client_class", [Client, PooledClient, HashClientSingle]
        )
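A hedged sketch of how these fixtures are consumed (a hypothetical test, not part of pymemcache): `pytest_generate_tests` parametrizes `client_class`, so the one test body below would run once per client implementation against a live memcached at `--server`/`--port`.

def test_set_get_roundtrip(client_class, host, port):
    # Requires a running memcached instance; client_class is Client,
    # PooledClient, or the single-server HashClient wrapper defined above.
    client = client_class((host, port))
    client.set(b'key', b'value')
    assert client.get(b'key') == b'value'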
darkless456/Python
refs/heads/master
sl默认语法.py
1
print ("默认语法")
oesteban/preprocessing-workflow
refs/heads/master
fmriprep/workflows/fieldmap/phdiff.py
1
#!/usr/bin/env python # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ .. _sdc_phasediff : Phase-difference B0 estimation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The field inhomogeneity inside the scanner (fieldmap) is proportional to the phase drift between two subsequent :abbr:`GRE (gradient recall echo)` sequence. Fieldmap preprocessing workflow for fieldmap data structure 8.9.1 in BIDS 1.0.0: one phase diff and at least one magnitude image """ from nipype.interfaces import ants, fsl, utility as niu from nipype.pipeline import engine as pe from nipype.workflows.dmri.fsl.utils import siemens2rads, demean_image, \ cleanup_edge_pipeline from niworkflows.engine.workflows import LiterateWorkflow as Workflow from niworkflows.interfaces.bids import ReadSidecarJSON from niworkflows.interfaces.images import IntraModalMerge from niworkflows.interfaces.masks import BETRPT from ...interfaces import Phasediff2Fieldmap, DerivativesDataSink def init_phdiff_wf(omp_nthreads, name='phdiff_wf'): """ Estimates the fieldmap using a phase-difference image and one or more magnitude images corresponding to two or more :abbr:`GRE (Gradient Echo sequence)` acquisitions. The `original code was taken from nipype <https://github.com/nipy/nipype/blob/master/nipype/workflows/dmri/fsl/artifacts.py#L514>`_. .. workflow :: :graph2use: orig :simple_form: yes from fmriprep.workflows.fieldmap.phdiff import init_phdiff_wf wf = init_phdiff_wf(omp_nthreads=1) Outputs:: outputnode.fmap_ref - The average magnitude image, skull-stripped outputnode.fmap_mask - The brain mask applied to the fieldmap outputnode.fmap - The estimated fieldmap in Hz """ workflow = Workflow(name=name) workflow.__desc__ = """\ A deformation field to correct for susceptibility distortions was estimated based on a field map that was co-registered to the BOLD reference, using a custom workflow of *fMRIPrep* derived from D. Greve's `epidewarp.fsl` [script](http://www.nmr.mgh.harvard.edu/~greve/fbirn/b0/epidewarp.fsl) and further improvements of HCP Pipelines [@hcppipelines]. 
""" inputnode = pe.Node(niu.IdentityInterface(fields=['magnitude', 'phasediff']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['fmap', 'fmap_ref', 'fmap_mask']), name='outputnode') def _pick1st(inlist): return inlist[0] # Read phasediff echo times meta = pe.Node(ReadSidecarJSON(bids_validate=False), name='meta', mem_gb=0.01) # Merge input magnitude images magmrg = pe.Node(IntraModalMerge(), name='magmrg') # de-gradient the fields ("bias/illumination artifact") n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3, copy_header=True), name='n4', n_procs=omp_nthreads) bet = pe.Node(BETRPT(generate_report=True, frac=0.6, mask=True), name='bet') ds_report_fmap_mask = pe.Node(DerivativesDataSink( desc='brain', suffix='mask'), name='ds_report_fmap_mask', mem_gb=0.01, run_without_submitting=True) # uses mask from bet; outputs a mask # dilate = pe.Node(fsl.maths.MathsCommand( # nan2zeros=True, args='-kernel sphere 5 -dilM'), name='MskDilate') # phase diff -> radians pha2rads = pe.Node(niu.Function(function=siemens2rads), name='pha2rads') # FSL PRELUDE will perform phase-unwrapping prelude = pe.Node(fsl.PRELUDE(), name='prelude') denoise = pe.Node(fsl.SpatialFilter(operation='median', kernel_shape='sphere', kernel_size=3), name='denoise') demean = pe.Node(niu.Function(function=demean_image), name='demean') cleanup_wf = cleanup_edge_pipeline(name="cleanup_wf") compfmap = pe.Node(Phasediff2Fieldmap(), name='compfmap') # The phdiff2fmap interface is equivalent to: # rad2rsec (using rads2radsec from nipype.workflows.dmri.fsl.utils) # pre_fugue = pe.Node(fsl.FUGUE(save_fmap=True), name='ComputeFieldmapFUGUE') # rsec2hz (divide by 2pi) workflow.connect([ (inputnode, meta, [('phasediff', 'in_file')]), (inputnode, magmrg, [('magnitude', 'in_files')]), (magmrg, n4, [('out_avg', 'input_image')]), (n4, prelude, [('output_image', 'magnitude_file')]), (n4, bet, [('output_image', 'in_file')]), (bet, prelude, [('mask_file', 'mask_file')]), (inputnode, pha2rads, [('phasediff', 'in_file')]), (pha2rads, prelude, [('out', 'phase_file')]), (meta, compfmap, [('out_dict', 'metadata')]), (prelude, denoise, [('unwrapped_phase_file', 'in_file')]), (denoise, demean, [('out_file', 'in_file')]), (demean, cleanup_wf, [('out', 'inputnode.in_file')]), (bet, cleanup_wf, [('mask_file', 'inputnode.in_mask')]), (cleanup_wf, compfmap, [('outputnode.out_file', 'in_file')]), (compfmap, outputnode, [('out_file', 'fmap')]), (bet, outputnode, [('mask_file', 'fmap_mask'), ('out_file', 'fmap_ref')]), (inputnode, ds_report_fmap_mask, [('phasediff', 'source_file')]), (bet, ds_report_fmap_mask, [('out_report', 'in_file')]), ]) return workflow
wweiradio/django-guardian
refs/heads/master
guardian/tests/forms_test.py
86
from __future__ import unicode_literals

from django.contrib.contenttypes.models import ContentType
from django.test import TestCase

from guardian.compat import get_user_model
from guardian.forms import BaseObjectPermissionsForm


class BaseObjectPermissionsFormTests(TestCase):

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            'joe', '[email protected]', 'joe')
        self.obj = ContentType.objects.create(name='foo', model='bar',
                                              app_label='fake-for-guardian-tests')

    def test_not_implemented(self):

        class MyUserObjectPermissionsForm(BaseObjectPermissionsForm):

            def __init__(formself, user, *args, **kwargs):
                self.user = user
                super(MyUserObjectPermissionsForm, formself).__init__(
                    *args, **kwargs)

        form = MyUserObjectPermissionsForm(self.user, self.obj, {})
        self.assertRaises(NotImplementedError, form.save_obj_perms)

        field_name = form.get_obj_perms_field_name()
        self.assertTrue(form.is_valid())
        self.assertEqual(len(form.cleaned_data[field_name]), 0)
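A hedged sketch of a concrete subclass (hypothetical, not part of the test suite): the test above only verifies that the base class raises NotImplementedError, while a working subclass implements save_obj_perms, for example by assigning each selected permission with guardian's assign_perm shortcut.

from guardian.shortcuts import assign_perm


class ExampleUserObjectPermissionsForm(BaseObjectPermissionsForm):
    # Hypothetical form: stores the user, then grants the chosen permissions
    # on the object passed to the base class.
    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(ExampleUserObjectPermissionsForm, self).__init__(*args, **kwargs)

    def save_obj_perms(self):
        for perm in self.cleaned_data[self.get_obj_perms_field_name()]:
            assign_perm(perm, self.user, self.obj)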
rs2/pandas
refs/heads/master
asv_bench/benchmarks/frame_methods.py
1
import string import warnings import numpy as np from pandas import DataFrame, MultiIndex, NaT, Series, date_range, isnull, period_range from .pandas_vb_common import tm class GetNumericData: def setup(self): self.df = DataFrame(np.random.randn(10000, 25)) self.df["foo"] = "bar" self.df["bar"] = "baz" self.df = self.df._consolidate() def time_frame_get_numeric_data(self): self.df._get_numeric_data() class Lookup: def setup(self): self.df = DataFrame(np.random.randn(10000, 8), columns=list("abcdefgh")) self.df["foo"] = "bar" self.row_labels = list(self.df.index[::10])[:900] self.col_labels = list(self.df.columns) * 100 self.row_labels_all = np.array( list(self.df.index) * len(self.df.columns), dtype="object" ) self.col_labels_all = np.array( list(self.df.columns) * len(self.df.index), dtype="object" ) def time_frame_fancy_lookup(self): self.df.lookup(self.row_labels, self.col_labels) def time_frame_fancy_lookup_all(self): self.df.lookup(self.row_labels_all, self.col_labels_all) class Reindex: def setup(self): N = 10 ** 3 self.df = DataFrame(np.random.randn(N * 10, N)) self.idx = np.arange(4 * N, 7 * N) self.df2 = DataFrame( { c: { 0: np.random.randint(0, 2, N).astype(np.bool_), 1: np.random.randint(0, N, N).astype(np.int16), 2: np.random.randint(0, N, N).astype(np.int32), 3: np.random.randint(0, N, N).astype(np.int64), }[np.random.randint(0, 4)] for c in range(N) } ) def time_reindex_axis0(self): self.df.reindex(self.idx) def time_reindex_axis1(self): self.df.reindex(columns=self.idx) def time_reindex_both_axes(self): self.df.reindex(index=self.idx, columns=self.idx) def time_reindex_upcast(self): self.df2.reindex(np.random.permutation(range(1200))) class Rename: def setup(self): N = 10 ** 3 self.df = DataFrame(np.random.randn(N * 10, N)) self.idx = np.arange(4 * N, 7 * N) self.dict_idx = {k: k for k in self.idx} self.df2 = DataFrame( { c: { 0: np.random.randint(0, 2, N).astype(np.bool_), 1: np.random.randint(0, N, N).astype(np.int16), 2: np.random.randint(0, N, N).astype(np.int32), 3: np.random.randint(0, N, N).astype(np.int64), }[np.random.randint(0, 4)] for c in range(N) } ) def time_rename_single(self): self.df.rename({0: 0}) def time_rename_axis0(self): self.df.rename(self.dict_idx) def time_rename_axis1(self): self.df.rename(columns=self.dict_idx) def time_rename_both_axes(self): self.df.rename(index=self.dict_idx, columns=self.dict_idx) def time_dict_rename_both_axes(self): self.df.rename(index=self.dict_idx, columns=self.dict_idx) class Iteration: # mem_itertuples_* benchmarks are slow timeout = 120 def setup(self): N = 1000 self.df = DataFrame(np.random.randn(N * 10, N)) self.df2 = DataFrame(np.random.randn(N * 50, 10)) self.df3 = DataFrame( np.random.randn(N, 5 * N), columns=["C" + str(c) for c in range(N * 5)] ) self.df4 = DataFrame(np.random.randn(N * 1000, 10)) def time_items(self): # (monitor no-copying behaviour) if hasattr(self.df, "_item_cache"): self.df._item_cache.clear() for name, col in self.df.items(): pass def time_items_cached(self): for name, col in self.df.items(): pass def time_iteritems_indexing(self): for col in self.df3: self.df3[col] def time_itertuples_start(self): self.df4.itertuples() def time_itertuples_read_first(self): next(self.df4.itertuples()) def time_itertuples(self): for row in self.df4.itertuples(): pass def time_itertuples_to_list(self): list(self.df4.itertuples()) def mem_itertuples_start(self): return self.df4.itertuples() def peakmem_itertuples_start(self): self.df4.itertuples() def mem_itertuples_read_first(self): return 
next(self.df4.itertuples()) def peakmem_itertuples(self): for row in self.df4.itertuples(): pass def mem_itertuples_to_list(self): return list(self.df4.itertuples()) def peakmem_itertuples_to_list(self): list(self.df4.itertuples()) def time_itertuples_raw_start(self): self.df4.itertuples(index=False, name=None) def time_itertuples_raw_read_first(self): next(self.df4.itertuples(index=False, name=None)) def time_itertuples_raw_tuples(self): for row in self.df4.itertuples(index=False, name=None): pass def time_itertuples_raw_tuples_to_list(self): list(self.df4.itertuples(index=False, name=None)) def mem_itertuples_raw_start(self): return self.df4.itertuples(index=False, name=None) def peakmem_itertuples_raw_start(self): self.df4.itertuples(index=False, name=None) def peakmem_itertuples_raw_read_first(self): next(self.df4.itertuples(index=False, name=None)) def peakmem_itertuples_raw(self): for row in self.df4.itertuples(index=False, name=None): pass def mem_itertuples_raw_to_list(self): return list(self.df4.itertuples(index=False, name=None)) def peakmem_itertuples_raw_to_list(self): list(self.df4.itertuples(index=False, name=None)) def time_iterrows(self): for row in self.df.iterrows(): pass class ToString: def setup(self): self.df = DataFrame(np.random.randn(100, 10)) def time_to_string_floats(self): self.df.to_string() class ToHTML: def setup(self): nrows = 500 self.df2 = DataFrame(np.random.randn(nrows, 10)) self.df2[0] = period_range("2000", periods=nrows) self.df2[1] = range(nrows) def time_to_html_mixed(self): self.df2.to_html() class ToNumpy: def setup(self): N = 10000 M = 10 self.df_tall = DataFrame(np.random.randn(N, M)) self.df_wide = DataFrame(np.random.randn(M, N)) self.df_mixed_tall = self.df_tall.copy() self.df_mixed_tall["foo"] = "bar" self.df_mixed_tall[0] = period_range("2000", periods=N) self.df_mixed_tall[1] = range(N) self.df_mixed_wide = self.df_wide.copy() self.df_mixed_wide["foo"] = "bar" self.df_mixed_wide[0] = period_range("2000", periods=M) self.df_mixed_wide[1] = range(M) def time_to_numpy_tall(self): self.df_tall.to_numpy() def time_to_numpy_wide(self): self.df_wide.to_numpy() def time_to_numpy_mixed_tall(self): self.df_mixed_tall.to_numpy() def time_to_numpy_mixed_wide(self): self.df_mixed_wide.to_numpy() def time_values_tall(self): self.df_tall.values def time_values_wide(self): self.df_wide.values def time_values_mixed_tall(self): self.df_mixed_tall.values def time_values_mixed_wide(self): self.df_mixed_wide.values class Repr: def setup(self): nrows = 10000 data = np.random.randn(nrows, 10) arrays = np.tile(np.random.randn(3, int(nrows / 100)), 100) idx = MultiIndex.from_arrays(arrays) self.df3 = DataFrame(data, index=idx) self.df4 = DataFrame(data, index=np.random.randn(nrows)) self.df_tall = DataFrame(np.random.randn(nrows, 10)) self.df_wide = DataFrame(np.random.randn(10, nrows)) def time_html_repr_trunc_mi(self): self.df3._repr_html_() def time_html_repr_trunc_si(self): self.df4._repr_html_() def time_repr_tall(self): repr(self.df_tall) def time_frame_repr_wide(self): repr(self.df_wide) class MaskBool: def setup(self): data = np.random.randn(1000, 500) df = DataFrame(data) df = df.where(df > 0) self.bools = df > 0 self.mask = isnull(df) def time_frame_mask_bools(self): self.bools.mask(self.mask) def time_frame_mask_floats(self): self.bools.astype(float).mask(self.mask) class Isnull: def setup(self): N = 10 ** 3 self.df_no_null = DataFrame(np.random.randn(N, N)) sample = np.array([np.nan, 1.0]) data = np.random.choice(sample, (N, N)) self.df = 
DataFrame(data) sample = np.array(list(string.ascii_letters + string.whitespace)) data = np.random.choice(sample, (N, N)) self.df_strings = DataFrame(data) sample = np.array( [ NaT, np.nan, None, np.datetime64("NaT"), np.timedelta64("NaT"), 0, 1, 2.0, "", "abcd", ] ) data = np.random.choice(sample, (N, N)) self.df_obj = DataFrame(data) def time_isnull_floats_no_null(self): isnull(self.df_no_null) def time_isnull(self): isnull(self.df) def time_isnull_strngs(self): isnull(self.df_strings) def time_isnull_obj(self): isnull(self.df_obj) class Fillna: params = ([True, False], ["pad", "bfill"]) param_names = ["inplace", "method"] def setup(self, inplace, method): values = np.random.randn(10000, 100) values[::2] = np.nan self.df = DataFrame(values) def time_frame_fillna(self, inplace, method): self.df.fillna(inplace=inplace, method=method) class Dropna: params = (["all", "any"], [0, 1]) param_names = ["how", "axis"] def setup(self, how, axis): self.df = DataFrame(np.random.randn(10000, 1000)) self.df.iloc[50:1000, 20:50] = np.nan self.df.iloc[2000:3000] = np.nan self.df.iloc[:, 60:70] = np.nan self.df_mixed = self.df.copy() self.df_mixed["foo"] = "bar" def time_dropna(self, how, axis): self.df.dropna(how=how, axis=axis) def time_dropna_axis_mixed_dtypes(self, how, axis): self.df_mixed.dropna(how=how, axis=axis) class Count: params = [0, 1] param_names = ["axis"] def setup(self, axis): self.df = DataFrame(np.random.randn(10000, 1000)) self.df.iloc[50:1000, 20:50] = np.nan self.df.iloc[2000:3000] = np.nan self.df.iloc[:, 60:70] = np.nan self.df_mixed = self.df.copy() self.df_mixed["foo"] = "bar" self.df.index = MultiIndex.from_arrays([self.df.index, self.df.index]) self.df.columns = MultiIndex.from_arrays([self.df.columns, self.df.columns]) self.df_mixed.index = MultiIndex.from_arrays( [self.df_mixed.index, self.df_mixed.index] ) self.df_mixed.columns = MultiIndex.from_arrays( [self.df_mixed.columns, self.df_mixed.columns] ) def time_count_level_multi(self, axis): self.df.count(axis=axis, level=1) def time_count_level_mixed_dtypes_multi(self, axis): self.df_mixed.count(axis=axis, level=1) class Apply: def setup(self): self.df = DataFrame(np.random.randn(1000, 100)) self.s = Series(np.arange(1028.0)) self.df2 = DataFrame({i: self.s for i in range(1028)}) self.df3 = DataFrame(np.random.randn(1000, 3), columns=list("ABC")) def time_apply_user_func(self): self.df2.apply(lambda x: np.corrcoef(x, self.s)[(0, 1)]) def time_apply_axis_1(self): self.df.apply(lambda x: x + 1, axis=1) def time_apply_lambda_mean(self): self.df.apply(lambda x: x.mean()) def time_apply_np_mean(self): self.df.apply(np.mean) def time_apply_pass_thru(self): self.df.apply(lambda x: x) def time_apply_ref_by_name(self): self.df3.apply(lambda x: x["A"] + x["B"], axis=1) class Dtypes: def setup(self): self.df = DataFrame(np.random.randn(1000, 1000)) def time_frame_dtypes(self): self.df.dtypes class Equals: def setup(self): N = 10 ** 3 self.float_df = DataFrame(np.random.randn(N, N)) self.float_df_nan = self.float_df.copy() self.float_df_nan.iloc[-1, -1] = np.nan self.object_df = DataFrame("foo", index=range(N), columns=range(N)) self.object_df_nan = self.object_df.copy() self.object_df_nan.iloc[-1, -1] = np.nan self.nonunique_cols = self.object_df.copy() self.nonunique_cols.columns = ["A"] * len(self.nonunique_cols.columns) self.nonunique_cols_nan = self.nonunique_cols.copy() self.nonunique_cols_nan.iloc[-1, -1] = np.nan def time_frame_float_equal(self): self.float_df.equals(self.float_df) def time_frame_float_unequal(self): 
self.float_df.equals(self.float_df_nan) def time_frame_nonunique_equal(self): self.nonunique_cols.equals(self.nonunique_cols) def time_frame_nonunique_unequal(self): self.nonunique_cols.equals(self.nonunique_cols_nan) def time_frame_object_equal(self): self.object_df.equals(self.object_df) def time_frame_object_unequal(self): self.object_df.equals(self.object_df_nan) class Interpolate: params = [None, "infer"] param_names = ["downcast"] def setup(self, downcast): N = 10000 # this is the worst case, where every column has NaNs. self.df = DataFrame(np.random.randn(N, 100)) self.df.values[::2] = np.nan self.df2 = DataFrame( { "A": np.arange(0, N), "B": np.random.randint(0, 100, N), "C": np.random.randn(N), "D": np.random.randn(N), } ) self.df2.loc[1::5, "A"] = np.nan self.df2.loc[1::5, "C"] = np.nan def time_interpolate(self, downcast): self.df.interpolate(downcast=downcast) def time_interpolate_some_good(self, downcast): self.df2.interpolate(downcast=downcast) class Shift: # frame shift speedup issue-5609 params = [0, 1] param_names = ["axis"] def setup(self, axis): self.df = DataFrame(np.random.rand(10000, 500)) def time_shift(self, axis): self.df.shift(1, axis=axis) class Nunique: def setup(self): self.df = DataFrame(np.random.randn(10000, 1000)) def time_frame_nunique(self): self.df.nunique() class Duplicated: def setup(self): n = 1 << 20 t = date_range("2015-01-01", freq="S", periods=(n // 64)) xs = np.random.randn(n // 64).round(2) self.df = DataFrame( { "a": np.random.randint(-1 << 8, 1 << 8, n), "b": np.random.choice(t, n), "c": np.random.choice(xs, n), } ) self.df2 = DataFrame(np.random.randn(1000, 100).astype(str)).T def time_frame_duplicated(self): self.df.duplicated() def time_frame_duplicated_wide(self): self.df2.duplicated() class XS: params = [0, 1] param_names = ["axis"] def setup(self, axis): self.N = 10 ** 4 self.df = DataFrame(np.random.randn(self.N, self.N)) def time_frame_xs(self, axis): self.df.xs(self.N / 2, axis=axis) class SortValues: params = [True, False] param_names = ["ascending"] def setup(self, ascending): self.df = DataFrame(np.random.randn(1000000, 2), columns=list("AB")) def time_frame_sort_values(self, ascending): self.df.sort_values(by="A", ascending=ascending) class SortIndexByColumns: def setup(self): N = 10000 K = 10 self.df = DataFrame( { "key1": tm.makeStringIndex(N).values.repeat(K), "key2": tm.makeStringIndex(N).values.repeat(K), "value": np.random.randn(N * K), } ) def time_frame_sort_values_by_columns(self): self.df.sort_values(by=["key1", "key2"]) class Quantile: params = [0, 1] param_names = ["axis"] def setup(self, axis): self.df = DataFrame(np.random.randn(1000, 3), columns=list("ABC")) def time_frame_quantile(self, axis): self.df.quantile([0.1, 0.5], axis=axis) class GetDtypeCounts: # 2807 def setup(self): self.df = DataFrame(np.random.randn(10, 10000)) def time_frame_get_dtype_counts(self): with warnings.catch_warnings(record=True): self.df.dtypes.value_counts() def time_info(self): self.df.info() class NSort: params = ["first", "last", "all"] param_names = ["keep"] def setup(self, keep): self.df = DataFrame(np.random.randn(100000, 3), columns=list("ABC")) def time_nlargest_one_column(self, keep): self.df.nlargest(100, "A", keep=keep) def time_nlargest_two_columns(self, keep): self.df.nlargest(100, ["A", "B"], keep=keep) def time_nsmallest_one_column(self, keep): self.df.nsmallest(100, "A", keep=keep) def time_nsmallest_two_columns(self, keep): self.df.nsmallest(100, ["A", "B"], keep=keep) class Describe: def setup(self): self.df = DataFrame( 
{ "a": np.random.randint(0, 100, int(1e6)), "b": np.random.randint(0, 100, int(1e6)), "c": np.random.randint(0, 100, int(1e6)), } ) def time_series_describe(self): self.df["a"].describe() def time_dataframe_describe(self): self.df.describe() class SelectDtypes: params = [100, 1000] param_names = ["n"] def setup(self, n): self.df = DataFrame(np.random.randn(10, n)) def time_select_dtypes(self, n): self.df.select_dtypes(include="int") class MemoryUsage: def setup(self): self.df = DataFrame(np.random.randn(100000, 2), columns=list("AB")) self.df2 = self.df.copy() self.df2["A"] = self.df2["A"].astype("object") def time_memory_usage(self): self.df.memory_usage(deep=True) def time_memory_usage_object_dtype(self): self.df2.memory_usage(deep=True) from .pandas_vb_common import setup # noqa: F401 isort:skip
bcheung92/Paperproject
refs/heads/master
gem5/src/arch/arm/ArmISA.py
6
# Copyright (c) 2012-2013, 2015 ARM Limited # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # Authors: Andreas Sandberg # Giacomo Gabrielli from m5.params import * from m5.proxy import * from m5.SimObject import SimObject from ArmPMU import ArmPMU # Enum for DecoderFlavour class DecoderFlavour(Enum): vals = ['Generic'] class ArmISA(SimObject): type = 'ArmISA' cxx_class = 'ArmISA::ISA' cxx_header = "arch/arm/isa.hh" system = Param.System(Parent.any, "System this ISA object belongs to") pmu = Param.ArmPMU(NULL, "Performance Monitoring Unit") decoderFlavour = Param.DecoderFlavour('Generic', "Decoder flavour specification") midr = Param.UInt32(0x410fc0f0, "MIDR value") # See section B4.1.93 - B4.1.94 of the ARM ARM # # !ThumbEE | !Jazelle | Thumb | ARM # Note: ThumbEE is disabled for now since we don't support CP14 # config registers and jumping to ThumbEE vectors id_pfr0 = Param.UInt32(0x00000031, "Processor Feature Register 0") # !Timer | Virti | !M Profile | TrustZone | ARMv4 id_pfr1 = Param.UInt32(0x00001011, "Processor Feature Register 1") # See section B4.1.89 - B4.1.92 of the ARM ARM # VMSAv7 support id_mmfr0 = Param.UInt32(0x10201103, "Memory Model Feature Register 0") id_mmfr1 = Param.UInt32(0x00000000, "Memory Model Feature Register 1") # no HW access | WFI stalling | ISB and DSB | # all TLB maintenance | no Harvard id_mmfr2 = Param.UInt32(0x01230000, "Memory Model Feature Register 2") # SuperSec | Coherent TLB | Bcast Maint | # BP Maint | Cache Maint Set/way | Cache Maint MVA id_mmfr3 = Param.UInt32(0x02102211, "Memory Model Feature Register 3") # See section B4.1.84 of ARM ARM # All values are latest for ARMv7-A profile id_isar0 = Param.UInt32(0x02101111, "Instruction Set Attribute Register 0") id_isar1 = Param.UInt32(0x02112111, "Instruction Set Attribute Register 1") id_isar2 = Param.UInt32(0x21232141, "Instruction Set Attribute Register 2") id_isar3 = Param.UInt32(0x01112131, "Instruction Set Attribute Register 3") id_isar4 = Param.UInt32(0x10010142, "Instruction Set Attribute Register 4") id_isar5 = Param.UInt32(0x00000000, "Instruction Set Attribute Register 5") fpsid = Param.UInt32(0x410430a0, "Floating-point System ID Register") # [31:0] is implementation defined id_aa64afr0_el1 = Param.UInt64(0x0000000000000000, "AArch64 Auxiliary Feature Register 0") # Reserved for future expansion id_aa64afr1_el1 = Param.UInt64(0x0000000000000000, "AArch64 Auxiliary Feature Register 1") # 1 CTX CMPs | 2 WRPs | 2 BRPs | !PMU | !Trace | Debug v8-A id_aa64dfr0_el1 = Param.UInt64(0x0000000000101006, "AArch64 Debug Feature Register 0") # Reserved for future expansion id_aa64dfr1_el1 = Param.UInt64(0x0000000000000000, "AArch64 Debug Feature Register 1") # !CRC32 | !SHA2 | !SHA1 | !AES id_aa64isar0_el1 = Param.UInt64(0x0000000000000000, "AArch64 Instruction Set Attribute Register 0") # Reserved for future expansion id_aa64isar1_el1 = Param.UInt64(0x0000000000000000, "AArch64 Instruction Set Attribute Register 1") # 4K | 64K | !16K | !BigEndEL0 | !SNSMem | !BigEnd | 8b ASID | 40b PA id_aa64mmfr0_el1 = Param.UInt64(0x0000000000f00002, "AArch64 Memory Model Feature Register 0") # Reserved for future expansion id_aa64mmfr1_el1 = Param.UInt64(0x0000000000000000, "AArch64 Memory Model Feature Register 1") # !GICv3 CP15 | AdvSIMD | FP | !EL3 | !EL2 | EL1 (AArch64) | EL0 (AArch64) # (no AArch32/64 interprocessing support for now) id_aa64pfr0_el1 = Param.UInt64(0x0000000000000011, "AArch64 Processor Feature Register 0") # Reserved for future expansion id_aa64pfr1_el1 = Param.UInt64(0x0000000000000000, "AArch64 Processor Feature Register 1")
adrienbrault/home-assistant
refs/heads/dev
homeassistant/components/sms/notify.py
6
"""Support for SMS notification services.""" import logging import gammu # pylint: disable=import-error import voluptuous as vol from homeassistant.components.notify import PLATFORM_SCHEMA, BaseNotificationService from homeassistant.const import CONF_NAME, CONF_RECIPIENT import homeassistant.helpers.config_validation as cv from .const import DOMAIN, SMS_GATEWAY _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_RECIPIENT): cv.string, vol.Optional(CONF_NAME): cv.string} ) def get_service(hass, config, discovery_info=None): """Get the SMS notification service.""" if SMS_GATEWAY not in hass.data[DOMAIN]: _LOGGER.error("SMS gateway not found, cannot initialize service") return gateway = hass.data[DOMAIN][SMS_GATEWAY] if discovery_info is None: number = config[CONF_RECIPIENT] else: number = discovery_info[CONF_RECIPIENT] return SMSNotificationService(gateway, number) class SMSNotificationService(BaseNotificationService): """Implement the notification service for SMS.""" def __init__(self, gateway, number): """Initialize the service.""" self.gateway = gateway self.number = number async def async_send_message(self, message="", **kwargs): """Send SMS message.""" smsinfo = { "Class": -1, "Unicode": False, "Entries": [{"ID": "ConcatenatedTextLong", "Buffer": message}], } try: # Encode messages encoded = gammu.EncodeSMS(smsinfo) except gammu.GSMError as exc: _LOGGER.error("Encoding message %s failed: %s", message, exc) return # Send messages for encoded_message in encoded: # Fill in numbers encoded_message["SMSC"] = {"Location": 1} encoded_message["Number"] = self.number try: # Actually send the message await self.gateway.send_sms_async(encoded_message) except gammu.GSMError as exc: _LOGGER.error("Sending to %s failed: %s", self.number, exc)
felipenaselva/felipe.repository
refs/heads/master
script.module.placenta/lib/resources/lib/modules/jsunpack.py
3
""" resolveurl XBMC Addon Copyright (C) 2013 Bstrdsmkr This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Adapted for use in xbmc from: https://github.com/einars/js-beautify/blob/master/python/jsbeautifier/unpackers/packer.py usage: if detect(some_string): unpacked = unpack(some_string) Unpacker for Dean Edward's p.a.c.k.e.r """ import re def detect(source): """Detects whether `source` is P.A.C.K.E.R. coded.""" source = source.replace(' ', '') if re.search('eval\(function\(p,a,c,k,e,(?:r|d)', source): return True else: return False def unpack(source): """Unpacks P.A.C.K.E.R. packed js code.""" payload, symtab, radix, count = _filterargs(source) if count != len(symtab): raise UnpackingError('Malformed p.a.c.k.e.r. symtab.') try: unbase = Unbaser(radix) except TypeError: raise UnpackingError('Unknown p.a.c.k.e.r. encoding.') def lookup(match): """Look up symbols in the synthetic symtab.""" word = match.group(0) return symtab[unbase(word)] or word source = re.sub(r'\b\w+\b', lookup, payload) return _replacestrings(source) def _filterargs(source): """Juice from a source file the four args needed by decoder.""" argsregex = (r"}\s*\('(.*)',\s*(.*?),\s*(\d+),\s*'(.*?)'\.split\('\|'\)") args = re.search(argsregex, source, re.DOTALL).groups() try: payload, radix, count, symtab = args radix = 36 if not radix.isdigit() else int(radix) return payload, symtab.split('|'), radix, int(count) except ValueError: raise UnpackingError('Corrupted p.a.c.k.e.r. data.') def _replacestrings(source): """Strip string lookup table (list) and replace values in source.""" match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL) if match: varname, strings = match.groups() startpoint = len(match.group(0)) lookup = strings.split('","') variable = '%s[%%d]' % varname for index, value in enumerate(lookup): source = source.replace(variable % index, '"%s"' % value) return source[startpoint:] return source class Unbaser(object): """Functor for a given base. 
Will efficiently convert strings to natural numbers.""" ALPHABET = { 62: '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', 95: (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ' '[\]^_`abcdefghijklmnopqrstuvwxyz{|}~') } def __init__(self, base): self.base = base # If base can be handled by int() builtin, let it do it for us if 2 <= base <= 36: self.unbase = lambda string: int(string, base) else: if base < 62: self.ALPHABET[base] = self.ALPHABET[62][0:base] elif 62 < base < 95: self.ALPHABET[base] = self.ALPHABET[95][0:base] # Build conversion dictionary cache try: self.dictionary = dict((cipher, index) for index, cipher in enumerate(self.ALPHABET[base])) except KeyError: raise TypeError('Unsupported base encoding.') self.unbase = self._dictunbaser def __call__(self, string): return self.unbase(string) def _dictunbaser(self, string): """Decodes a value to an integer.""" ret = 0 for index, cipher in enumerate(string[::-1]): ret += (self.base ** index) * self.dictionary[cipher] return ret class UnpackingError(Exception): """Badly packed source or general error. Argument is a meaningful description.""" pass if __name__ == "__main__": # test = '''eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('4(\'30\').2z({2y:\'5://a.8.7/i/z/y/w.2x\',2w:{b:\'2v\',19:\'<p><u><2 d="20" c="#17">2u 19.</2></u><16/><u><2 d="18" c="#15">2t 2s 2r 2q.</2></u></p>\',2p:\'<p><u><2 d="20" c="#17">2o 2n b.</2></u><16/><u><2 d="18" c="#15">2m 2l 2k 2j.</2></u></p>\',},2i:\'2h\',2g:[{14:"11",b:"5://a.8.7/2f/13.12"},{14:"2e",b:"5://a.8.7/2d/13.12"},],2c:"11",2b:[{10:\'2a\',29:\'5://v.8.7/t-m/m.28\'},{10:\'27\'}],26:{\'25-3\':{\'24\':{\'23\':22,\'21\':\'5://a.8.7/i/z/y/\',\'1z\':\'w\',\'1y\':\'1x\'}}},s:\'5://v.8.7/t-m/s/1w.1v\',1u:"1t",1s:"1r",1q:\'1p\',1o:"1n",1m:"1l",1k:\'5\',1j:\'o\',});l e;l k=0;l 6=0;4().1i(9(x){f(6>0)k+=x.r-6;6=x.r;f(q!=0&&k>=q){6=-1;4().1h();4().1g(o);$(\'#1f\').j();$(\'h.g\').j()}});4().1e(9(x){6=-1});4().1d(9(x){n(x)});4().1c(9(){$(\'h.g\').j()});9 n(x){$(\'h.g\').1b();f(e)1a;e=1;}',36,109,'||font||jwplayer|http|p0102895|me|vidto|function|edge3|file|color|size|vvplay|if|video_ad|div||show|tt102895|var|player|doPlay|false||21600|position|skin|test||static|1y7okrqkv4ji||00020|01|type|360p|mp4|video|label|FFFFFF|br|FF0000||deleted|return|hide|onComplete|onPlay|onSeek|play_limit_box|setFullscreen|stop|onTime|dock|provider|391|height|650|width|over|controlbar|5110|duration|uniform|stretching|zip|stormtrooper|213|frequency|prefix||path|true|enabled|preview|timeslidertooltipplugin|plugins|html5|swf|src|flash|modes|hd_default|3bjhohfxpiqwws4phvqtsnolxocychumk274dsnkblz6sfgq6uz6zt77gxia|240p|3bjhohfxpiqwws4phvqtsnolxocychumk274dsnkba36sfgq6uzy3tv2oidq|hd|original|ratio|broken|is|link|Your|such|No|nofile|more|any|availabe|Not|File|OK|previw|jpg|image|setup|flvplayer'.split('|')))''' # test = '''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return 
p}('y.x(A(\'%0%f%b%9%1%d%8%8%o%e%B%c%0%e%d%0%f%w%1%7%3%2%p%d%1%n%2%1%c%0%t%0%f%7%8%8%d%5%6%1%7%e%b%l%7%1%2%e%9%q%c%0%6%1%z%2%0%f%b%1%9%c%0%s%6%6%l%G%4%4%5%5%5%k%b%7%5%8%o%i%2%k%6%i%4%2%3%p%2%n%4%5%7%6%9%s%4%j%q%a%h%a%3%a%E%a%3%D%H%9%K%C%I%m%r%g%h%L%v%g%u%F%r%g%3%J%3%j%3%m%h%4\'));',48,48,'22|72|65|6d|2f|77|74|61|6c|63|4e|73|3d|6f|6e|20|4d|32|76|59|2e|70|51|64|69|62|79|31|68|30|7a|34|66|write|document|75|unescape|67|4f|5a|57|55|3a|44|47|4a|78|49'.split('|'),0,{}))''' # test = '''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('x.w(z(\'%1%f%9%b%0%d%7%7%m%e%A%c%1%e%d%1%f%v%0%3%i%2%o%d%0%s%2%0%c%1%q%1%f%3%7%7%d%6%5%0%3%e%9%l%3%0%2%e%b%g%c%1%5%0%y%2%1%f%9%0%b%c%1%r%5%5%l%E%4%4%6%6%6%n%9%3%6%7%m%k%2%n%5%k%4%2%i%o%2%s%4%6%3%5%b%r%4%8%D%h%C%a%F%8%H%B%I%h%i%a%g%8%u%a%q%j%t%j%g%8%t%h%p%j%p%a%G%4\'));',45,45,'72|22|65|61|2f|74|77|6c|5a|73|55|63|3d|6f|6e|20|79|59|6d|4d|76|70|69|2e|62|7a|30|68|64|44|54|66|write|document|75|unescape|67|51|32|6a|3a|35|5f|47|34'.split('|'),0,{}))''' #test = '''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('q.r(s(\'%h%t%a%p%u%6%c%n%0%5%l%4%2%4%7%j%0%8%1%o%b%3%7%m%1%8%a%7%b%3%d%6%1%f%0%v%1%5%D%9%0%5%c%g%0%4%A%9%0%f%k%z%2%8%1%C%2%i%d%6%2%3%k%j%2%3%y%e%x%w%g%B%E%F%i%h%e\'));',42,42,'5a|4d|4f|54|6a|44|33|6b|57|7a|56|4e|68|55|3e|47|69|65|6d|32|45|46|31|6f|30|75|document|write|unescape|6e|62|6c|2f|3c|22|79|63|66|78|59|72|61'.split('|'),0,{}))''' #test='''eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('8("39").38({37:[{p:"4://1.3.2/36/v.35",34:"33"}],32:"4://1.3.2/i/31/30/2z.2y",2x:"2w",2v:"q%",2u:"q%",2t:"16:9",2s:"2r",2q:"2p",2o:[{p:"4://3.2/j?h=2n&g=7",2m:"2l"}],2k:{2j:\'#2i\',2h:14,2g:"2f",2e:0},"2d":{2c:"%2b 2a%o%29%28%27%26.2%25-7.a%22 24%e 23%e 21%e 20%1z 1y%o%1x%22 1w%1v 1u%1t%n%1s%1r%n",1q:"4://3.2/7.a"},1p:"1o",1n:"1m.1l | 1k 1j 1i 1h 1g ",1f:"4://3.2"});1e b,d;8().1d(6(x){k(5>0&&x.1c>=5&&d!=1){d=1;$(\'c.1b\').1a(\'19\')}});8().18(6(x){m(x)});8().17(6(){$(\'c.l\').15()});6 m(x){$(\'c.l\').13();k(b)12;b=1;$.11(\'4://3.2/j?h=10&g=7&z=y-w-u-t-s\',6(f){$(\'#r\').a(f)})}',36,118,'||tv|putload|https||function|3t1tlhv83pqr|jwplayer||html|vvplay|div|vvad|3D0|data|file_code|op||dl|if|video_ad|doPlay|3E|3D|file|100|fviews|2b320c6ae13efa71a060a7076ca296c2|1485454645|106||81||32755|hash|view|get|return|hide||show||onComplete|onPlay|slow|fadeIn|video_ad_fadein|position|onTime|var|aboutlink|Home|Sharing|And|Uploading|Video|TV|PUTLOAD|abouttext|vapor|skin|link|2FIFRAME|3C|3D500|HEIGHT|3D900|WIDTH|22true|allowfullscreen|3DNO|SCROLLING|MARGINHEIGHT||MARGINWIDTH|FRAMEBORDER|2Fembed|2Fputload|2F|3A|22http|SRC|3CIFRAME|code|sharing|backgroundOpacity|Verdana|fontFamily|fontSize|FFFFFF|color|captions|thumbnails|kind|get_slides|tracks|start|startparam|true|androidhls|aspectratio|height|width|4548|duration|jpg|3t1tlhv83pqr_xt|00006|01|image|480|label|mp4|ykgip2nkk62birmpnhxgrirvpya7wwl2t74yvewln767vcte7devr4is3yta|sources|setup|vplayer'.split('|')))''' 
#test='''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('w.C(a(){m(d(\'u\')==e){}},B);a p(4,h,f,6,7){2 3=q r();3.t(3.s()+(f*g*g*o));2 8="; 8="+3.D();k.l=4+"="+h+8+";7="+7+"; 6="+6}a d(4){2 b=4+"=";2 9=k.l.z(\';\');y(2 i=0;i<9.5;i++){2 c=9[i];x(c.A(0)==\' \')c=c.j(1,c.5);m(c.v(b)==0)n c.j(b.5,c.5)}n e}',40,40,'||var|date|name|length|path|domain|expires|ca|function|nameEQ||getcookie|null|hours|60|value||substring|document|cookie|if|return|1000|setcookie|new|Date|getTime|setTime|09ffa5fd853pbe2faac20a3e74138ea72a4807d21f2b|indexOf|window|while|for|split|charAt|5000|setTimeout|toGMTString'.split('|'),0,{}))''' #test='''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('j.G=j.w(3(){2(7.c("B-C").k>0){2(7.c("B-C")[0].Y=="X"){j.Z(G);9=7.c("B-C")[0];o=7.c("4").k;n=7.c("v").k;2(o>0){4=7.c("4")[0];4.H=3(){h(4,"e")};4.I=3(){d(4,"e")};9.J(7.F(\'4\'))}7.11.12=3(){2(n>0){d(b,"g")}2(o>0){d(4,"g")}};2(n>0){b=7.c("v")[0];b.H=3(){h(b,"e")};b.I=3(){d(b,"e")};9.J(7.F(\'v\'))}j.w(3(){2(n>0){2(8(9,"f-p")==m){d(b,"g")}l 2(8(9,"f-p")==i&&8(9,"f-E-M")==m&&8(b,"e")==i){h(b,"g")}}2(o>0){j.w(3(){2(8(9,"f-p")==m){d(4,"g")}l 2(8(9,"f-p")==i&&8(9,"f-E-M")==m&&8(4,"e")==i){h(4,"g")}},A)}},A)}}},A);3 8(S,T){y(\' \'+S.5+\' \').Q(\' \'+T+\' \')>-1}3 h(6,5){2(6.q){6.q.14(5)}l 2(!O(6,5)){6.5+=" "+5}}3 d(6,5){2(6.q){6.q.N(5)}l 2(O(6,5)){z P=D V(\'(\\s|^)\'+5+\'(\\s|$)\');6.5=6.5.U(P,\' \')}}W.10.N=3(){z x,a=16,L=a.k,u;R(L&&t.k){x=a[--L];R((u=t.Q(x))!==-1){t.17(u,1)}}y t};3 18(K){z r=D 19();r.1a("1c",K,i);r.1b(13);y r.15}',62,75,'||if|function|infobar|className|el|document|hasThisClass|videodiv||changerdiv|getElementsByClassName|removeClass|hover|vjs|hide|addClass|false|window|length|else|true|ischangerhere|isinfohere|paused|classList|xmlHttp||this|ax|changer|setInterval|what|return|var|500|video|js|new|user|getElementById|checkforvideo|onmouseenter|onmouseleave|appendChild|theUrl||inactive|remove|hasClass|reg|indexOf|while|element|cls|replace|RegExp|Array|DIV|tagName|clearInterval|prototype|body|onmousemove|null|add|responseText|arguments|splice|httpGet|XMLHttpRequest|open|send|GET'.split('|'),0,{}))''' #test='''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('z.A(9(){p(t(\'q\')==l){7(\'B\',\'a\',6,\'/\',\'.f.g\');7(\'q\',\'a\',1,\'/\',\'.f.g\');7(\'C\',\'a\',2,\'/\',\'.f.g\')}},v);9 7(4,k,m,b,h){3 5=D J();5.K(5.I()+(m*r*r*G));3 d="; d="+5.F();u.s=4+"="+k+d+";h="+h+"; b="+b}9 t(4){3 j=4+"=";3 e=u.s.x(\';\');w(3 i=0;i<e.8;i++){3 c=e[i];y(c.E(0)==\' \')c=c.n(1,c.8);p(c.H(j)==0)o c.n(j.8,c.8)}o 
l}',47,47,'|||var|name|date||setcookie|length|function|OK|path||expires|ca|vkpass|com|domain||nameEQ|value|null|hours|substring|return|if|09ffa5fd853pbe2faac20a3e74138ea72a4807d21f2b|60|cookie|getcookie|document|5000|for|split|while|window|setTimeout|09ffa5fd853bbe2faac20a3e74138ea72a4807d21f2b|09ffa5fd853rbe2faac20a3e74138ea72a4807d21f2b|new|charAt|toGMTString|1000|indexOf|getTime|Date|setTime'.split('|'),0,{}))''' #test='''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('8(2.C.B<A){8(2.9("h").e>0){2.9("h")[0].a.b=\'c\'}}H i(j,6){4 1=G F();4 5="";4 7=[];4 3;z(3 y 6){7.r(l(3)+\'=\'+l(6[3]))}5=7.w(\'&\').v(/%u/g,\'+\');1.J(\'I\',j);1.k(\'m-Y\',\'11/x-X-Z-10\');1.k(\'m-W\',5.e);1.M(5)}2.K(\'O\').a.b=\'c\';i(\'t://Q.R.n/S\',{P:\'L//N/U+V/12|T=\',s:\'q://o.p.n/E/d//D\',f:2.f});',62,65,'|XHR|document|name|var|urlEncodedData|data|urlEncodedDataPairs|if|getElementsByClassName|style|display|block||length|referrer||close_min|sendPost|link|setRequestHeader|encodeURIComponent|Content|com|drive|google|https|push|video_link|http|20|replace|join||in|for|400|scrollHeight|body|view|file|XMLHttpRequest|new|function|POST|open|getElementById|VVf0YnFvAZTWk1Yyq8kDH7o95L2Ywk8On80uA8aLu8FO0p42wWghKPQiym3BBhGDfBIyrfBRgdg613iNJucCNgamYPGyfh|send|vQItcR|ntfound|id|cdn25|vkpass|broken|hfPNqJY8djW1iNqYEMRb8064DovKJXBiunE26FSt3eI|wUZ||Length|www|Type|form|urlencoded|application|pdyfE0GfU9E6XxutQi2'.split('|'),0,{}))''' #test='''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}('f.w(\'<0 v u="" t="0" x="0-y b-B-A" s C="p" l="k:i%;n:i%;"><7 8="1://9.2.3/a/r" c="0/6" 5="q" 4="o" /><7 8="1://9.2.3/a/m" c="0/6" 5="z" 4="R" /><7 8="1://9.2.3/a/Q" c="0/6" 5="D" 4="P" /><7 8="1://9.2.3/a/U" c="0/6" 5="M" 4="F" /></0>\');d j="",h="1://2.3/";E.b=H(\'0\');b.I();b.K({J:j,L:h});d g=f.G("0");g.N(\'V\',S(e){e.O()},T);',58,58,'video|http|vkpass|com|res|label|mp4|source|src|cdn25|hop|vjs|type|var||document|myVideo|vlolink|100|vlofile|width|style|40d5a90cb487138ecd4711cf7fffe448|height|360|auto|360p|bec4ddbc646483156b9f434221520d8f|controls|id|poster|crossdomain|write|class|js|720p|skin|default|preload|1080p|window|480|getElementById|videojs|videoJsResolutionSwitcher|image|logobrand|destination|480p|addEventListener|preventDefault|1080|cb9eed6a123ac3856f87d4a88b89d939|720|function|false|7f553afd1a8ddd486d40a15a4b9c12c0|contextmenu'.split('|'),0,{}))''' test='''eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\w+'};c=1};while(c--){if(k[c]){p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c])}}return p}(';k N=\'\',2c=\'1T\';1P(k i=0;i<12;i++)N+=2c.V(C.K(C.H()*2c.E));k 2j=8,33=5O,2H=5Z,2e=5Y,36=B(t){k o=!1,i=B(){z(q.1k){q.2g(\'2Q\',e);D.2g(\'29\',e)}P{q.2A(\'2v\',e);D.2A(\'24\',e)}},e=B(){z(!o&&(q.1k||5V.38===\'29\'||q.2F===\'2n\')){o=!0;i();t()}};z(q.2F===\'2n\'){t()}P 
z(q.1k){q.1k(\'2Q\',e);D.1k(\'29\',e)}P{q.2u(\'2v\',e);D.2u(\'24\',e);k n=!1;2N{n=D.54==5I&&q.27}2R(a){};z(n&&n.2i){(B r(){z(o)F;2N{n.2i(\'16\')}2R(e){F 5v(r,50)};o=!0;i();t()})()}}};D[\'\'+N+\'\']=(B(){k t={t$:\'1T+/=\',5r:B(e){k d=\'\',l,a,i,s,c,r,n,o=0;e=t.e$(e);1b(o<e.E){l=e.14(o++);a=e.14(o++);i=e.14(o++);s=l>>2;c=(l&3)<<4|a>>4;r=(a&15)<<2|i>>6;n=i&63;z(2O(a)){r=n=64}P z(2O(i)){n=64};d=d+U.t$.V(s)+U.t$.V(c)+U.t$.V(r)+U.t$.V(n)};F d},13:B(e){k n=\'\',l,c,d,s,a,i,r,o=0;e=e.1x(/[^A-5l-5o-9\\+\\/\\=]/g,\'\');1b(o<e.E){s=U.t$.1F(e.V(o++));a=U.t$.1F(e.V(o++));i=U.t$.1F(e.V(o++));r=U.t$.1F(e.V(o++));l=s<<2|a>>4;c=(a&15)<<4|i>>2;d=(i&3)<<6|r;n=n+O.T(l);z(i!=64){n=n+O.T(c)};z(r!=64){n=n+O.T(d)}};n=t.n$(n);F n},e$:B(t){t=t.1x(/;/g,\';\');k n=\'\';1P(k o=0;o<t.E;o++){k e=t.14(o);z(e<1C){n+=O.T(e)}P z(e>5x&&e<5J){n+=O.T(e>>6|5F);n+=O.T(e&63|1C)}P{n+=O.T(e>>12|2E);n+=O.T(e>>6&63|1C);n+=O.T(e&63|1C)}};F n},n$:B(t){k o=\'\',e=0,n=5E=1B=0;1b(e<t.E){n=t.14(e);z(n<1C){o+=O.T(n);e++}P z(n>4V&&n<2E){1B=t.14(e+1);o+=O.T((n&31)<<6|1B&63);e+=2}P{1B=t.14(e+1);2o=t.14(e+2);o+=O.T((n&15)<<12|(1B&63)<<6|2o&63);e+=3}};F o}};k r=[\'6m==\',\'6w\',\'6F=\',\'6y\',\'6j\',\'5X=\',\'5U=\',\'6b=\',\'68\',\'69\',\'3b=\',\'6l=\',\'48\',\'49\',\'47=\',\'46\',\'43=\',\'44=\',\'45=\',\'4a=\',\'4b=\',\'4g=\',\'4h==\',\'4f==\',\'4e==\',\'4c==\',\'4d=\',\'42\',\'41\',\'3Q\',\'3R\',\'3P\',\'3O\',\'3L==\',\'3M=\',\'3N=\',\'3S=\',\'3T==\',\'3Z=\',\'40\',\'3Y=\',\'3X=\',\'3U==\',\'3V=\',\'3W==\',\'4i==\',\'4j=\',\'4G=\',\'4H\',\'4F==\',\'4E==\',\'4B\',\'4C==\',\'4D=\'],y=C.K(C.H()*r.E),W=t.13(r[y]),b=W,M=1,p=\'#4I\',a=\'#4J\',g=\'#4O\',w=\'#4P\',Q=\'\',Y=\'4N!\',v=\'4M 4K 4L 4A\\\'4z 4p 4q 2I 2W. 3K\\\'s 4n. 4k 4l\\\'t?\',f=\'4m 4r 4s-4x, 4y 4w\\\'t 4v 4t U 4u 4Q.\',s=\'I 3A, I 3a 3j 3i 2I 2W. 
3e 3g 3h!\',o=0,u=1,n=\'3c.3d\',l=0,L=e()+\'.2V\';B h(t){z(t)t=t.1Q(t.E-15);k n=q.2K(\'34\');1P(k o=n.E;o--;){k e=O(n[o].1J);z(e)e=e.1Q(e.E-15);z(e===t)F!0};F!1};B m(t){z(t)t=t.1Q(t.E-15);k e=q.3f;x=0;1b(x<e.E){1o=e[x].1R;z(1o)1o=1o.1Q(1o.E-15);z(1o===t)F!0;x++};F!1};B e(t){k o=\'\',e=\'1T\';t=t||30;1P(k n=0;n<t;n++)o+=e.V(C.K(C.H()*e.E));F o};B i(o){k i=[\'3J\',\'3C==\',\'3B\',\'3k\',\'35\',\'3y==\',\'3z=\',\'3D==\',\'3E=\',\'3I==\',\'3H==\',\'3G==\',\'3F\',\'3x\',\'3w\',\'35\'],a=[\'2P=\',\'3p==\',\'3o==\',\'3n==\',\'3l=\',\'3m\',\'3q=\',\'3r=\',\'2P=\',\'3v\',\'3u==\',\'3t\',\'3s==\',\'4o==\',\'5L==\',\'6a=\'];x=0;1K=[];1b(x<o){c=i[C.K(C.H()*i.E)];d=a[C.K(C.H()*a.E)];c=t.13(c);d=t.13(d);k r=C.K(C.H()*2)+1;z(r==1){n=\'//\'+c+\'/\'+d}P{n=\'//\'+c+\'/\'+e(C.K(C.H()*20)+4)+\'.2V\'};1K[x]=26 1V();1K[x].23=B(){k t=1;1b(t<7){t++}};1K[x].1J=n;x++}};B Z(t){};F{2B:B(t,a){z(66 q.J==\'1U\'){F};k o=\'0.1\',a=b,e=q.1c(\'1s\');e.1m=a;e.j.1i=\'1I\';e.j.16=\'-1h\';e.j.X=\'-1h\';e.j.1u=\'2d\';e.j.11=\'67\';k d=q.J.2L,r=C.K(d.E/2);z(r>15){k n=q.1c(\'2a\');n.j.1i=\'1I\';n.j.1u=\'1q\';n.j.11=\'1q\';n.j.X=\'-1h\';n.j.16=\'-1h\';q.J.6c(n,q.J.2L[r]);n.1d(e);k i=q.1c(\'1s\');i.1m=\'2M\';i.j.1i=\'1I\';i.j.16=\'-1h\';i.j.X=\'-1h\';q.J.1d(i)}P{e.1m=\'2M\';q.J.1d(e)};l=6h(B(){z(e){t((e.1X==0),o);t((e.1W==0),o);t((e.1O==\'2X\'),o);t((e.1N==\'2z\'),o);t((e.1E==0),o)}P{t(!0,o)}},21)},1S:B(e,m){z((e)&&(o==0)){o=1;o$.6g([\'6f\',\'6d\',\'6e\',1U,1U,!0]);D[\'\'+N+\'\'].1r();D[\'\'+N+\'\'].1S=B(){F}}P{k f=t.13(\'62\'),c=q.61(f);z((c)&&(o==0)){z((33%3)==0){k d=\'5R=\';d=t.13(d);z(h(d)){z(c.1G.1x(/\\s/g,\'\').E==0){o=1;D[\'\'+N+\'\'].1r()}}}};k p=!1;z(o==0){z((2H%3)==0){z(!D[\'\'+N+\'\'].2C){k l=[\'5S==\',\'5Q==\',\'4R=\',\'5P=\',\'5N=\'],s=l.E,a=l[C.K(C.H()*s)],n=a;1b(a==n){n=l[C.K(C.H()*s)]};a=t.13(a);n=t.13(n);i(C.K(C.H()*2)+1);k r=26 1V(),u=26 1V();r.23=B(){i(C.K(C.H()*2)+1);u.1J=n;i(C.K(C.H()*2)+1)};u.23=B(){o=1;i(C.K(C.H()*3)+1);D[\'\'+N+\'\'].1r()};r.1J=a;z((2e%3)==0){r.24=B(){z((r.11<8)&&(r.11>0)){D[\'\'+N+\'\'].1r()}}};i(C.K(C.H()*3)+1);D[\'\'+N+\'\'].2C=!0};D[\'\'+N+\'\'].1S=B(){F}}}}},1r:B(){z(u==1){k M=2w.5W(\'2D\');z(M>0){F!0}P{2w.6B(\'2D\',(C.H()+1)*21)}};k c=\'6x==\';c=t.13(c);z(!m(c)){k h=q.1c(\'6C\');h.1Z(\'6D\',\'6H\');h.1Z(\'38\',\'1l/6G\');h.1Z(\'1R\',c);q.2K(\'6E\')[0].1d(h)};6v(l);q.J.1G=\'\';q.J.j.19+=\'S:1q !17\';q.J.j.19+=\'1p:1q !17\';k Q=q.27.1W||D.32||q.J.1W,y=D.6k||q.J.1X||q.27.1X,r=q.1c(\'1s\'),b=e();r.1m=b;r.j.1i=\'2t\';r.j.16=\'0\';r.j.X=\'0\';r.j.11=Q+\'1z\';r.j.1u=y+\'1z\';r.j.2G=p;r.j.1Y=\'6q\';q.J.1d(r);k d=\'<a 1R="6t://6s.6r" j="G-1e:10.6I;G-1n:1j-1g;1f:6u;">6n 6i 5M 5b-5a 34</a>\';d=d.1x(\'59\',e());d=d.1x(\'57\',e());k i=q.1c(\'1s\');i.1G=d;i.j.1i=\'1I\';i.j.1y=\'1H\';i.j.16=\'1H\';i.j.11=\'5c\';i.j.1u=\'5d\';i.j.1Y=\'2h\';i.j.1E=\'.6\';i.j.2p=\'2k\';i.1k(\'5i\',B(){n=n.5h(\'\').5g().5e(\'\');D.2f.1R=\'//\'+n});q.1L(b).1d(i);k o=q.1c(\'1s\'),R=e();o.1m=R;o.j.1i=\'2t\';o.j.X=y/7+\'1z\';o.j.56=Q-55+\'1z\';o.j.4W=y/3.5+\'1z\';o.j.2G=\'#4S\';o.j.1Y=\'2h\';o.j.19+=\'G-1n: "4T 4X", 1w, 1t, 1j-1g !17\';o.j.19+=\'4Y-1u: 53 !17\';o.j.19+=\'G-1e: 52 !17\';o.j.19+=\'1l-1v: 1A !17\';o.j.19+=\'1p: 4Z !17\';o.j.1O+=\'2T\';o.j.37=\'1H\';o.j.51=\'1H\';o.j.5j=\'2s\';q.J.1d(o);o.j.5k=\'1q 5C 5B -5z 5A(0,0,0,0.3)\';o.j.1N=\'2l\';k x=30,Z=22,W=18,L=18;z((D.32<2Y)||(5K.11<2Y)){o.j.2Z=\'50%\';o.j.19+=\'G-1e: 5G !17\';o.j.37=\'5y;\';i.j.2Z=\'65%\';k x=22,Z=18,W=12,L=12};o.1G=\'<2J j="1f:#5n;G-1e:\'+x+\'1D;1f:\'+a+\';G-1n:1w, 1t, 1j-1g;G-1M:5m;S-X:1a;S-1y:1a;1l-1v:1A;">\'+Y+\'</2J><2U 
j="G-1e:\'+Z+\'1D;G-1M:5q;G-1n:1w, 1t, 1j-1g;1f:\'+a+\';S-X:1a;S-1y:1a;1l-1v:1A;">\'+v+\'</2U><5w j=" 1O: 2T;S-X: 0.2S;S-1y: 0.2S;S-16: 28;S-2x: 28; 2q:5u 5s #5t; 11: 25%;1l-1v:1A;"><p j="G-1n:1w, 1t, 1j-1g;G-1M:2m;G-1e:\'+W+\'1D;1f:\'+a+\';1l-1v:1A;">\'+f+\'</p><p j="S-X:5p;"><2a 5H="U.j.1E=.9;" 5D="U.j.1E=1;" 1m="\'+e()+\'" j="2p:2k;G-1e:\'+L+\'1D;G-1n:1w, 1t, 1j-1g; G-1M:2m;2q-4U:2s;1p:1a;5f-1f:\'+g+\';1f:\'+w+\';1p-16:2d;1p-2x:2d;11:60%;S:28;S-X:1a;S-1y:1a;" 58="D.2f.6p();">\'+s+\'</2a></p>\'}}})();D.2r=B(t,e){k a=6z.6A,i=D.6o,r=a(),n,o=B(){a()-r<e?n||i(o):t()};i(o);F{5T:B(){n=1}}};k 2y;z(q.J){q.J.j.1N=\'2l\'};36(B(){z(q.1L(\'2b\')){q.1L(\'2b\').j.1N=\'2X\';q.1L(\'2b\').j.1O=\'2z\'};2y=D.2r(B(){D[\'\'+N+\'\'].2B(D[\'\'+N+\'\'].1S,D[\'\'+N+\'\'].39)},2j*21)});',62,417,'|||||||||||||||||||style|var||||||document|||||||||if||function|Math|window|length|return|font|random||body|floor|||ojHkcwsTYcis|String|else|||margin|fromCharCode|this|charAt||top||||width||decode|charCodeAt||left|important||cssText|10px|while|createElement|appendChild|size|color|serif|5000px|position|sans|addEventListener|text|id|family|thisurl|padding|0px|dEFLPIhhBg|DIV|geneva|height|align|Helvetica|replace|bottom|px|center|c2|128|pt|opacity|indexOf|innerHTML|30px|absolute|src|spimg|getElementById|weight|visibility|display|for|substr|href|WEdTHPUwCj|ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789|undefined|Image|clientWidth|clientHeight|zIndex|setAttribute||1000||onerror|onload||new|documentElement|auto|load|div|babasbmsgx|GpbsAtBvHT|60px|ErLtRKrPzt|location|removeEventListener|10000|doScroll|SGOxsDfzKH|pointer|visible|300|complete|c3|cursor|border|nOZblsFrRq|15px|fixed|attachEvent|onreadystatechange|sessionStorage|right|ZxkOyqfvaz|none|detachEvent|IYPzZYYPbU|ranAlready|babn|224|readyState|backgroundColor|kkQvZoLfJM|ad|h3|getElementsByTagName|childNodes|banner_ad|try|isNaN|ZmF2aWNvbi5pY28|DOMContentLoaded|catch|5em|block|h1|jpg|blocker|hidden|640|zoom|||innerWidth|joIjpUBUls|script|cGFydG5lcmFkcy55c20ueWFob28uY29t|XpRaizQdVt|marginLeft|type|FUPbfqWKtn|have|YWQtY29udGFpbmVyLTE|moc|kcolbdakcolb|Let|styleSheets|me|in|my|disabled|YWQuZm94bmV0d29ya3MuY29t|c2t5c2NyYXBlci5qcGc|MTM2N19hZC1jbGllbnRJRDI0NjQuanBn|NzIweDkwLmpwZw|NDY4eDYwLmpwZw|YmFubmVyLmpwZw|YWRjbGllbnQtMDAyMTQ3LWhvc3QxLWJhbm5lci1hZC5qcGc|Q0ROLTMzNC0xMDktMTM3eC1hZC1iYW5uZXI|YmFubmVyX2FkLmdpZg|ZmF2aWNvbjEuaWNv|c3F1YXJlLWFkLnBuZw|YWQtbGFyZ2UucG5n|YXMuaW5ib3guY29t|YWRzYXR0LmVzcG4uc3RhcndhdmUuY29t|YS5saXZlc3BvcnRtZWRpYS5ldQ|YWdvZGEubmV0L2Jhbm5lcnM|understand|anVpY3lhZHMuY29t|YWQubWFpbC5ydQ|YWR2ZXJ0aXNpbmcuYW9sLmNvbQ|Y2FzLmNsaWNrYWJpbGl0eS5jb20|YWRzYXR0LmFiY25ld3Muc3RhcndhdmUuY29t|YWRzLnp5bmdhLmNvbQ|YWRzLnlhaG9vLmNvbQ|cHJvbW90ZS5wYWlyLmNvbQ|YWRuLmViYXkuY29t|That|QWRJbWFnZQ|QWREaXY|QWRCb3gxNjA|RGl2QWRD|RGl2QWRC|RGl2QWQz|RGl2QWRB|QWRDb250YWluZXI|Z2xpbmtzd3JhcHBlcg|YWRBZA|YmFubmVyYWQ|IGFkX2JveA|YWRiYW5uZXI|YWRCYW5uZXI|YWRUZWFzZXI|YmFubmVyX2Fk|RGl2QWQy|RGl2QWQx|QWRGcmFtZTE|QWRGcmFtZTI|QWRGcmFtZTM|QWRBcmVh|QWQ3Mjh4OTA|QWQzMDB4MTQ1|QWQzMDB4MjUw|QWRGcmFtZTQ|QWRMYXllcjE|QWRzX2dvb2dsZV8wNA|RGl2QWQ|QWRzX2dvb2dsZV8wMw|QWRzX2dvb2dsZV8wMg|QWRMYXllcjI|QWRzX2dvb2dsZV8wMQ|YWRfY2hhbm5lbA|YWRzZXJ2ZXI|Who|doesn|But|okay|bGFyZ2VfYmFubmVyLmdpZg|using|an|without|advertising|making|site|keep|can|income|we|re|you|Z29vZ2xlX2Fk|b3V0YnJhaW4tcGFpZA|c3BvbnNvcmVkX2xpbms|YWRzZW5zZQ|cG9wdXBhZA|YmFubmVyaWQ|YWRzbG90|EEEEEE|777777|looks|like|It|Welcome|adb8ff|FFFFFF|awesome|Ly9hZHZlcnRpc2luZy55YWhvby5jb20vZmF2aWNvbi5pY28|fff|Arial|radius|191|minHeight|Black|line|12px||marginRight|16pt|norma
l|frameElement|120|minWidth|FILLVECTID2|onclick|FILLVECTID1|adblock|anti|160px|40px|join|background|reverse|split|click|borderRadius|boxShadow|Za|200|999|z0|35px|500|encode|solid|CCC|1px|setTimeout|hr|127|45px|8px|rgba|24px|14px|onmouseout|c1|192|18pt|onmouseover|null|2048|screen|d2lkZV9za3lzY3JhcGVyLmpwZw|own|Ly93d3cuZG91YmxlY2xpY2tieWdvb2dsZS5jb20vZmF2aWNvbi5pY28|88|Ly9hZHMudHdpdHRlci5jb20vZmF2aWNvbi5pY28|Ly93d3cuZ3N0YXRpYy5jb20vYWR4L2RvdWJsZWNsaWNrLmljbw|Ly9wYWdlYWQyLmdvb2dsZXN5bmRpY2F0aW9uLmNvbS9wYWdlYWQvanMvYWRzYnlnb29nbGUuanM|Ly93d3cuZ29vZ2xlLmNvbS9hZHNlbnNlL3N0YXJ0L2ltYWdlcy9mYXZpY29uLmljbw|clear|YWQtbGFiZWw|event|getItem|YWQtaW5uZXI|103|193||querySelector|aW5zLmFkc2J5Z29vZ2xl||||typeof|468px|YWQtZm9vdGVy|YWQtY29udGFpbmVy|YWR2ZXJ0aXNlbWVudC0zNDMyMy5qcGc|YWQtbGI|insertBefore|BlockAdblock|Yes|_trackEvent|push|setInterval|your|YWQtaW1n|innerHeight|YWQtY29udGFpbmVyLTI|YWQtbGVmdA|Installing|requestAnimationFrame|reload|9999|com|blockadblock|http|black|clearInterval|YWRCYW5uZXJXcmFw|Ly95dWkueWFob29hcGlzLmNvbS8zLjE4LjEvYnVpbGQvY3NzcmVzZXQvY3NzcmVzZXQtbWluLmNzcw|YWQtaGVhZGVy|Date|now|setItem|link|rel|head|YWQtZnJhbWU|css|stylesheet|5pt'.split('|'),0,{}))''' print unpack(test)
siosio/intellij-community
refs/heads/master
python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_0/_pkg1_0_0/_pkg1_0_0_1/__init__.py
30
from ._pkg1_0_0_1_0 import *
from ._pkg1_0_0_1_1 import *
UXJera/JeremiahNyman.com
refs/heads/master
node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/easy_xml.py
1049
# Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import re import os def XmlToString(content, encoding='utf-8', pretty=False): """ Writes the XML content to disk, touching the file only if it has changed. Visual Studio files have a lot of pre-defined structures. This function makes it easy to represent these structures as Python data structures, instead of having to create a lot of function calls. Each XML element of the content is represented as a list composed of: 1. The name of the element, a string, 2. The attributes of the element, a dictionary (optional), and 3+. The content of the element, if any. Strings are simple text nodes and lists are child elements. Example 1: <test/> becomes ['test'] Example 2: <myelement a='value1' b='value2'> <childtype>This is</childtype> <childtype>it!</childtype> </myelement> becomes ['myelement', {'a':'value1', 'b':'value2'}, ['childtype', 'This is'], ['childtype', 'it!'], ] Args: content: The structured content to be converted. encoding: The encoding to report on the first XML line. pretty: True if we want pretty printing with indents and new lines. Returns: The XML content as a string. """ # We create a huge list of all the elements of the file. xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding] if pretty: xml_parts.append('\n') _ConstructContentList(xml_parts, content, pretty) # Convert it to a string return ''.join(xml_parts) def _ConstructContentList(xml_parts, specification, pretty, level=0): """ Appends the XML parts corresponding to the specification. Args: xml_parts: A list of XML parts to be appended to. specification: The specification of the element. See EasyXml docs. pretty: True if we want pretty printing with indents and new lines. level: Indentation level. """ # The first item in a specification is the name of the element. if pretty: indentation = ' ' * level new_line = '\n' else: indentation = '' new_line = '' name = specification[0] if not isinstance(name, str): raise Exception('The first item of an EasyXml specification should be ' 'a string. Specification was ' + str(specification)) xml_parts.append(indentation + '<' + name) # Optionally in second position is a dictionary of the attributes. rest = specification[1:] if rest and isinstance(rest[0], dict): for at, val in sorted(rest[0].iteritems()): xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True))) rest = rest[1:] if rest: xml_parts.append('>') all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True) multi_line = not all_strings if multi_line and new_line: xml_parts.append(new_line) for child_spec in rest: # If it's a string, append a text node. # Otherwise recurse over that child definition if isinstance(child_spec, str): xml_parts.append(_XmlEscape(child_spec)) else: _ConstructContentList(xml_parts, child_spec, pretty, level + 1) if multi_line and indentation: xml_parts.append(indentation) xml_parts.append('</%s>%s' % (name, new_line)) else: xml_parts.append('/>%s' % new_line) def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False, win32=False): """ Writes the XML content to disk, touching the file only if it has changed. Args: content: The structured content to be written. path: Location of the file. encoding: The encoding to report on the first line of the XML file. pretty: True if we want pretty printing with indents and new lines. 
""" xml_string = XmlToString(content, encoding, pretty) if win32 and os.linesep != '\r\n': xml_string = xml_string.replace('\n', '\r\n') # Get the old content try: f = open(path, 'r') existing = f.read() f.close() except: existing = None # It has changed, write it if existing != xml_string: f = open(path, 'w') f.write(xml_string) f.close() _xml_escape_map = { '"': '&quot;', "'": '&apos;', '<': '&lt;', '>': '&gt;', '&': '&amp;', '\n': '&#xA;', '\r': '&#xD;', } _xml_escape_re = re.compile( "(%s)" % "|".join(map(re.escape, _xml_escape_map.keys()))) def _XmlEscape(value, attr=False): """ Escape a string for inclusion in XML.""" def replace(match): m = match.string[match.start() : match.end()] # don't replace single quotes in attrs if attr and m == "'": return m return _xml_escape_map[m] return _xml_escape_re.sub(replace, value)
Linutux/Gourmet
refs/heads/master
gourmet/plugins/import_export/gxml_plugin/gxml_importer.py
1
import xml.sax, re, sys, xml.sax.saxutils from gourmet.importers import xml_importer from gourmet.gdebug import * from gourmet.gglobals import * import base64 unquoteattr = xml_importer.unquoteattr class RecHandler (xml_importer.RecHandler): def __init__ (self, total=None, conv=None, parent_thread=None): xml_importer.RecHandler.__init__(self,total,conv=conv,parent_thread=parent_thread) self.meta={} self.in_mixed = 0 self.meta['cuisine']={} self.meta['source']={} self.meta['category']={} #self.start_rec() def startElement(self, name, attrs): self.elbuf = "" if name=='category' or name=='cuisine' or name=='source': self.in_mixed=0 self.metaid=unquoteattr(attrs.get('id',"")) if name=='recipe': self.in_mixed=0 self.start_rec() for att in ['cuisine','servings', 'rating','description','category','source']: self.rec[att]=unquoteattr(attrs.get(att,"")) for att in ['cuisine','source','category']: raw = unquoteattr(attrs.get(att,'')) if raw: if self.meta[att].has_key(raw): self.rec[att]=self.meta[att][raw] else: self.rec[att]=raw print "Warning: can't translate ",raw if name=='image': self.in_mixed=0 if name=='inggroup': self.in_mixed=0 self.group=unquoteattr(attrs.get('name')) if name=='ingredient': self.in_mixed=0 self.start_ing(id=self.rec['id']) if attrs.get('optional',False): if attrs.get('optional',False) not in ['no','false','False','No','None']: #support for obsolete values self.ing['optional']=True if name=='ingref': self.in_mixed=0 self.start_ing(id=self.rec['id']) self.add_ref(unquoteattr(attrs.get('refid'))) self.add_amt(unquoteattr(attrs.get('amount'))) if name=='amount': self.in_mixed=0 for att in ['unit']: self.ing[att]=unquoteattr(attrs.get(att,"")) if name=='item': self.in_mixed=0 for att in ['ingkey']: self.ing[att]=unquoteattr(attrs.get(att,"")) if self.in_mixed: self.mixed += "<%s" % name for (n,v) in attrs.items(): self.mixed += " %s='%s'" % (n,v) self.mixed += ">" if name=='instructions' or name=='modifications': self.in_mixed = 1 self.mixed = "" def endElement (self, name): if name=='category' or name=='cuisine' or name=='source': self.meta[name][self.metaid]=xml.sax.saxutils.unescape(self.elbuf) if name=='title': self.rec['title']=xml.sax.saxutils.unescape(self.elbuf) if name=='image': self.rec['image']=base64.b64decode(self.elbuf) if name=='recipe': #self.rd.add_rec(self.rec) self.commit_rec() if name=='inggroup': self.group=None if name=='ingref': self.add_item(xml.sax.saxutils.unescape(self.elbuf)) self.commit_ing() if name=='ingredient': self.commit_ing() if name=='item': self.add_item(xml.sax.saxutils.unescape(self.elbuf)) if name=='amount': self.add_amt(self.elbuf) if name=='instructions' or name=='modifications': self.in_mixed = 0 self.mixed += self.elbuf # special unescaping of our grand little tags for (eop,op,ecl,cl) in [('&lt;%s&gt;'%t,'<%s>'%t,'&lt;/%s&gt;'%t,'</%s>'%t) for t in 'b','i','u']: self.mixed=self.mixed.replace(eop,op) self.mixed=self.mixed.replace(ecl,cl) self.rec[name]=self.mixed if self.in_mixed: self.mixed += self.elbuf self.mixed += "</%s>" % name class Converter (xml_importer.Converter): def __init__ (self, filename, conv=None): xml_importer.Converter.__init__(self,filename,RecHandler, recMarker="</recipe>", conv=conv, name='GXML Importer') def unquoteattr (str): return xml.sax.saxutils.unescape(str).replace("_"," ")
dushu1203/chromium.src
refs/heads/nw12
chrome/common/extensions/docs/server2/whats_new_data_source.py
41
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from itertools import groupby from operator import itemgetter import posixpath from data_source import DataSource from extensions_paths import JSON_TEMPLATES, PUBLIC_TEMPLATES from future import Future from platform_util import GetPlatforms class WhatsNewDataSource(DataSource): ''' This class creates a list of "what is new" by chrome version. ''' def __init__(self, server_instance, _): self._parse_cache = server_instance.compiled_fs_factory.ForJson( server_instance.host_file_system_provider.GetMaster()) self._object_store = server_instance.object_store_creator.Create( WhatsNewDataSource) self._platform_bundle = server_instance.platform_bundle def _GenerateChangesListWithVersion(self, platform, whats_new_json): return [{ 'id': change_id, 'type': change['type'], 'description': change['description'], 'version': change['version'] } for change_id, change in whats_new_json.iteritems()] def _GetAPIVersion(self, platform, api_name): version = None category = self._platform_bundle.GetAPICategorizer(platform).GetCategory( api_name) if category == 'chrome': channel_info = self._platform_bundle.GetAvailabilityFinder( platform).GetAPIAvailability(api_name).channel_info channel = channel_info.channel if channel == 'stable': version = channel_info.version return version def _GenerateAPIListWithVersion(self, platform): data = [] for api_name, api_model in self._platform_bundle.GetAPIModels( platform).IterModels(): version = self._GetAPIVersion(platform, api_name) if version: api = { 'name': api_name, 'description': api_model.description, 'version' : version, 'type': 'apis', } data.append(api) data.sort(key=itemgetter('version')) return data def _GenerateWhatsNewDict(self): whats_new_json_future = self._parse_cache.GetFromFile( posixpath.join(JSON_TEMPLATES, 'whats_new.json')) def _MakeDictByPlatform(platform): whats_new_json = whats_new_json_future.Get() platform_list = [] apis = self._GenerateAPIListWithVersion(platform) apis.extend(self._GenerateChangesListWithVersion(platform, whats_new_json)) apis.sort(key=itemgetter('version'), reverse=True) for version, group in groupby(apis, key=itemgetter('version')): whats_new_by_version = { 'version': version, } for item in group: item_type = item['type'] if item_type not in whats_new_by_version: whats_new_by_version[item_type] = [] whats_new_by_version[item_type].append(item) platform_list.append(whats_new_by_version) return platform_list def resolve(): return dict((platform, _MakeDictByPlatform(platform)) for platform in GetPlatforms()) return Future(callback=resolve) def _GetCachedWhatsNewData(self): data = self._object_store.Get('whats_new_data').Get() if data is None: data = self._GenerateWhatsNewDict().Get() self._object_store.Set('whats_new_data', data) return data def get(self, key): return self._GetCachedWhatsNewData().get(key) def Refresh(self, path): return self._GenerateWhatsNewDict()
turbokongen/home-assistant
refs/heads/dev
tests/components/device_automation/test_init.py
3
"""The test for light device automation.""" import pytest import homeassistant.components.automation as automation from homeassistant.components.websocket_api.const import TYPE_RESULT from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON from homeassistant.helpers import device_registry from homeassistant.setup import async_setup_component from tests.common import ( MockConfigEntry, async_mock_service, mock_device_registry, mock_registry, ) from tests.components.blueprint.conftest import stub_blueprint_populate # noqa @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def entity_reg(hass): """Return an empty, loaded, registry.""" return mock_registry(hass) def _same_lists(a, b): if len(a) != len(b): return False for d in a: if d not in b: return False return True async def test_websocket_get_actions(hass, hass_ws_client, device_reg, entity_reg): """Test we get the expected conditions from a light through websocket.""" await async_setup_component(hass, "device_automation", {}) config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id) expected_actions = [ { "domain": "light", "type": "turn_off", "device_id": device_entry.id, "entity_id": "light.test_5678", }, { "domain": "light", "type": "turn_on", "device_id": device_entry.id, "entity_id": "light.test_5678", }, { "domain": "light", "type": "toggle", "device_id": device_entry.id, "entity_id": "light.test_5678", }, ] client = await hass_ws_client(hass) await client.send_json( {"id": 1, "type": "device_automation/action/list", "device_id": device_entry.id} ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] actions = msg["result"] assert _same_lists(actions, expected_actions) async def test_websocket_get_conditions(hass, hass_ws_client, device_reg, entity_reg): """Test we get the expected conditions from a light through websocket.""" await async_setup_component(hass, "device_automation", {}) config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id) expected_conditions = [ { "condition": "device", "domain": "light", "type": "is_off", "device_id": device_entry.id, "entity_id": "light.test_5678", }, { "condition": "device", "domain": "light", "type": "is_on", "device_id": device_entry.id, "entity_id": "light.test_5678", }, ] client = await hass_ws_client(hass) await client.send_json( { "id": 1, "type": "device_automation/condition/list", "device_id": device_entry.id, } ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] conditions = msg["result"] assert _same_lists(conditions, expected_conditions) async def test_websocket_get_triggers(hass, hass_ws_client, device_reg, entity_reg): """Test we get the expected triggers from a light through websocket.""" await async_setup_component(hass, "device_automation", {}) config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) 
device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id) expected_triggers = [ { "platform": "device", "domain": "light", "type": "turned_off", "device_id": device_entry.id, "entity_id": "light.test_5678", }, { "platform": "device", "domain": "light", "type": "turned_on", "device_id": device_entry.id, "entity_id": "light.test_5678", }, ] client = await hass_ws_client(hass) await client.send_json( { "id": 1, "type": "device_automation/trigger/list", "device_id": device_entry.id, } ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] triggers = msg["result"] assert _same_lists(triggers, expected_triggers) async def test_websocket_get_action_capabilities( hass, hass_ws_client, device_reg, entity_reg ): """Test we get the expected action capabilities for an alarm through websocket.""" await async_setup_component(hass, "device_automation", {}) config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create( "alarm_control_panel", "test", "5678", device_id=device_entry.id ) hass.states.async_set( "alarm_control_panel.test_5678", "attributes", {"supported_features": 15} ) expected_capabilities = { "arm_away": {"extra_fields": []}, "arm_home": {"extra_fields": []}, "arm_night": {"extra_fields": []}, "disarm": { "extra_fields": [{"name": "code", "optional": True, "type": "string"}] }, "trigger": {"extra_fields": []}, } client = await hass_ws_client(hass) await client.send_json( {"id": 1, "type": "device_automation/action/list", "device_id": device_entry.id} ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] actions = msg["result"] id = 2 assert len(actions) == 5 for action in actions: await client.send_json( { "id": id, "type": "device_automation/action/capabilities", "action": action, } ) msg = await client.receive_json() assert msg["id"] == id assert msg["type"] == TYPE_RESULT assert msg["success"] capabilities = msg["result"] assert capabilities == expected_capabilities[action["type"]] id = id + 1 async def test_websocket_get_bad_action_capabilities( hass, hass_ws_client, device_reg, entity_reg ): """Test we get no action capabilities for a non existing domain.""" await async_setup_component(hass, "device_automation", {}) expected_capabilities = {} client = await hass_ws_client(hass) await client.send_json( { "id": 1, "type": "device_automation/action/capabilities", "action": {"domain": "beer"}, } ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] capabilities = msg["result"] assert capabilities == expected_capabilities async def test_websocket_get_no_action_capabilities( hass, hass_ws_client, device_reg, entity_reg ): """Test we get no action capabilities for a domain with no device action capabilities.""" await async_setup_component(hass, "device_automation", {}) expected_capabilities = {} client = await hass_ws_client(hass) await client.send_json( { "id": 1, "type": "device_automation/action/capabilities", "action": {"domain": "deconz"}, } ) msg = await client.receive_json() assert msg["id"] == 1 assert 
msg["type"] == TYPE_RESULT assert msg["success"] capabilities = msg["result"] assert capabilities == expected_capabilities async def test_websocket_get_condition_capabilities( hass, hass_ws_client, device_reg, entity_reg ): """Test we get the expected condition capabilities for a light through websocket.""" await async_setup_component(hass, "device_automation", {}) config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id) expected_capabilities = { "extra_fields": [ {"name": "for", "optional": True, "type": "positive_time_period_dict"} ] } client = await hass_ws_client(hass) await client.send_json( { "id": 1, "type": "device_automation/condition/list", "device_id": device_entry.id, } ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] conditions = msg["result"] id = 2 assert len(conditions) == 2 for condition in conditions: await client.send_json( { "id": id, "type": "device_automation/condition/capabilities", "condition": condition, } ) msg = await client.receive_json() assert msg["id"] == id assert msg["type"] == TYPE_RESULT assert msg["success"] capabilities = msg["result"] assert capabilities == expected_capabilities id = id + 1 async def test_websocket_get_bad_condition_capabilities( hass, hass_ws_client, device_reg, entity_reg ): """Test we get no condition capabilities for a non existing domain.""" await async_setup_component(hass, "device_automation", {}) expected_capabilities = {} client = await hass_ws_client(hass) await client.send_json( { "id": 1, "type": "device_automation/condition/capabilities", "condition": {"domain": "beer"}, } ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] capabilities = msg["result"] assert capabilities == expected_capabilities async def test_websocket_get_no_condition_capabilities( hass, hass_ws_client, device_reg, entity_reg ): """Test we get no condition capabilities for a domain with no device condition capabilities.""" await async_setup_component(hass, "device_automation", {}) expected_capabilities = {} client = await hass_ws_client(hass) await client.send_json( { "id": 1, "type": "device_automation/condition/capabilities", "condition": {"domain": "deconz"}, } ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] capabilities = msg["result"] assert capabilities == expected_capabilities async def test_websocket_get_trigger_capabilities( hass, hass_ws_client, device_reg, entity_reg ): """Test we get the expected trigger capabilities for a light through websocket.""" await async_setup_component(hass, "device_automation", {}) config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id) expected_capabilities = { "extra_fields": [ {"name": "for", "optional": True, "type": "positive_time_period_dict"} ] } client = await hass_ws_client(hass) await client.send_json( { "id": 1, "type": "device_automation/trigger/list", "device_id": 
device_entry.id, } ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] triggers = msg["result"] id = 2 assert len(triggers) == 2 for trigger in triggers: await client.send_json( { "id": id, "type": "device_automation/trigger/capabilities", "trigger": trigger, } ) msg = await client.receive_json() assert msg["id"] == id assert msg["type"] == TYPE_RESULT assert msg["success"] capabilities = msg["result"] assert capabilities == expected_capabilities id = id + 1 async def test_websocket_get_bad_trigger_capabilities( hass, hass_ws_client, device_reg, entity_reg ): """Test we get no trigger capabilities for a non existing domain.""" await async_setup_component(hass, "device_automation", {}) expected_capabilities = {} client = await hass_ws_client(hass) await client.send_json( { "id": 1, "type": "device_automation/trigger/capabilities", "trigger": {"domain": "beer"}, } ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] capabilities = msg["result"] assert capabilities == expected_capabilities async def test_websocket_get_no_trigger_capabilities( hass, hass_ws_client, device_reg, entity_reg ): """Test we get no trigger capabilities for a domain with no device trigger capabilities.""" await async_setup_component(hass, "device_automation", {}) expected_capabilities = {} client = await hass_ws_client(hass) await client.send_json( { "id": 1, "type": "device_automation/trigger/capabilities", "trigger": {"domain": "deconz"}, } ) msg = await client.receive_json() assert msg["id"] == 1 assert msg["type"] == TYPE_RESULT assert msg["success"] capabilities = msg["result"] assert capabilities == expected_capabilities async def test_automation_with_non_existing_integration(hass, caplog): """Test device automation with non existing integration.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": { "platform": "device", "device_id": "none", "domain": "beer", }, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert "Integration 'beer' not found" in caplog.text async def test_automation_with_integration_without_device_action(hass, caplog): """Test automation with integration without device action support.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event1"}, "action": {"device_id": "", "domain": "test"}, } }, ) assert ( "Integration 'test' does not support device automation actions" in caplog.text ) async def test_automation_with_integration_without_device_condition(hass, caplog): """Test automation with integration without device condition support.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event1"}, "condition": { "condition": "device", "device_id": "none", "domain": "test", }, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert ( "Integration 'test' does not support device automation conditions" in caplog.text ) async def test_automation_with_integration_without_device_trigger(hass, caplog): """Test automation with integration without device trigger support.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": { "platform": "device", "device_id": "none", "domain": "test", }, 
"action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert ( "Integration 'test' does not support device automation triggers" in caplog.text ) async def test_automation_with_bad_action(hass, caplog): """Test automation with bad device action.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event1"}, "action": {"device_id": "", "domain": "light"}, } }, ) assert "required key not provided" in caplog.text async def test_automation_with_bad_condition_action(hass, caplog): """Test automation with bad device action.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event1"}, "action": {"condition": "device", "device_id": "", "domain": "light"}, } }, ) assert "required key not provided" in caplog.text async def test_automation_with_bad_condition(hass, caplog): """Test automation with bad device condition.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event1"}, "condition": {"condition": "device", "domain": "light"}, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert "required key not provided" in caplog.text @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") async def test_automation_with_sub_condition(hass, calls): """Test automation with device condition under and/or conditions.""" DOMAIN = "light" platform = getattr(hass.components, f"test.{DOMAIN}") platform.init() assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}}) await hass.async_block_till_done() ent1, ent2, ent3 = platform.ENTITIES assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": {"platform": "event", "event_type": "test_event1"}, "condition": [ { "condition": "and", "conditions": [ { "condition": "device", "domain": DOMAIN, "device_id": "", "entity_id": ent1.entity_id, "type": "is_on", }, { "condition": "device", "domain": DOMAIN, "device_id": "", "entity_id": ent2.entity_id, "type": "is_on", }, ], } ], "action": { "service": "test.automation", "data_template": { "some": "and {{ trigger.%s }}" % "}} - {{ trigger.".join(("platform", "event.event_type")) }, }, }, { "trigger": {"platform": "event", "event_type": "test_event1"}, "condition": [ { "condition": "or", "conditions": [ { "condition": "device", "domain": DOMAIN, "device_id": "", "entity_id": ent1.entity_id, "type": "is_on", }, { "condition": "device", "domain": DOMAIN, "device_id": "", "entity_id": ent2.entity_id, "type": "is_on", }, ], } ], "action": { "service": "test.automation", "data_template": { "some": "or {{ trigger.%s }}" % "}} - {{ trigger.".join(("platform", "event.event_type")) }, }, }, ] }, ) await hass.async_block_till_done() assert hass.states.get(ent1.entity_id).state == STATE_ON assert hass.states.get(ent2.entity_id).state == STATE_OFF assert len(calls) == 0 hass.bus.async_fire("test_event1") await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["some"] == "or event - test_event1" hass.states.async_set(ent1.entity_id, STATE_OFF) hass.bus.async_fire("test_event1") await hass.async_block_till_done() assert len(calls) == 1 hass.states.async_set(ent2.entity_id, STATE_ON) hass.bus.async_fire("test_event1") await 
hass.async_block_till_done() assert len(calls) == 2 assert calls[1].data["some"] == "or event - test_event1" hass.states.async_set(ent1.entity_id, STATE_ON) hass.bus.async_fire("test_event1") await hass.async_block_till_done() assert len(calls) == 4 assert _same_lists( [calls[2].data["some"], calls[3].data["some"]], ["or event - test_event1", "and event - test_event1"], ) async def test_automation_with_bad_sub_condition(hass, caplog): """Test automation with bad device condition under and/or conditions.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "event", "event_type": "test_event1"}, "condition": { "condition": "and", "conditions": [{"condition": "device", "domain": "light"}], }, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert "required key not provided" in caplog.text async def test_automation_with_bad_trigger(hass, caplog): """Test automation with bad device trigger.""" assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "alias": "hello", "trigger": {"platform": "device", "domain": "light"}, "action": {"service": "test.automation", "entity_id": "hello.world"}, } }, ) assert "required key not provided" in caplog.text async def test_websocket_device_not_found(hass, hass_ws_client): """Test calling command with unknown device.""" await async_setup_component(hass, "device_automation", {}) client = await hass_ws_client(hass) await client.send_json( {"id": 1, "type": "device_automation/action/list", "device_id": "non-existing"} ) msg = await client.receive_json() assert msg["id"] == 1 assert not msg["success"] assert msg["error"] == {"code": "not_found", "message": "Device not found"}
ultcoin/EasterCoin2014
refs/heads/master
contrib/testgen/gen_base58_test_vectors.py
1000
#!/usr/bin/env python ''' Generate valid and invalid base58 address and private key test vectors. Usage: gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json ''' # 2012 Wladimir J. van der Laan # Released under MIT License import os from itertools import islice from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars import random from binascii import b2a_hex # key types PUBKEY_ADDRESS = 0 SCRIPT_ADDRESS = 5 PUBKEY_ADDRESS_TEST = 111 SCRIPT_ADDRESS_TEST = 196 PRIVKEY = 128 PRIVKEY_TEST = 239 metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed'] # templates for valid sequences templates = [ # prefix, payload_size, suffix, metadata # None = N/A ((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)), ((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)), ((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)), ((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)), ((PRIVKEY,), 32, (), (True, False, None, False)), ((PRIVKEY,), 32, (1,), (True, False, None, True)), ((PRIVKEY_TEST,), 32, (), (True, True, None, False)), ((PRIVKEY_TEST,), 32, (1,), (True, True, None, True)) ] def is_valid(v): '''Check vector v for validity''' result = b58decode_chk(v) if result is None: return False valid = False for template in templates: prefix = str(bytearray(template[0])) suffix = str(bytearray(template[2])) if result.startswith(prefix) and result.endswith(suffix): if (len(result) - len(prefix) - len(suffix)) == template[1]: return True return False def gen_valid_vectors(): '''Generate valid test vectors''' while True: for template in templates: prefix = str(bytearray(template[0])) payload = os.urandom(template[1]) suffix = str(bytearray(template[2])) rv = b58encode_chk(prefix + payload + suffix) assert is_valid(rv) metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None]) yield (rv, b2a_hex(payload), metadata) def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix): '''Generate possibly invalid vector''' if corrupt_prefix: prefix = os.urandom(1) else: prefix = str(bytearray(template[0])) if randomize_payload_size: payload = os.urandom(max(int(random.expovariate(0.5)), 50)) else: payload = os.urandom(template[1]) if corrupt_suffix: suffix = os.urandom(len(template[2])) else: suffix = str(bytearray(template[2])) return b58encode_chk(prefix + payload + suffix) def randbool(p = 0.5): '''Return True with P(p)''' return random.random() < p def gen_invalid_vectors(): '''Generate invalid test vectors''' # start with some manual edge-cases yield "", yield "x", while True: # kinds of invalid vectors: # invalid prefix # invalid payload length # invalid (randomized) suffix (add random data) # corrupt checksum for template in templates: val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2)) if random.randint(0,10)<1: # line corruption if randbool(): # add random character to end val += random.choice(b58chars) else: # replace random character in the middle n = random.randint(0, len(val)) val = val[0:n] + random.choice(b58chars) + val[n+1:] if not is_valid(val): yield val, if __name__ == '__main__': import sys, json iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors} try: uiter = iters[sys.argv[1]] except IndexError: uiter = gen_valid_vectors try: count = int(sys.argv[2]) except IndexError: count = 0 data = list(islice(uiter(), count)) 
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
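# A minimal sketch, not part of the upstream script: it walks one template by hand
# the same way gen_valid_vectors() does -- a mainnet pubkey address is the version
# byte 0x00, a random 20-byte payload and no suffix, run through Base58Check.
def _example_single_vector():
    prefix, payload_size, suffix, _metadata = templates[0]
    vector = b58encode_chk(str(bytearray(prefix)) +
                           os.urandom(payload_size) +
                           str(bytearray(suffix)))
    assert is_valid(vector)
    return vector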
SerCeMan/intellij-community
refs/heads/master
python/testData/refactoring/move/importFirstWithSlash/after/src/file2.py
83
__author__ = 'catherine'


def function_1():
    pass
karlito40/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/manifest/log.py
317
import logging

logging.basicConfig()
logger = logging.getLogger("manifest")
logger.setLevel(logging.DEBUG)


def get_logger():
    return logger
shijx12/DeepSim
refs/heads/master
lib/networks/PVAnet_test.py
3
# -------------------------------------------------------- # TFFRCNN - Resnet50 # Copyright (c) 2016 # Licensed under The MIT License [see LICENSE for details] # Written by miraclebiu # -------------------------------------------------------- import tensorflow as tf from .network import Network from ..fast_rcnn.config import cfg class PVAnet_test(Network): def __init__(self, trainable=True): self.inputs = [] self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3]) self.im_info = tf.placeholder(tf.float32, shape=[None, 3]) self.keep_prob = tf.placeholder(tf.float32) self.layers = dict({'data': self.data, 'im_info': self.im_info}) self.trainable = trainable self.setup() def setup(self): n_classes = cfg.NCLASSES # anchor_scales = [8, 16, 32] anchor_scales = cfg.ANCHOR_SCALES _feat_stride = [16, ] (self.feed('data') .pva_negation_block(7, 7, 16, 2, 2, name='conv1_1', negation=True) # downsample .max_pool(3, 3, 2, 2, padding='VALID', name='pool1') # downsample .conv(1, 1, 24, 1, 1, name='conv2_1/1/conv', biased=True, relu=False) .pva_negation_block_v2(3, 3, 24, 1, 1, 24, name='conv2_1/2', negation=False) .pva_negation_block_v2(1, 1, 64, 1, 1, 24, name='conv2_1/3', negation=True)) (self.feed('pool1') .conv(1, 1, 64, 1, 1, name='conv2_1/proj', relu=True)) (self.feed('conv2_1/3', 'conv2_1/proj') .add(name='conv2_1') .pva_negation_block_v2(1, 1, 24, 1, 1, 64, name='conv2_2/1', negation=False) .pva_negation_block_v2(3, 3, 24, 1, 1, 24, name='conv2_2/2', negation=False) .pva_negation_block_v2(1, 1, 64, 1, 1, 24, name='conv2_2/3', negation=True)) (self.feed('conv2_2/3', 'conv2_1') .add(name='conv2_2') .pva_negation_block_v2(1, 1, 24, 1, 1, 64, name='conv2_3/1', negation=False) .pva_negation_block_v2(3, 3, 24, 1, 1, 24, name='conv2_3/2', negation=False) .pva_negation_block_v2(1, 1, 64, 1, 1, 24, name='conv2_3/3', negation=True)) (self.feed('conv2_3/3', 'conv2_2') .add(name='conv2_3') .pva_negation_block_v2(1, 1, 48, 2, 2, 64, name='conv3_1/1', negation=False) # downsample .pva_negation_block_v2(3, 3, 48, 1, 1, 48, name='conv3_1/2', negation=False) .pva_negation_block_v2(1, 1, 128, 1, 1, 48, name='conv3_1/3', negation=True)) (self.feed('conv3_1/1/relu') .conv(1, 1, 128, 2, 2, name='conv3_1/proj', relu=True)) (self.feed('conv3_1/3', 'conv3_1/proj') # 128 .add(name='conv3_1') .pva_negation_block_v2(1, 1, 48, 1, 1, 128, name='conv3_2/1', negation=False) .pva_negation_block_v2(3, 3, 48, 1, 1, 48, name='conv3_2/2', negation=False) .pva_negation_block_v2(1, 1, 128, 1, 1, 48, name='conv3_2/3', negation=True)) (self.feed('conv3_2/3', 'conv3_1') # 128 .add(name='conv3_2') .pva_negation_block_v2(1, 1, 48, 1, 1, 128, name='conv3_3/1', negation=False) .pva_negation_block_v2(3, 3, 48, 1, 1, 48, name='conv3_3/2', negation=False) .pva_negation_block_v2(1, 1, 128, 1, 1, 48, name='conv3_3/3', negation=True)) (self.feed('conv3_3/3', 'conv3_2') # 128 .add(name='conv3_3') .pva_negation_block_v2(1, 1, 48, 1, 1, 128, name='conv3_4/1', negation=False) .pva_negation_block_v2(3, 3, 48, 1, 1, 48, name='conv3_4/2', negation=False) .pva_negation_block_v2(1, 1, 128, 1, 1, 48, name='conv3_4/3', negation=True)) (self.feed('conv3_4/3', 'conv3_3') # 128 .add(name='conv3_4') .max_pool(3, 3, 2, 2, padding='SAME', name='downsample')) # downsample (self.feed('conv3_4') .pva_inception_res_block(name='conv4_4', name_prefix='conv4_', type='a') # downsample .pva_inception_res_block(name='conv5_4', name_prefix='conv5_', type='b') # downsample .batch_normalization(name='conv5_4/last_bn', relu=False) .scale(c_in=384, 
name='conv5_4/last_bn_scale') .relu(name='conv5_4/last_relu')) (self.feed('conv5_4/last_relu') .upconv(tf.shape(self.layers['downsample']), 384, 4, 2, name='upsample', biased=False, relu=False, trainable=True)) # upsample (self.feed('downsample', 'conv4_4', 'upsample') .concat(axis=3, name='concat')) # ========= RPN ============ (self.feed('concat') .conv(1, 1, 128, 1, 1, name='convf_rpn', biased=True, relu=True) .conv(3, 3, 384, 1, 1, name='rpn_conv/3x3', biased=True, relu=True) .conv(1, 1, len(anchor_scales) * 3 * 2, 1, 1, padding='VALID', relu=False, name='rpn_cls_score')) (self.feed('rpn_conv/3x3') .conv(1, 1, len(anchor_scales) * 3 * 4, 1, 1, padding='VALID', relu=False, name='rpn_bbox_pred')) # ========= RoI Proposal ============ (self.feed('rpn_cls_score') .spatial_reshape_layer(2, name='rpn_cls_score_reshape') .spatial_softmax(name='rpn_cls_prob')) (self.feed('rpn_cls_prob') .spatial_reshape_layer(len(anchor_scales) * 3 * 2, name='rpn_cls_prob_reshape')) (self.feed('rpn_cls_prob_reshape', 'rpn_bbox_pred', 'im_info') .proposal_layer(_feat_stride, anchor_scales, 'TEST', name='rois')) # ========= RCNN ============ (self.feed('concat') .conv(1, 1, 384, 1, 1, name='convf_2', biased=True, relu=True)) (self.feed('convf_rpn', 'convf_2') .concat(axis=3, name='convf')) (self.feed('convf', 'rois') .roi_pool(7, 7, 1.0 / 16, name='roi_pooling') .fc(4096, name='fc6', relu=False) .bn_scale_combo(c_in = 4096, name='fc6', relu=True) .fc(4096, name='fc7', relu=False) .bn_scale_combo(c_in=4096, name='fc7', relu=True) .fc(n_classes, relu=False, name='cls_score') .softmax(name='cls_prob')) (self.feed('fc7') .fc(n_classes * 4, relu=False, name='bbox_pred'))
kapiziak/mtasa-blue
refs/heads/master
vendor/google-breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/message.py
261
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # http://code.google.com/p/protobuf/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # TODO(robinson): We should just make these methods all "pure-virtual" and move # all implementation out, into reflection.py for now. """Contains an abstract base class for protocol messages.""" __author__ = '[email protected] (Will Robinson)' class Error(Exception): pass class DecodeError(Error): pass class EncodeError(Error): pass class Message(object): """Abstract base class for protocol messages. Protocol message classes are almost always generated by the protocol compiler. These generated types subclass Message and implement the methods shown below. TODO(robinson): Link to an HTML document here. TODO(robinson): Document that instances of this class will also have an Extensions attribute with __getitem__ and __setitem__. Again, not sure how to best convey this. TODO(robinson): Document that the class must also have a static RegisterExtension(extension_field) method. Not sure how to best express at this point. """ # TODO(robinson): Document these fields and methods. __slots__ = [] DESCRIPTOR = None def __deepcopy__(self, memo=None): clone = type(self)() clone.MergeFrom(self) return clone def __eq__(self, other_msg): raise NotImplementedError def __ne__(self, other_msg): # Can't just say self != other_msg, since that would infinitely recurse. :) return not self == other_msg def __hash__(self): raise TypeError('unhashable object') def __str__(self): raise NotImplementedError def __unicode__(self): raise NotImplementedError def MergeFrom(self, other_msg): """Merges the contents of the specified message into current message. This method merges the contents of the specified message into the current message. Singular fields that are set in the specified message overwrite the corresponding fields in the current message. Repeated fields are appended. Singular sub-messages and groups are recursively merged. Args: other_msg: Message to merge into the current message. 
""" raise NotImplementedError def CopyFrom(self, other_msg): """Copies the content of the specified message into the current message. The method clears the current message and then merges the specified message using MergeFrom. Args: other_msg: Message to copy into the current one. """ if self is other_msg: return self.Clear() self.MergeFrom(other_msg) def Clear(self): """Clears all data that was set in the message.""" raise NotImplementedError def SetInParent(self): """Mark this as present in the parent. This normally happens automatically when you assign a field of a sub-message, but sometimes you want to make the sub-message present while keeping it empty. If you find yourself using this, you may want to reconsider your design.""" raise NotImplementedError def IsInitialized(self): """Checks if the message is initialized. Returns: The method returns True if the message is initialized (i.e. all of its required fields are set). """ raise NotImplementedError # TODO(robinson): MergeFromString() should probably return None and be # implemented in terms of a helper that returns the # of bytes read. Our # deserialization routines would use the helper when recursively # deserializing, but the end user would almost always just want the no-return # MergeFromString(). def MergeFromString(self, serialized): """Merges serialized protocol buffer data into this message. When we find a field in |serialized| that is already present in this message: - If it's a "repeated" field, we append to the end of our list. - Else, if it's a scalar, we overwrite our field. - Else, (it's a nonrepeated composite), we recursively merge into the existing composite. TODO(robinson): Document handling of unknown fields. Args: serialized: Any object that allows us to call buffer(serialized) to access a string of bytes using the buffer interface. TODO(robinson): When we switch to a helper, this will return None. Returns: The number of bytes read from |serialized|. For non-group messages, this will always be len(serialized), but for messages which are actually groups, this will generally be less than len(serialized), since we must stop when we reach an END_GROUP tag. Note that if we *do* stop because of an END_GROUP tag, the number of bytes returned does not include the bytes for the END_GROUP tag information. """ raise NotImplementedError def ParseFromString(self, serialized): """Like MergeFromString(), except we clear the object first.""" self.Clear() self.MergeFromString(serialized) def SerializeToString(self): """Serializes the protocol message to a binary string. Returns: A binary string representation of the message if all of the required fields in the message are set (i.e. the message is initialized). Raises: message.EncodeError if the message isn't initialized. """ raise NotImplementedError def SerializePartialToString(self): """Serializes the protocol message to a binary string. This method is similar to SerializeToString but doesn't check if the message is initialized. Returns: A string representation of the partial message. """ raise NotImplementedError # TODO(robinson): Decide whether we like these better # than auto-generated has_foo() and clear_foo() methods # on the instances themselves. This way is less consistent # with C++, but it makes reflection-type access easier and # reduces the number of magically autogenerated things. # # TODO(robinson): Be sure to document (and test) exactly # which field names are accepted here. Are we case-sensitive? 
# What do we do with fields that share names with Python keywords # like 'lambda' and 'yield'? # # nnorwitz says: # """ # Typically (in python), an underscore is appended to names that are # keywords. So they would become lambda_ or yield_. # """ def ListFields(self): """Returns a list of (FieldDescriptor, value) tuples for all fields in the message which are not empty. A singular field is non-empty if HasField() would return true, and a repeated field is non-empty if it contains at least one element. The fields are ordered by field number""" raise NotImplementedError def HasField(self, field_name): """Checks if a certain field is set for the message. Note if the field_name is not defined in the message descriptor, ValueError will be raised.""" raise NotImplementedError def ClearField(self, field_name): raise NotImplementedError def HasExtension(self, extension_handle): raise NotImplementedError def ClearExtension(self, extension_handle): raise NotImplementedError def ByteSize(self): """Returns the serialized size of this message. Recursively calls ByteSize() on all contained messages. """ raise NotImplementedError def _SetListener(self, message_listener): """Internal method used by the protocol message implementation. Clients should not call this directly. Sets a listener that this message will call on certain state transitions. The purpose of this method is to register back-edges from children to parents at runtime, for the purpose of setting "has" bits and byte-size-dirty bits in the parent and ancestor objects whenever a child or descendant object is modified. If the client wants to disconnect this Message from the object tree, she explicitly sets callback to None. If message_listener is None, unregisters any existing listener. Otherwise, message_listener must implement the MessageListener interface in internal/message_listener.py, and we discard any listener registered via a previous _SetListener() call. """ raise NotImplementedError
bigswitch/nova
refs/heads/master
plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
113
#!/usr/bin/env python # Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This script is used to configure iptables, ebtables, and arptables rules on XenServer hosts. """ import os import sys # This is written to Python 2.4, since that is what is available on XenServer import simplejson as json import novalib # noqa def main(dom_id, command, only_this_vif=None): xsls = novalib.execute_get_output('/usr/bin/xenstore-ls', '/local/domain/%s/vm-data/networking' % dom_id) macs = [line.split("=")[0].strip() for line in xsls.splitlines()] for mac in macs: xsread = novalib.execute_get_output('/usr/bin/xenstore-read', '/local/domain/%s/vm-data/networking/%s' % (dom_id, mac)) data = json.loads(xsread) for ip in data['ips']: if data["label"] == "public": vif = "vif%s.0" % dom_id else: vif = "vif%s.1" % dom_id if (only_this_vif is None) or (vif == only_this_vif): params = dict(IP=ip['ip'], VIF=vif, MAC=data['mac']) apply_ebtables_rules(command, params) apply_arptables_rules(command, params) apply_iptables_rules(command, params) # A note about adding rules: # Whenever we add any rule to iptables, arptables or ebtables we first # delete the same rule to ensure the rule only exists once. def apply_iptables_rules(command, params): iptables = lambda *rule: novalib.execute('/sbin/iptables', *rule) iptables('-D', 'FORWARD', '-m', 'physdev', '--physdev-in', params['VIF'], '-s', params['IP'], '-j', 'ACCEPT') if command == 'online': iptables('-A', 'FORWARD', '-m', 'physdev', '--physdev-in', params['VIF'], '-s', params['IP'], '-j', 'ACCEPT') def apply_arptables_rules(command, params): arptables = lambda *rule: novalib.execute('/sbin/arptables', *rule) arptables('-D', 'FORWARD', '--opcode', 'Request', '--in-interface', params['VIF'], '--source-ip', params['IP'], '--source-mac', params['MAC'], '-j', 'ACCEPT') arptables('-D', 'FORWARD', '--opcode', 'Reply', '--in-interface', params['VIF'], '--source-ip', params['IP'], '--source-mac', params['MAC'], '-j', 'ACCEPT') if command == 'online': arptables('-A', 'FORWARD', '--opcode', 'Request', '--in-interface', params['VIF'], '--source-mac', params['MAC'], '-j', 'ACCEPT') arptables('-A', 'FORWARD', '--opcode', 'Reply', '--in-interface', params['VIF'], '--source-ip', params['IP'], '--source-mac', params['MAC'], '-j', 'ACCEPT') def apply_ebtables_rules(command, params): ebtables = lambda *rule: novalib.execute("/sbin/ebtables", *rule) ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'], '--arp-ip-dst', params['IP'], '-j', 'ACCEPT') ebtables('-D', 'FORWARD', '-p', '0800', '-o', params['VIF'], '--ip-dst', params['IP'], '-j', 'ACCEPT') if command == 'online': ebtables('-A', 'FORWARD', '-p', '0806', '-o', params['VIF'], '--arp-ip-dst', params['IP'], '-j', 'ACCEPT') ebtables('-A', 'FORWARD', '-p', '0800', '-o', params['VIF'], '--ip-dst', params['IP'], '-j', 'ACCEPT') ebtables('-D', 'FORWARD', '-s', '!', params['MAC'], '-i', params['VIF'], '-j', 'DROP') if command == 'online': ebtables('-I', 'FORWARD', '1', '-s', '!', 
params['MAC'], '-i', params['VIF'], '-j', 'DROP') if __name__ == "__main__": if len(sys.argv) < 3: print ("usage: %s dom_id online|offline [vif]" % os.path.basename(sys.argv[0])) sys.exit(1) else: dom_id, command = sys.argv[1:3] vif = len(sys.argv) == 4 and sys.argv[3] or None main(dom_id, command, vif)
HyperionROM/android_external_chromium
refs/heads/kitkat
build/apply_locales.py
295
#!/usr/bin/env python # Copyright (c) 2009 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # TODO: remove this script when GYP has for loops import sys import optparse def main(argv): parser = optparse.OptionParser() usage = 'usage: %s [options ...] format_string locale_list' parser.set_usage(usage.replace('%s', '%prog')) parser.add_option('-d', dest='dash_to_underscore', action="store_true", default=False, help='map "en-US" to "en" and "-" to "_" in locales') (options, arglist) = parser.parse_args(argv) if len(arglist) < 3: print 'ERROR: need string and list of locales' return 1 str_template = arglist[1] locales = arglist[2:] results = [] for locale in locales: # For Cocoa to find the locale at runtime, it needs to use '_' instead # of '-' (http://crbug.com/20441). Also, 'en-US' should be represented # simply as 'en' (http://crbug.com/19165, http://crbug.com/25578). if options.dash_to_underscore: if locale == 'en-US': locale = 'en' locale = locale.replace('-', '_') results.append(str_template.replace('ZZLOCALE', locale)) # Quote each element so filename spaces don't mess up GYP's attempt to parse # it into a list. print ' '.join(["'%s'" % x for x in results]) if __name__ == '__main__': sys.exit(main(sys.argv))
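# A minimal usage sketch, not part of the upstream script: ZZLOCALE in the format
# string is substituted once per locale, and -d applies the Cocoa-style mapping
# described above ('en-US' becomes 'en', dashes become underscores).
def _example_apply_locales():
    # Prints: 'en.lproj' 'pt_BR.lproj' 'fr.lproj'
    return main(['apply_locales.py', '-d', 'ZZLOCALE.lproj', 'en-US', 'pt-BR', 'fr'])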
rishibarve/incubator-airflow
refs/heads/master
airflow/operators/hive_to_druid.py
39
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from airflow.hooks.hive_hooks import HiveCliHook, HiveMetastoreHook from airflow.hooks.druid_hook import DruidHook from airflow.models import BaseOperator from airflow.utils.decorators import apply_defaults class HiveToDruidTransfer(BaseOperator): """ Moves data from Hive to Druid, [del]note that for now the data is loaded into memory before being pushed to Druid, so this operator should be used for smallish amount of data.[/del] :param sql: SQL query to execute against the Druid database :type sql: str :param druid_datasource: the datasource you want to ingest into in druid :type druid_datasource: str :param ts_dim: the timestamp dimension :type ts_dim: str :param metric_spec: the metrics you want to define for your data :type metric_spec: list :param hive_cli_conn_id: the hive connection id :type hive_cli_conn_id: str :param druid_ingest_conn_id: the druid ingest connection id :type druid_ingest_conn_id: str :param metastore_conn_id: the metastore connection id :type metastore_conn_id: str :param hadoop_dependency_coordinates: list of coordinates to squeeze int the ingest json :type hadoop_dependency_coordinates: list of str :param intervals: list of time intervals that defines segments, this is passed as is to the json object :type intervals: list """ template_fields = ('sql', 'intervals') template_ext = ('.sql',) #ui_color = '#a0e08c' @apply_defaults def __init__( self, sql, druid_datasource, ts_dim, metric_spec=None, hive_cli_conn_id='hive_cli_default', druid_ingest_conn_id='druid_ingest_default', metastore_conn_id='metastore_default', hadoop_dependency_coordinates=None, intervals=None, num_shards=-1, target_partition_size=-1, query_granularity=None, segment_granularity=None, *args, **kwargs): super(HiveToDruidTransfer, self).__init__(*args, **kwargs) self.sql = sql self.druid_datasource = druid_datasource self.ts_dim = ts_dim self.intervals = intervals or ['{{ ds }}/{{ tomorrow_ds }}'] self.num_shards = num_shards self.target_partition_size = target_partition_size self.query_granularity = query_granularity self.segment_granularity = segment_granularity self.metric_spec = metric_spec or [{ "name": "count", "type": "count"}] self.hive_cli_conn_id = hive_cli_conn_id self.hadoop_dependency_coordinates = hadoop_dependency_coordinates self.druid_ingest_conn_id = druid_ingest_conn_id self.metastore_conn_id = metastore_conn_id def execute(self, context): hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id) logging.info("Extracting data from Hive") hive_table = 'druid.' 
+ context['task_instance_key_str'].replace('.', '_') sql = self.sql.strip().strip(';') hql = """\ set mapred.output.compress=false; set hive.exec.compress.output=false; DROP TABLE IF EXISTS {hive_table}; CREATE TABLE {hive_table} ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE TBLPROPERTIES ('serialization.null.format' = '') AS {sql} """.format(**locals()) logging.info("Running command:\n {}".format(hql)) hive.run_cli(hql) m = HiveMetastoreHook(self.metastore_conn_id) t = m.get_table(hive_table) columns = [col.name for col in t.sd.cols] hdfs_uri = m.get_table(hive_table).sd.location pos = hdfs_uri.find('/user') static_path = hdfs_uri[pos:] schema, table = hive_table.split('.') druid = DruidHook(druid_ingest_conn_id=self.druid_ingest_conn_id) logging.info("Inserting rows into Druid") logging.info("HDFS path: " + static_path) try: druid.load_from_hdfs( datasource=self.druid_datasource, intervals=self.intervals, static_path=static_path, ts_dim=self.ts_dim, columns=columns, num_shards=self.num_shards, target_partition_size=self.target_partition_size, query_granularity=self.query_granularity, segment_granularity=self.segment_granularity, metric_spec=self.metric_spec, hadoop_dependency_coordinates=self.hadoop_dependency_coordinates) logging.info("Load seems to have succeeded!") finally: logging.info( "Cleaning up by dropping the temp " "Hive table {}".format(hive_table)) hql = "DROP TABLE IF EXISTS {}".format(hive_table) hive.run_cli(hql)
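To show how this operator is meant to be wired into a pipeline, here is a hypothetical Airflow 1.x DAG snippet. The SQL, datasource name, schedule and dates are placeholders; the connection ids are left at the operator defaults (hive_cli_default, druid_ingest_default, metastore_default), and the import path follows this module's location, airflow/operators/hive_to_druid.py.

# Hypothetical DAG: nightly load of a Hive events table into Druid.
from datetime import datetime

from airflow import DAG
from airflow.operators.hive_to_druid import HiveToDruidTransfer

dag = DAG(
    dag_id='hive_to_druid_example',       # illustrative name
    start_date=datetime(2017, 1, 1),
    schedule_interval='@daily',
)

load_events = HiveToDruidTransfer(
    task_id='load_events_into_druid',
    sql="SELECT ts, user_id, country FROM events WHERE ds = '{{ ds }}'",
    druid_datasource='events',            # target Druid datasource
    ts_dim='ts',                          # timestamp dimension
    metric_spec=[{"name": "count", "type": "count"}],
    intervals=['{{ ds }}/{{ tomorrow_ds }}'],
    dag=dag,
)

At run time the operator materializes the query into a temporary Hive table, points Druid's HDFS indexing task at that table's location, and drops the table again in the finally block shown above.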
hoosteeno/mozillians
refs/heads/master
vendor-local/lib/python/celery/loaders/app.py
14
# -*- coding: utf-8 -*- """ celery.loaders.app ~~~~~~~~~~~~~~~~~~ The default loader used with custom app instances. :copyright: (c) 2009 - 2012 by Ask Solem. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import from .base import BaseLoader class AppLoader(BaseLoader): def on_worker_init(self): self.import_default_modules() def read_configuration(self): return {}
provaleks/o8
refs/heads/8.0
openerp/addons/base/res/res_currency.py
4
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import re import time import math from openerp import api, fields as fields2 from openerp import tools from openerp.osv import fields, osv from openerp.tools import float_round, float_is_zero, float_compare from openerp.tools.translate import _ import simplejson as json CURRENCY_DISPLAY_PATTERN = re.compile(r'(\w+)\s*(?:\((.*)\))?') class res_currency(osv.osv): def _current_rate(self, cr, uid, ids, name, arg, context=None): return self._get_current_rate(cr, uid, ids, context=context) def _current_rate_silent(self, cr, uid, ids, name, arg, context=None): return self._get_current_rate(cr, uid, ids, raise_on_no_rate=False, context=context) def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None): if context is None: context = {} res = {} date = context.get('date') or time.strftime('%Y-%m-%d') for id in ids: cr.execute('SELECT rate FROM res_currency_rate ' 'WHERE currency_id = %s ' 'AND name <= %s ' 'ORDER BY name desc LIMIT 1', (id, date)) if cr.rowcount: res[id] = cr.fetchone()[0] elif not raise_on_no_rate: res[id] = 0 else: currency = self.browse(cr, uid, id, context=context) raise osv.except_osv(_('Error!'),_("No currency rate associated for currency '%s' for the given period" % (currency.name))) return res _name = "res.currency" _description = "Currency" _columns = { # Note: 'code' column was removed as of v6.0, the 'name' should now hold the ISO code. 'name': fields.char('Currency', size=3, required=True, help="Currency Code (ISO 4217)"), 'symbol': fields.char('Symbol', size=4, help="Currency sign, to be used when printing amounts."), 'rate': fields.function(_current_rate, string='Current Rate', digits=(12,6), help='The rate of the currency to the currency of rate 1.'), # Do not use for computation ! 
Same as rate field with silent failing 'rate_silent': fields.function(_current_rate_silent, string='Current Rate', digits=(12,6), help='The rate of the currency to the currency of rate 1 (0 if no rate defined).'), 'rate_ids': fields.one2many('res.currency.rate', 'currency_id', 'Rates'), 'accuracy': fields.integer('Computational Accuracy'), 'rounding': fields.float('Rounding Factor', digits=(12,6)), 'active': fields.boolean('Active'), 'company_id':fields.many2one('res.company', 'Company'), 'base': fields.boolean('Base'), 'position': fields.selection([('after','After Amount'),('before','Before Amount')], 'Symbol Position', help="Determines where the currency symbol should be placed after or before the amount.") } _defaults = { 'active': 1, 'position' : 'after', 'rounding': 0.01, 'accuracy': 4, 'company_id': False, } _sql_constraints = [ # this constraint does not cover all cases due to SQL NULL handling for company_id, # so it is complemented with a unique index (see below). The constraint and index # share the same prefix so that IntegrityError triggered by the index will be caught # and reported to the user with the constraint's error message. ('unique_name_company_id', 'unique (name, company_id)', 'The currency code must be unique per company!'), ] _order = "name" def init(self, cr): # CONSTRAINT/UNIQUE INDEX on (name,company_id) # /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92 # only support field names in constraint definitions, and we need a function here: # we need to special-case company_id to treat all NULL company_id as equal, otherwise # we would allow duplicate "global" currencies (all having company_id == NULL) cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'res_currency_unique_name_company_id_idx'""") if not cr.fetchone(): cr.execute("""CREATE UNIQUE INDEX res_currency_unique_name_company_id_idx ON res_currency (name, (COALESCE(company_id,-1)))""") date = fields2.Date(compute='compute_date') @api.one @api.depends('rate_ids.name') def compute_date(self): self.date = self.rate_ids[:1].name def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100): if not args: args = [] results = super(res_currency,self)\ .name_search(cr, user, name, args, operator=operator, context=context, limit=limit) if not results: name_match = CURRENCY_DISPLAY_PATTERN.match(name) if name_match: results = super(res_currency,self)\ .name_search(cr, user, name_match.group(1), args, operator=operator, context=context, limit=limit) return results def name_get(self, cr, uid, ids, context=None): if not ids: return [] if isinstance(ids, (int, long)): ids = [ids] reads = self.read(cr, uid, ids, ['name','symbol'], context=context, load='_classic_write') return [(x['id'], tools.ustr(x['name'])) for x in reads] @api.v8 def round(self, amount): """ Return `amount` rounded according to currency `self`. """ return float_round(amount, precision_rounding=self.rounding) @api.v7 def round(self, cr, uid, currency, amount): """Return ``amount`` rounded according to ``currency``'s rounding rules. :param Record currency: currency for which we are rounding :param float amount: the amount to round :return: rounded float """ return float_round(amount, precision_rounding=currency.rounding) @api.v8 def compare_amounts(self, amount1, amount2): """ Compare `amount1` and `amount2` after rounding them according to `self`'s precision. An amount is considered lower/greater than another amount if their rounded value is different. 
This is not the same as having a non-zero difference! For example 1.432 and 1.431 are equal at 2 digits precision, so this method would return 0. However 0.006 and 0.002 are considered different (returns 1) because they respectively round to 0.01 and 0.0, even though 0.006-0.002 = 0.004 which would be considered zero at 2 digits precision. """ return float_compare(amount1, amount2, precision_rounding=self.rounding) @api.v7 def compare_amounts(self, cr, uid, currency, amount1, amount2): """Compare ``amount1`` and ``amount2`` after rounding them according to the given currency's precision.. An amount is considered lower/greater than another amount if their rounded value is different. This is not the same as having a non-zero difference! For example 1.432 and 1.431 are equal at 2 digits precision, so this method would return 0. However 0.006 and 0.002 are considered different (returns 1) because they respectively round to 0.01 and 0.0, even though 0.006-0.002 = 0.004 which would be considered zero at 2 digits precision. :param Record currency: currency for which we are rounding :param float amount1: first amount to compare :param float amount2: second amount to compare :return: (resp.) -1, 0 or 1, if ``amount1`` is (resp.) lower than, equal to, or greater than ``amount2``, according to ``currency``'s rounding. """ return float_compare(amount1, amount2, precision_rounding=currency.rounding) @api.v8 def is_zero(self, amount): """ Return true if `amount` is small enough to be treated as zero according to currency `self`'s rounding rules. Warning: ``is_zero(amount1-amount2)`` is not always equivalent to ``compare_amounts(amount1,amount2) == 0``, as the former will round after computing the difference, while the latter will round before, giving different results, e.g., 0.006 and 0.002 at 2 digits precision. """ return float_is_zero(amount, precision_rounding=self.rounding) @api.v7 def is_zero(self, cr, uid, currency, amount): """Returns true if ``amount`` is small enough to be treated as zero according to ``currency``'s rounding rules. Warning: ``is_zero(amount1-amount2)`` is not always equivalent to ``compare_amounts(amount1,amount2) == 0``, as the former will round after computing the difference, while the latter will round before, giving different results for e.g. 0.006 and 0.002 at 2 digits precision. 
:param Record currency: currency for which we are rounding :param float amount: amount to compare with currency's zero """ return float_is_zero(amount, precision_rounding=currency.rounding) def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None): if context is None: context = {} ctx = context.copy() from_currency = self.browse(cr, uid, from_currency.id, context=ctx) to_currency = self.browse(cr, uid, to_currency.id, context=ctx) if from_currency.rate == 0 or to_currency.rate == 0: date = context.get('date', time.strftime('%Y-%m-%d')) if from_currency.rate == 0: currency_symbol = from_currency.symbol else: currency_symbol = to_currency.symbol raise osv.except_osv(_('Error'), _('No rate found \n' \ 'for the currency: %s \n' \ 'at the date: %s') % (currency_symbol, date)) return to_currency.rate/from_currency.rate def _compute(self, cr, uid, from_currency, to_currency, from_amount, round=True, context=None): if (to_currency.id == from_currency.id): if round: return self.round(cr, uid, to_currency, from_amount) else: return from_amount else: rate = self._get_conversion_rate(cr, uid, from_currency, to_currency, context=context) if round: return self.round(cr, uid, to_currency, from_amount * rate) else: return from_amount * rate @api.v7 def compute(self, cr, uid, from_currency_id, to_currency_id, from_amount, round=True, context=None): context = context or {} if not from_currency_id: from_currency_id = to_currency_id if not to_currency_id: to_currency_id = from_currency_id xc = self.browse(cr, uid, [from_currency_id,to_currency_id], context=context) from_currency = (xc[0].id == from_currency_id and xc[0]) or xc[1] to_currency = (xc[0].id == to_currency_id and xc[0]) or xc[1] return self._compute(cr, uid, from_currency, to_currency, from_amount, round, context) @api.v8 def compute(self, from_amount, to_currency, round=True): """ Convert `from_amount` from currency `self` to `to_currency`. """ assert self, "compute from unknown currency" assert to_currency, "compute to unknown currency" # apply conversion rate if self == to_currency: to_amount = from_amount else: to_amount = from_amount * self._get_conversion_rate(self, to_currency) # apply rounding return to_currency.round(to_amount) if round else to_amount def get_format_currencies_js_function(self, cr, uid, context=None): """ Returns a string that can be used to instanciate a javascript function that formats numbers as currencies. That function expects the number as first parameter and the currency id as second parameter. 
In case of failure it returns undefined.""" function = "" for row in self.search_read(cr, uid, domain=[], fields=['id', 'name', 'symbol', 'rounding', 'position'], context=context): digits = int(math.ceil(math.log10(1 / row['rounding']))) symbol = row['symbol'] or row['name'] format_number_str = "openerp.web.format_value(arguments[0], {type: 'float', digits: [69," + str(digits) + "]}, 0.00)" if row['position'] == 'after': return_str = "return " + format_number_str + " + '\\xA0' + " + json.dumps(symbol) + ";" else: return_str = "return " + json.dumps(symbol) + " + '\\xA0' + " + format_number_str + ";" function += "if (arguments[1] === " + str(row['id']) + ") { " + return_str + " }" return function class res_currency_rate(osv.osv): _name = "res.currency.rate" _description = "Currency Rate" _columns = { 'name': fields.datetime('Date', required=True, select=True), 'rate': fields.float('Rate', digits=(12, 6), help='The rate of the currency to the currency of rate 1'), 'currency_id': fields.many2one('res.currency', 'Currency', readonly=True), } _defaults = { 'name': lambda *a: time.strftime('%Y-%m-%d 00:00:00'), } _order = "name desc" def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80): if operator in ['=', '!=']: try: date_format = '%Y-%m-%d' if context.get('lang'): lang_obj = self.pool['res.lang'] lang_ids = lang_obj.search(cr, user, [('code', '=', context['lang'])], context=context) if lang_ids: date_format = lang_obj.browse(cr, user, lang_ids[0], context=context).date_format name = time.strftime('%Y-%m-%d', time.strptime(name, date_format)) except ValueError: try: args.append(('rate', operator, float(name))) except ValueError: return [] name = '' operator = 'ilike' return super(res_currency_rate, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
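The rounding docstrings above are easiest to absorb with concrete numbers. The sketch below uses the same float helpers this module imports (openerp.tools.float_round, float_compare, float_is_zero), so it needs an OpenERP 8.0 checkout on PYTHONPATH; the 0.01 rounding factor stands in for a currency with two decimal places.

from openerp.tools import float_compare, float_is_zero, float_round

rounding = 0.01  # matches the default 'rounding' of 0.01 above

# 1.432 and 1.431 both round to 1.43, so compare_amounts treats them as equal.
print(float_compare(1.432, 1.431, precision_rounding=rounding))   # 0

# 0.006 rounds to 0.01 while 0.002 rounds to 0.0, so they compare as different.
print(float_compare(0.006, 0.002, precision_rounding=rounding))   # 1

# is_zero rounds *after* subtracting, so the 0.004 difference is treated as zero.
print(float_is_zero(0.006 - 0.002, precision_rounding=rounding))  # True

# round() simply snaps to the currency's rounding factor.
print(float_round(1.432, precision_rounding=rounding))            # 1.43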
ThiagoGarciaAlves/intellij-community
refs/heads/master
python/testData/mover/innerIf_afterUp.py
80
if value is not None: print "here" if not False or value <= 2: pass
grpc/grpc
refs/heads/master
bazel/test/python_test_repo/namespaced/upper/example/import_no_strip_test.py
10
# Copyright 2020 the gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import unittest class ImportTest(unittest.TestCase): def test_import(self): from foo.bar.namespaced.upper.example.namespaced_example_pb2 import NamespacedExample namespaced_example = NamespacedExample() namespaced_example.value = "hello" # Dummy assert, important part is namespaced example was imported. self.assertEqual(namespaced_example.value, "hello") def test_grpc(self): from foo.bar.namespaced.upper.example.namespaced_example_pb2_grpc import NamespacedServiceStub # No error from import self.assertEqual(1, 1) if __name__ == '__main__': logging.basicConfig() unittest.main()
zhouyao1994/incubator-superset
refs/heads/master
superset/views/log/__init__.py
1
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=C,R,W from flask_babel import lazy_gettext as _ class LogMixin: list_title = _("Logs") show_title = _("Show Log") add_title = _("Add Log") edit_title = _("Edit Log") list_columns = ("user", "action", "dttm") edit_columns = ("user", "action", "dttm", "json") base_order = ("dttm", "desc") label_columns = { "user": _("User"), "action": _("Action"), "dttm": _("dttm"), "json": _("JSON"), }
vertcoin/eyeglass
refs/heads/master
contrib/seeds/makeseeds.py
24
#!/usr/bin/env python # # Generate pnSeed[] from Bengt's DNS seeder # NSEEDS=600 import re import sys from subprocess import check_output def main(): lines = sys.stdin.readlines() ips = [] pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):5889") for line in lines: m = pattern.match(line) if m is None: continue ip = 0 for i in range(0,4): ip = ip + (int(m.group(i+1)) << (8*(i))) if ip == 0: continue ips.append(ip) for row in range(0, min(NSEEDS,len(ips)), 6): print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+6] ]) + "," if __name__ == '__main__': main()
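The only non-obvious part of this generator is the byte packing: each octet is shifted into a little-endian 32-bit integer before being printed as a 0x%08x literal. The short check below re-implements just that step on made-up addresses (the :5889 port in the regex only matters for matching seeder output, not for the packing).

def pack_ip(addr):
    # Mirrors the loop above: octet i is shifted left by 8*i bits.
    ip = 0
    for i, octet in enumerate(int(o) for o in addr.split('.')):
        ip += octet << (8 * i)
    return ip

for addr in ['1.2.3.4', '192.168.0.1', '10.0.0.255']:
    print('%s -> 0x%08x' % (addr, pack_ip(addr)))
# 1.2.3.4 -> 0x04030201
# 192.168.0.1 -> 0x0100a8c0
# 10.0.0.255 -> 0xff00000a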
jjscarafia/odoo
refs/heads/master
addons/hr_timesheet/hr_timesheet.py
37
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields from openerp.osv import osv from openerp.tools.translate import _ import openerp class hr_employee(osv.osv): _name = "hr.employee" _inherit = "hr.employee" _columns = { 'product_id': fields.many2one('product.product', 'Product', help="If you want to reinvoice working time of employees, link this employee to a service to determinate the cost price of the job."), 'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal'), 'uom_id': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Unit of Measure', store=True, readonly=True) } def _getAnalyticJournal(self, cr, uid, context=None): md = self.pool.get('ir.model.data') try: dummy, res_id = md.get_object_reference(cr, uid, 'hr_timesheet', 'analytic_journal') #search on id found in result to check if current user has read access right check_right = self.pool.get('account.analytic.journal').search(cr, uid, [('id', '=', res_id)], context=context) if check_right: return res_id except ValueError: pass return False def _getEmployeeProduct(self, cr, uid, context=None): md = self.pool.get('ir.model.data') try: dummy, res_id = md.get_object_reference(cr, uid, 'product', 'product_product_consultant') #search on id found in result to check if current user has read access right check_right = self.pool.get('product.template').search(cr, uid, [('id', '=', res_id)], context=context) if check_right: return res_id except ValueError: pass return False _defaults = { 'journal_id': _getAnalyticJournal, 'product_id': _getEmployeeProduct } class hr_analytic_timesheet(osv.osv): _name = "hr.analytic.timesheet" _table = 'hr_analytic_timesheet' _description = "Timesheet Line" _inherits = {'account.analytic.line': 'line_id'} _order = "id desc" _columns = { 'line_id': fields.many2one('account.analytic.line', 'Analytic Line', ondelete='cascade', required=True), 'partner_id': fields.related('account_id', 'partner_id', type='many2one', string='Partner', relation='res.partner', store=True), } def unlink(self, cr, uid, ids, context=None): toremove = {} for obj in self.browse(cr, uid, ids, context=context): toremove[obj.line_id.id] = True super(hr_analytic_timesheet, self).unlink(cr, uid, ids, context=context) self.pool.get('account.analytic.line').unlink(cr, uid, toremove.keys(), context=context) return True def on_change_unit_amount(self, cr, uid, id, prod_id, unit_amount, company_id, unit=False, journal_id=False, context=None): res = {'value':{}} if prod_id and unit_amount: # find company company_id = self.pool.get('res.company')._company_default_get(cr, 
uid, 'account.analytic.line', context=context) r = self.pool.get('account.analytic.line').on_change_unit_amount(cr, uid, id, prod_id, unit_amount, company_id, unit, journal_id, context=context) if r: res.update(r) # update unit of measurement if prod_id: uom = self.pool.get('product.product').browse(cr, uid, prod_id, context=context) if uom.uom_id: res['value'].update({'product_uom_id': uom.uom_id.id}) else: res['value'].update({'product_uom_id': False}) return res def _getEmployeeProduct(self, cr, uid, context=None): if context is None: context = {} emp_obj = self.pool.get('hr.employee') emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id') or uid)], context=context) if emp_id: emp = emp_obj.browse(cr, uid, emp_id[0], context=context) if emp.product_id: return emp.product_id.id return False def _getEmployeeUnit(self, cr, uid, context=None): emp_obj = self.pool.get('hr.employee') if context is None: context = {} emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id') or uid)], context=context) if emp_id: emp = emp_obj.browse(cr, uid, emp_id[0], context=context) if emp.product_id: return emp.product_id.uom_id.id return False def _getGeneralAccount(self, cr, uid, context=None): emp_obj = self.pool.get('hr.employee') if context is None: context = {} emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id') or uid)], context=context) if emp_id: emp = emp_obj.browse(cr, uid, emp_id[0], context=context) if bool(emp.product_id): a = emp.product_id.property_account_expense.id if not a: a = emp.product_id.categ_id.property_account_expense_categ.id if a: return a return False def _getAnalyticJournal(self, cr, uid, context=None): emp_obj = self.pool.get('hr.employee') if context is None: context = {} if context.get('employee_id'): emp_id = [context.get('employee_id')] else: emp_id = emp_obj.search(cr, uid, [('user_id','=',context.get('user_id') or uid)], limit=1, context=context) if not emp_id: model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'hr', 'open_view_employee_list_my') msg = _("Employee is not created for this user. 
Please create one from configuration panel.") raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel')) emp = emp_obj.browse(cr, uid, emp_id[0], context=context) if emp.journal_id: return emp.journal_id.id else : raise osv.except_osv(_('Warning!'), _('No analytic journal defined for \'%s\'.\nYou should assign an analytic journal on the employee form.')%(emp.name)) _defaults = { 'product_uom_id': _getEmployeeUnit, 'product_id': _getEmployeeProduct, 'general_account_id': _getGeneralAccount, 'journal_id': _getAnalyticJournal, 'date': lambda self, cr, uid, ctx: ctx.get('date', fields.date.context_today(self,cr,uid,context=ctx)), 'user_id': lambda obj, cr, uid, ctx: ctx.get('user_id') or uid, } def on_change_account_id(self, cr, uid, ids, account_id, context=None): return {'value':{}} def on_change_date(self, cr, uid, ids, date): if ids: new_date = self.read(cr, uid, ids[0], ['date'])['date'] if date != new_date: warning = {'title':'User Alert!','message':'Changing the date will let this entry appear in the timesheet of the new date.'} return {'value':{},'warning':warning} return {'value':{}} def create(self, cr, uid, vals, context=None): if context is None: context = {} emp_obj = self.pool.get('hr.employee') emp_id = emp_obj.search(cr, uid, [('user_id', '=', context.get('user_id') or uid)], context=context) ename = '' if emp_id: ename = emp_obj.browse(cr, uid, emp_id[0], context=context).name if not vals.get('journal_id',False): raise osv.except_osv(_('Warning!'), _('No \'Analytic Journal\' is defined for employee %s \nDefine an employee for the selected user and assign an \'Analytic Journal\'!')%(ename,)) if not vals.get('account_id',False): raise osv.except_osv(_('Warning!'), _('No analytic account is defined on the project.\nPlease set one or we cannot automatically fill the timesheet.')) return super(hr_analytic_timesheet, self).create(cr, uid, vals, context=context) def on_change_user_id(self, cr, uid, ids, user_id): if not user_id: return {} context = {'user_id': user_id} return {'value': { 'product_id': self. _getEmployeeProduct(cr, uid, context), 'product_uom_id': self._getEmployeeUnit(cr, uid, context), 'general_account_id': self._getGeneralAccount(cr, uid, context), 'journal_id': self._getAnalyticJournal(cr, uid, context), }} class account_analytic_account(osv.osv): _inherit = 'account.analytic.account' _description = 'Analytic Account' _columns = { 'use_timesheets': fields.boolean('Timesheets', help="Check this field if this project manages timesheets"), } def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None): res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context) if template_id and 'value' in res: template = self.browse(cr, uid, template_id, context=context) res['value']['use_timesheets'] = template.use_timesheets return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
poorboy44/Data-Science-45min-Intros
refs/heads/master
python-logging-201/dog.py
26
import logging from pet import Pet logger = logging.getLogger("pet_world." + __name__) class Dog(Pet): def __init__(self, **kwargs): # default values self.name = "Fido" self.word = "Arf!" self.legs = 4 super(Dog,self).__init__(**kwargs) self.logger = logger self.logger.info("{} is ready for play!".format(self.name))
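The point of naming the logger "pet_world." + __name__ is that one handler attached to the parent "pet_world" logger catches records from every module under it. Dog itself needs the accompanying pet.py from this talk, so the sketch below only exercises the logger tree, using the same child name (pet_world.dog) and the message Dog would emit with its defaults.

import logging

parent = logging.getLogger("pet_world")
parent.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(name)s %(levelname)s %(message)s"))
parent.addHandler(handler)

# A child logger created anywhere below "pet_world" propagates to the parent.
child = logging.getLogger("pet_world.dog")
child.info("Fido is ready for play!")
# pet_world.dog INFO Fido is ready for play!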
HesselTjeerdsma/Cyber-Physical-Pacman-Game
refs/heads/master
Algor/flask/lib/python2.7/site-packages/docutils/languages/zh_cn.py
148
# -*- coding: utf-8 -*- # $Id: zh_cn.py 4564 2006-05-21 20:44:42Z wiemann $ # Author: Pan Junyong <[email protected]> # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ Simplified Chinese language mappings for language-dependent features of Docutils. """ __docformat__ = 'reStructuredText' labels = { # fixed: language-dependent 'author': u'作者', 'authors': u'作者群', 'organization': u'组织', 'address': u'地址', 'contact': u'联系', 'version': u'版本', 'revision': u'修订', 'status': u'状态', 'date': u'日期', 'copyright': u'版权', 'dedication': u'献辞', 'abstract': u'摘要', 'attention': u'注意', 'caution': u'小心', 'danger': u'危险', 'error': u'错误', 'hint': u'提示', 'important': u'重要', 'note': u'注解', 'tip': u'技巧', 'warning': u'警告', 'contents': u'目录', } """Mapping of node class name to label text.""" bibliographic_fields = { # language-dependent: fixed u'作者': 'author', u'作者群': 'authors', u'组织': 'organization', u'地址': 'address', u'联系': 'contact', u'版本': 'version', u'修订': 'revision', u'状态': 'status', u'时间': 'date', u'版权': 'copyright', u'献辞': 'dedication', u'摘要': 'abstract'} """Simplified Chinese to canonical name mapping for bibliographic fields.""" author_separators = [';', ',', u'\uff1b', # ';' u'\uff0c', # ',' u'\u3001', # '、' ] """List of separator strings for the 'Authors' bibliographic field. Tried in order."""
pgonda/servo
refs/heads/master
tests/wpt/css-tests/tools/sslutils/pregenerated.py
470
class PregeneratedSSLEnvironment(object): """SSL environment to use with existing key/certificate files e.g. when running on a server with a public domain name """ ssl_enabled = True def __init__(self, logger, host_key_path, host_cert_path, ca_cert_path=None): self._ca_cert_path = ca_cert_path self._host_key_path = host_key_path self._host_cert_path = host_cert_path def __enter__(self): return self def __exit__(self, *args, **kwargs): pass def host_cert_path(self, hosts): """Return the key and certificate paths for the host""" return self._host_key_path, self._host_cert_path def ca_cert_path(self): """Return the certificate path of the CA that signed the host certificates, or None if that isn't known""" return self._ca_cert_path
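A minimal sketch of how a harness might drive this environment; the key and certificate paths are placeholders, and PregeneratedSSLEnvironment is assumed to be in scope exactly as defined above (imported from wherever this file is saved).

import logging

logger = logging.getLogger("ssl-example")

env = PregeneratedSSLEnvironment(
    logger,
    host_key_path="/etc/ssl/private/example.org.key",    # placeholder
    host_cert_path="/etc/ssl/certs/example.org.pem",     # placeholder
    ca_cert_path=None,
)

with env as ssl_env:
    # The hosts argument is accepted for interface compatibility but ignored:
    # the pregenerated paths are returned unchanged.
    key, cert = ssl_env.host_cert_path(["example.org", "www.example.org"])
    print(key, cert)
    print(ssl_env.ca_cert_path())   # None unless a CA bundle was supplied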
ToonTownInfiniteRepo/ToontownInfinite
refs/heads/master
toontown/minigame/FogOverlay.py
4
from pandac.PandaModules import * from toontown.toonbase.ToonBaseGlobal import * from direct.interval.IntervalGlobal import * from direct.distributed.ClockDelta import * from direct.fsm import ClassicFSM, State from direct.fsm import State from direct.task import Task from toontown.toonbase import ToontownGlobals import math from math import * class FogOverlay: SomeCounter = 0 def __init__(self, color = Point3(1.0, 1.0, 1.0)): self.color = color self.opacity = 1.0 self.setup() def setup(self): self.baseNode = aspect2d.attachNewNode('targetGameTargets') self.overlayGN = GeomNode('Overlay Geometry') self.overlayNodePathGeom = self.baseNode.attachNewNode(self.overlayGN) self.overlayNodePathGeom.setDepthWrite(False) self.overlayNodePathGeom.setTransparency(TransparencyAttrib.MAlpha) shapeVertexs = [] shapeVertexs.append((-2.0, 0.0, 1.0)) shapeVertexs.append((-2.0, 0.0, -1.0)) shapeVertexs.append((2.0, 0.0, 1.0)) shapeVertexs.append((2.0, 0.0, -1.0)) gFormat = GeomVertexFormat.getV3cp() overlayVertexData = GeomVertexData('holds my vertices', gFormat, Geom.UHDynamic) overlayVertexWriter = GeomVertexWriter(overlayVertexData, 'vertex') overlayColorWriter = GeomVertexWriter(overlayVertexData, 'color') for index in xrange(len(shapeVertexs)): overlayVertexWriter.addData3f(shapeVertexs[index][0], shapeVertexs[index][1], shapeVertexs[index][2]) overlayColorWriter.addData4f(1.0, 1.0, 1.0, 1.0) overlayTris = GeomTristrips(Geom.UHStatic) for index in xrange(len(shapeVertexs)): overlayTris.addVertex(index) overlayTris.closePrimitive() overlayGeom = Geom(overlayVertexData) overlayGeom.addPrimitive(overlayTris) self.overlayGN.addGeom(overlayGeom) def setOpacity(self, opacity): self.opacity = opacity self.__applyColor() def setColor(self, color): self.color = color self.__applyColor() def __applyColor(self): self.overlayNodePathGeom.setColorScale(self.color[0], self.color[1], self.color[2], self.opacity) def delete(self): self.overlayGN.removeAllGeoms() self.baseNode.removeNode()
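A hypothetical way the overlay above might be used inside a minigame: fade the fog in, hold it, then fade it back out with a Panda3D interval. This only runs inside a live Panda3D/Toontown client, since FogOverlay attaches itself to aspect2d when constructed, and the colour and timings are purely illustrative.

from direct.interval.IntervalGlobal import LerpFunc, Sequence, Wait
from pandac.PandaModules import Point3

overlay = FogOverlay(color=Point3(0.8, 0.8, 1.0))   # pale blue fog
overlay.setOpacity(0.0)                             # start transparent

fade = Sequence(
    LerpFunc(overlay.setOpacity, fromData=0.0, toData=0.7, duration=2.0),
    Wait(1.0),
    LerpFunc(overlay.setOpacity, fromData=0.7, toData=0.0, duration=2.0),
)
fade.start()

# Release the geometry when the effect is no longer needed:
# overlay.delete()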
hkernbach/arangodb
refs/heads/devel
3rdParty/V8/v5.7.492.77/tools/gyp/test/no-output/gyptest-no-output.py
349
#!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verified things don't explode when there are targets without outputs. """ import TestGyp # TODO(evan): in ninja when there are no targets, there is no 'all' # target either. Disabling this test for now. test = TestGyp.TestGyp(formats=['!ninja']) test.run_gyp('nooutput.gyp', chdir='src') test.relocate('src', 'relocate/src') test.build('nooutput.gyp', chdir='relocate/src') test.pass_test()
pshen/ansible
refs/heads/devel
test/units/playbook/test_taggable.py
119
# (c) 2012-2014, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.tests import unittest from ansible.playbook.taggable import Taggable from units.mock.loader import DictDataLoader class TaggableTestObj(Taggable): def __init__(self): self._loader = DictDataLoader({}) self.tags = [] class TestTaggable(unittest.TestCase): def assert_evaluate_equal(self, test_value, tags, only_tags, skip_tags): taggable_obj = TaggableTestObj() taggable_obj.tags = tags evaluate = taggable_obj.evaluate_tags(only_tags, skip_tags, {}) self.assertEqual(test_value, evaluate) def test_evaluate_tags_tag_in_only_tags(self): self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag1'], []) def test_evaluate_tags_tag_in_skip_tags(self): self.assert_evaluate_equal(False, ['tag1', 'tag2'], [], ['tag1']) def test_evaluate_tags_special_always_in_object_tags(self): self.assert_evaluate_equal(True, ['tag', 'always'], ['random'], []) def test_evaluate_tags_tag_in_skip_tags_special_always_in_object_tags(self): self.assert_evaluate_equal(False, ['tag', 'always'], ['random'], ['tag']) def test_evaluate_tags_special_always_in_skip_tags_and_always_in_tags(self): self.assert_evaluate_equal(False, ['tag', 'always'], [], ['always']) def test_evaluate_tags_special_tagged_in_only_tags_and_object_tagged(self): self.assert_evaluate_equal(True, ['tag'], ['tagged'], []) def test_evaluate_tags_special_tagged_in_only_tags_and_object_untagged(self): self.assert_evaluate_equal(False, [], ['tagged'], []) def test_evaluate_tags_special_tagged_in_skip_tags_and_object_tagged(self): self.assert_evaluate_equal(False, ['tag'], [], ['tagged']) def test_evaluate_tags_special_tagged_in_skip_tags_and_object_untagged(self): self.assert_evaluate_equal(True, [], [], ['tagged']) def test_evaluate_tags_special_untagged_in_only_tags_and_object_tagged(self): self.assert_evaluate_equal(False, ['tag'], ['untagged'], []) def test_evaluate_tags_special_untagged_in_only_tags_and_object_untagged(self): self.assert_evaluate_equal(True, [], ['untagged'], []) def test_evaluate_tags_special_untagged_in_skip_tags_and_object_tagged(self): self.assert_evaluate_equal(True, ['tag'], [], ['untagged']) def test_evaluate_tags_special_untagged_in_skip_tags_and_object_untagged(self): self.assert_evaluate_equal(False, [], [], ['untagged']) def test_evaluate_tags_special_all_in_only_tags(self): self.assert_evaluate_equal(True, ['tag'], ['all'], ['untagged']) def test_evaluate_tags_special_all_in_skip_tags(self): self.assert_evaluate_equal(False, ['tag'], ['tag'], ['all']) def test_evaluate_tags_special_all_in_only_tags_and_special_all_in_skip_tags(self): self.assert_evaluate_equal(False, ['tag'], ['all'], ['all']) def test_evaluate_tags_special_all_in_skip_tags_and_always_in_object_tags(self): 
self.assert_evaluate_equal(True, ['tag', 'always'], [], ['all']) def test_evaluate_tags_special_all_in_skip_tags_and_special_always_in_skip_tags_and_always_in_object_tags(self): self.assert_evaluate_equal(False, ['tag', 'always'], [], ['all', 'always']) def test_evaluate_tags_accepts_lists(self): self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag2'], []) def test_evaluate_tags_accepts_strings(self): self.assert_evaluate_equal(True, 'tag1,tag2', ['tag2'], []) def test_evaluate_tags_with_repeated_tags(self): self.assert_evaluate_equal(False, ['tag', 'tag'], [], ['tag'])
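For readers who want the tag semantics at a glance, here is a condensed, standalone version of three of the cases above: string tags are split on commas, 'always' makes a task run regardless of the requested tags, and explicitly skipping 'always' overrides that. It assumes an Ansible source checkout where ansible.playbook.taggable and the test helper units.mock.loader are importable (the latter only resolves from inside the test tree).

from ansible.playbook.taggable import Taggable
from units.mock.loader import DictDataLoader


class TaggedThing(Taggable):
    def __init__(self, tags):
        self._loader = DictDataLoader({})
        self.tags = tags


print(TaggedThing('tag1,tag2').evaluate_tags(['tag2'], [], {}))          # True
print(TaggedThing(['tag', 'always']).evaluate_tags(['random'], [], {}))  # True
print(TaggedThing(['tag', 'always']).evaluate_tags([], ['always'], {}))  # False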
william-richard/moto
refs/heads/master
moto/kinesisvideo/__init__.py
2
from __future__ import unicode_literals from .models import kinesisvideo_backends from ..core.models import base_decorator kinesisvideo_backend = kinesisvideo_backends["us-east-1"] mock_kinesisvideo = base_decorator(kinesisvideo_backends)
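Since this __init__ only wires up the us-east-1 backend and the mock decorator, a usage sketch may help: the hypothetical test below assumes boto3 is installed and that moto's KinesisVideo backend implements create_stream and list_streams with the usual AWS response shape; the stream name is made up.

import boto3

from moto.kinesisvideo import mock_kinesisvideo


@mock_kinesisvideo
def test_create_stream():
    client = boto3.client("kinesisvideo", region_name="us-east-1")
    client.create_stream(StreamName="my-test-stream")
    streams = client.list_streams()["StreamInfoList"]
    assert any(s["StreamName"] == "my-test-stream" for s in streams)


if __name__ == "__main__":
    test_create_stream()
    print("ok")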