repo_name (string, 5-100) | ref (string, 12-67) | path (string, 4-244) | copies (string, 1-8) | content (string, 0-1.05M, nullable)
---|---|---|---|---|
tobbad/micropython | refs/heads/master | tests/thread/mutate_dict.py | 14 | # test concurrent mutating access to a shared dict object
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# the shared dict
di = {"a": "A", "b": "B", "c": "C", "d": "D"}
# worker thread function
def th(n, lo, hi):
for repeat in range(n):
for i in range(lo, hi):
di[i] = repeat + i
assert di[i] == repeat + i
del di[i]
assert i not in di
di[i] = repeat + i
assert di[i] == repeat + i
assert di.pop(i) == repeat + i
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (30, i * 300, (i + 1) * 300))
# busy wait for threads to finish
while n_finished < n_thread:
pass
# check dict has correct contents
print(sorted(di.items()))
|
staranjeet/fjord | refs/heads/master | vendor/packages/translate-toolkit/translate/storage/aresource.py | 3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Michal Čihař
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Module for handling Android String resource files."""
import re
from lxml import etree
from translate.lang import data
from translate.storage import base, lisa
EOF = None
WHITESPACE = ' \n\t' # Whitespace that we collapse.
MULTIWHITESPACE = re.compile('[ \n\t]{2}')
class AndroidResourceUnit(base.TranslationUnit):
"""A single entry in the Android String resource file."""
rootNode = "string"
languageNode = "string"
@classmethod
def createfromxmlElement(cls, element):
term = cls(None, xmlelement = element)
return term
def __init__(self, source, empty=False, xmlelement=None, **kwargs):
if xmlelement is not None:
self.xmlelement = xmlelement
else:
self.xmlelement = etree.Element(self.rootNode)
self.xmlelement.tail = '\n'
if source is not None:
self.setid(source)
super(AndroidResourceUnit, self).__init__(source)
def istranslatable(self):
return (
bool(self.getid())
and self.xmlelement.get('translatable') != 'false'
)
def isblank(self):
return not bool(self.getid())
def getid(self):
return self.xmlelement.get("name")
def setid(self, newid):
return self.xmlelement.set("name", newid)
def getcontext(self):
return self.xmlelement.get("name")
def unescape(self, text):
'''
Remove escaping from Android resource.
Code stolen from android2po
<https://github.com/miracle2k/android2po>
'''
# Return text for empty elements
if text is None:
return ''
# We need to collapse multiple whitespace while paying
# attention to Android's quoting and escaping.
space_count = 0
active_quote = False
active_percent = False
active_escape = False
formatted = False
i = 0
text = list(text) + [EOF]
while i < len(text):
c = text[i]
# Handle whitespace collapsing
if c is not EOF and c in WHITESPACE:
space_count += 1
elif space_count > 1:
# Remove duplicate whitespace; Pay attention: We
# don't do this if we are currently inside a quote,
# except for one special case: If we have unbalanced
# quotes, e.g. we reach eof while a quote is still
# open, we *do* collapse that trailing part; this is
# how Android does it, for some reason.
if not active_quote or c is EOF:
# Replace by a single space, will get rid of
# non-significant newlines/tabs etc.
text[i-space_count : i] = ' '
i -= space_count - 1
space_count = 0
elif space_count == 1:
# At this point we have a single whitespace character,
# but it might be a newline or tab. If we write this
# kind of insignificant whitespace into the .po file,
# it will be considered significant on import. So,
# make sure that this kind of whitespace is always a
# standard space.
text[i-1] = ' '
space_count = 0
else:
space_count = 0
# Handle quotes
if c == '"' and not active_escape:
active_quote = not active_quote
del text[i]
i -= 1
# If the string is run through a formatter, it will have
# percentage signs for String.format
if c == '%' and not active_escape:
active_percent = not active_percent
elif not active_escape and active_percent:
formatted = True
active_percent = False
# Handle escapes
if c == '\\':
if not active_escape:
active_escape = True
else:
# A double-backslash represents a single;
# simply deleting the current char will do.
del text[i]
i -= 1
active_escape = False
else:
if active_escape:
# Handle the limited amount of escape codes
# that we support.
# TODO: What about \r, or \r\n?
if c is EOF:
# Basically like any other char, but put
# this first so we can use the ``in`` operator
# in the clauses below without issue.
pass
elif c == 'n' or c == 'N':
text[i-1 : i+1] = '\n' # an actual newline
i -= 1
elif c == 't' or c == 'T':
text[i-1 : i+1] = '\t' # an actual tab
i -= 1
elif c == ' ':
text[i-1 : i+1] = ' ' # an actual space
i -= 1
elif c in '"\'@':
text[i-1 : i] = '' # remove the backslash
i -= 1
elif c == 'u':
# Unicode sequence. Android is nice enough to deal
# with those in a way which lets us just capture
# the next 4 characters and raise an error if they
# are not valid (rather than having to use a new
# state to parse the unicode sequence).
# Exception: In case we are at the end of the
# string, we support incomplete sequences by
# prefixing the missing digits with zeros.
# Note: the slice is capped at len(text)-1 so the
# trailing ``None`` (EOF) element is excluded.
max_slice = min(i+5, len(text)-1)
codepoint_str = "".join(text[i+1 : max_slice])
if len(codepoint_str) < 4:
codepoint_str = u"0" * (4-len(codepoint_str)) + codepoint_str
try:
# We can't trust int() to raise a ValueError,
# it will ignore leading/trailing whitespace.
if not codepoint_str.isalnum():
raise ValueError(codepoint_str)
codepoint = unichr(int(codepoint_str, 16))
except ValueError:
raise ValueError('bad unicode escape sequence')
text[i-1 : max_slice] = codepoint
i -= 1
else:
# All others, remove, like Android does as well.
text[i-1 : i+1] = ''
i -= 1
active_escape = False
i += 1
# Join the string together again, but w/o EOF marker
return "".join(text[:-1])
def escape(self, text):
'''
Escape all the characters which need to be escaped in an Android XML file.
'''
if text is None:
return
if len(text) == 0:
return ''
text = text.replace('\\', '\\\\')
text = text.replace('\n', '\\n')
# Replace the space before each escaped newline with a real newline
# to improve readability of the result
text = text.replace(' \\n', '\n\\n')
text = text.replace('\t', '\\t')
text = text.replace('\'', '\\\'')
text = text.replace('"', '\\"')
# @ needs to be escaped at start
if text.startswith('@'):
text = '\\@' + text[1:]
# Quote strings with more whitespace
if text[0] in WHITESPACE or text[-1] in WHITESPACE or len(MULTIWHITESPACE.findall(text)) > 0:
return '"%s"' % text
return text
def setsource(self, source):
super(AndroidResourceUnit, self).setsource(source)
def getsource(self, lang=None):
if (super(AndroidResourceUnit, self).source is None):
return self.target
else:
return super(AndroidResourceUnit, self).source
source = property(getsource, setsource)
def settarget(self, target):
if '<' in target:
# Handle text with possible markup
target = target.replace('&', '&amp;')
try:
# Try as XML
newstring = etree.fromstring('<string>%s</string>' % target)
except:
# Fallback to string with XML escaping
target = target.replace('<', '&lt;')
newstring = etree.fromstring('<string>%s</string>' % target)
# Update text
if newstring.text is None:
self.xmlelement.text = ''
else:
self.xmlelement.text = newstring.text
# Remove old elements
for x in self.xmlelement.iterchildren():
self.xmlelement.remove(x)
# Add new elements
for x in newstring.iterchildren():
self.xmlelement.append(x)
else:
# Handle text only
self.xmlelement.text = self.escape(target)
super(AndroidResourceUnit, self).settarget(target)
def gettarget(self, lang=None):
# Grab inner text
target = self.unescape(self.xmlelement.text or u'')
# Include markup as well
target += u''.join([data.forceunicode(etree.tostring(child, encoding='utf-8')) for child in self.xmlelement.iterchildren()])
return target
target = property(gettarget, settarget)
def getlanguageNode(self, lang=None, index=None):
return self.xmlelement
# Notes are handled as previous sibling comments.
def addnote(self, text, origin=None, position="append"):
if origin in ['programmer', 'developer', 'source code', None]:
self.xmlelement.addprevious(etree.Comment(text))
else:
return super(AndroidResourceUnit, self).addnote(text, origin=origin,
position=position)
def getnotes(self, origin=None):
if origin in ['programmer', 'developer', 'source code', None]:
comments = []
if (self.xmlelement is not None):
prevSibling = self.xmlelement.getprevious()
while ((prevSibling is not None) and (prevSibling.tag is etree.Comment)):
comments.insert(0, prevSibling.text)
prevSibling = prevSibling.getprevious()
return u'\n'.join(comments)
else:
return super(AndroidResourceUnit, self).getnotes(origin)
def removenotes(self):
if ((self.xmlelement is not None) and (self.xmlelement.getparent() is not None)):
prevSibling = self.xmlelement.getprevious()
while ((prevSibling is not None) and (prevSibling.tag is etree.Comment)):
prevSibling.getparent().remove(prevSibling)
prevSibling = self.xmlelement.getprevious()
super(AndroidResourceUnit, self).removenotes()
def __str__(self):
return etree.tostring(self.xmlelement, pretty_print=True,
encoding='utf-8')
def __eq__(self, other):
return (str(self) == str(other))
class AndroidResourceFile(lisa.LISAfile):
"""Class representing an Android String resource file store."""
UnitClass = AndroidResourceUnit
Name = _("Android String Resource")
Mimetypes = ["application/xml"]
Extensions = ["xml"]
rootNode = "resources"
bodyNode = "resources"
XMLskeleton = '''<?xml version="1.0" encoding="utf-8"?>
<resources></resources>'''
def initbody(self):
"""Initialises self.body so it never needs to be retrieved from the
XML again."""
self.namespace = self.document.getroot().nsmap.get(None, None)
self.body = self.document.getroot()
|
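A minimal round-trip sketch of the escape()/unescape() pair defined above, assuming the (Python 2-era) module shown here is importable; the resource name and sample text are made up for illustration:

# Hedged usage sketch, not part of the module above.
from translate.storage.aresource import AndroidResourceUnit

unit = AndroidResourceUnit("greeting")               # hypothetical resource name
escaped = unit.escape("Hello 'world'\nsecond line")  # backslash-escapes the quotes and newline
print(escaped)                                       # Hello \'world\'\nsecond line
print(unit.unescape(escaped))                        # original text, real newline restored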
chyeh727/django | refs/heads/master | tests/check_framework/test_templates.py | 288 | from copy import deepcopy
from django.core.checks.templates import E001
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckTemplateSettingsAppDirsTest(SimpleTestCase):
TEMPLATES_APP_DIRS_AND_LOADERS = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'loaders': ['django.template.loaders.filesystem.Loader'],
},
},
]
@property
def func(self):
from django.core.checks.templates import check_setting_app_dirs_loaders
return check_setting_app_dirs_loaders
@override_settings(TEMPLATES=TEMPLATES_APP_DIRS_AND_LOADERS)
def test_app_dirs_and_loaders(self):
"""
Error if template loaders are specified and APP_DIRS is True.
"""
self.assertEqual(self.func(None), [E001])
def test_app_dirs_removed(self):
TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
del TEMPLATES[0]['APP_DIRS']
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(self.func(None), [])
def test_loaders_removed(self):
TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
del TEMPLATES[0]['OPTIONS']['loaders']
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(self.func(None), [])
|
UniversalMasterEgg8679/ansible | refs/heads/devel | lib/ansible/modules/network/cumulus/_cl_ports.py | 70 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <[email protected]>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cl_ports
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure Cumulus Switch port attributes (ports.conf)
deprecated: Deprecated in 2.3. Use M(nclu) instead.
description:
- Set the initial port attribute defined in the Cumulus Linux ports.conf
file. This module does not do any error checking at the moment. Be careful
to not include ports that do not exist on the switch. Carefully read the
original ports.conf file for any exceptions or limitations.
For more details go to the Configure Switch Port Attribute Documentation at
U(http://docs.cumulusnetworks.com).
options:
speed_10g:
description:
- List of ports to initially run at 10G.
speed_40g:
description:
- List of ports to initially run at 40G.
speed_4_by_10g:
description:
- List of 40G ports that will be unganged to run as 4 10G ports.
speed_40g_div_4:
description:
- List of 10G ports that will be ganged to form a 40G port.
'''
EXAMPLES = '''
# Use cl_ports module to manage the switch attributes defined in the
# ports.conf file on Cumulus Linux
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1
- swp32
speed_40g:
- swp2-31
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1-3
- swp6
speed_40g:
- swp4-5
- swp7-32
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
PORTS_CONF = '/etc/cumulus/ports.conf'
def hash_existing_ports_conf(module):
module.ports_conf_hash = {}
if not os.path.exists(PORTS_CONF):
return False
try:
existing_ports_conf = open(PORTS_CONF).readlines()
except IOError:
error_msg = get_exception()
_msg = "Failed to open %s: %s" % (PORTS_CONF, error_msg)
module.fail_json(msg=_msg)
return  # for testing only; module.fail_json should have already exited
for _line in existing_ports_conf:
_m0 = re.match(r'^(\d+)=(\w+)', _line)
if _m0:
_portnum = int(_m0.group(1))
_speed = _m0.group(2)
module.ports_conf_hash[_portnum] = _speed
def generate_new_ports_conf_hash(module):
new_ports_conf_hash = {}
convert_hash = {
'speed_40g_div_4': '40G/4',
'speed_4_by_10g': '4x10G',
'speed_10g': '10G',
'speed_40g': '40G'
}
for k in module.params.keys():
port_range = module.params[k]
port_setting = convert_hash[k]
if port_range:
port_range = [x for x in port_range if x]
for port_str in port_range:
port_range_str = port_str.replace('swp', '').split('-')
if len(port_range_str) == 1:
new_ports_conf_hash[int(port_range_str[0])] = \
port_setting
else:
int_range = map(int, port_range_str)
portnum_range = range(int_range[0], int_range[1]+1)
for i in portnum_range:
new_ports_conf_hash[i] = port_setting
module.new_ports_hash = new_ports_conf_hash
def compare_new_and_old_port_conf_hash(module):
ports_conf_hash_copy = module.ports_conf_hash.copy()
module.ports_conf_hash.update(module.new_ports_hash)
port_num_length = len(module.ports_conf_hash.keys())
orig_port_num_length = len(ports_conf_hash_copy.keys())
if port_num_length != orig_port_num_length:
module.fail_json(msg="Port numbering is wrong. \
Too many or too few ports configured")
return False
elif ports_conf_hash_copy == module.ports_conf_hash:
return False
return True
def make_copy_of_orig_ports_conf(module):
if os.path.exists(PORTS_CONF + '.orig'):
return
try:
shutil.copyfile(PORTS_CONF, PORTS_CONF + '.orig')
except IOError:
error_msg = get_exception()
_msg = "Failed to save the original %s: %s" % (PORTS_CONF, error_msg)
module.fail_json(msg=_msg)
return # for testing only
def write_to_ports_conf(module):
"""
Use tempfile to first write the config out to a temp file, then copy it
to the actual location. This may help prevent file corruption.
ports.conf is a critical file for Cumulus; we don't want to corrupt it
under any circumstance.
"""
temp = tempfile.NamedTemporaryFile()
try:
try:
temp.write('# Managed By Ansible\n')
for k in sorted(module.ports_conf_hash.keys()):
port_setting = module.ports_conf_hash[k]
_str = "%s=%s\n" % (k, port_setting)
temp.write(_str)
temp.seek(0)
shutil.copyfile(temp.name, PORTS_CONF)
except IOError:
error_msg = get_exception()
module.fail_json(
msg="Failed to write to %s: %s" % (PORTS_CONF, error_msg))
finally:
temp.close()
def main():
module = AnsibleModule(
argument_spec=dict(
speed_40g_div_4=dict(type='list'),
speed_4_by_10g=dict(type='list'),
speed_10g=dict(type='list'),
speed_40g=dict(type='list')
),
required_one_of=[['speed_40g_div_4',
'speed_4_by_10g',
'speed_10g',
'speed_40g']]
)
_changed = False
hash_existing_ports_conf(module)
generate_new_ports_conf_hash(module)
if compare_new_and_old_port_conf_hash(module):
make_copy_of_orig_ports_conf(module)
write_to_ports_conf(module)
_changed = True
_msg = "/etc/cumulus/ports.conf changed"
else:
_msg = 'No change in /etc/cumulus/ports.conf'
module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
# from ansible.module_utils.urls import *
import os
import tempfile
import shutil
if __name__ == '__main__':
main()
|
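The range syntax accepted by the module above (for example swp2-31) is expanded into per-port entries by generate_new_ports_conf_hash(). A standalone sketch of that expansion; expand_ports() is a hypothetical helper name, not part of the module's API:

# Standalone sketch of the range expansion done in generate_new_ports_conf_hash().
def expand_ports(port_range, setting):
    ports = {}
    for port_str in port_range:
        bounds = port_str.replace('swp', '').split('-')
        if len(bounds) == 1:
            ports[int(bounds[0])] = setting
        else:
            lo, hi = map(int, bounds)
            for i in range(lo, hi + 1):
                ports[i] = setting
    return ports

print(expand_ports(['swp1-3', 'swp6'], '4x10G'))
# {1: '4x10G', 2: '4x10G', 3: '4x10G', 6: '4x10G'}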
anryko/ansible | refs/heads/devel | lib/ansible/plugins/shell/sh.py | 63 | # Copyright (c) 2014, Chris Church <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: sh
plugin_type: shell
short_description: "POSIX shell (/bin/sh)"
version_added: historical
description:
- This shell plugin is the one you want to use on most Unix systems; it is the most compatible and widely installed shell.
extends_documentation_fragment:
- shell_common
'''
from ansible.module_utils.six.moves import shlex_quote
from ansible.plugins.shell import ShellBase
class ShellModule(ShellBase):
# Common shell filenames that this plugin handles.
# Note: sh is the default shell plugin so this plugin may also be selected
# if the filename is not listed in any Shell plugin.
# This code needs to be SH-compliant. BASH-isms will not work if /bin/sh points to a non-BASH shell.
COMPATIBLE_SHELLS = frozenset(('sh', 'zsh', 'bash', 'dash', 'ksh'))
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'sh'
# commonly used
ECHO = 'echo'
COMMAND_SEP = ';'
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\n'
_SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
_SHELL_AND = '&&'
_SHELL_OR = '||'
_SHELL_SUB_LEFT = '"`'
_SHELL_SUB_RIGHT = '`"'
_SHELL_GROUP_LEFT = '('
_SHELL_GROUP_RIGHT = ')'
def checksum(self, path, python_interp):
# In the following test, each condition is a check and logical
# comparison (|| or &&) that sets the rc value. Every check is run so
# the last check in the series to fail will be the rc that is returned.
#
# If a check fails we error before invoking the hash functions because
# hash functions may successfully take the hash of a directory on BSDs
# (UFS filesystem?) which is not what the rest of the ansible code expects
#
# If all of the available hashing methods fail we fail with an rc of 0.
# This logic is added to the end of the cmd at the bottom of this function.
# Return codes:
# checksum: success!
# 0: Unknown error
# 1: Remote file does not exist
# 2: No read permissions on the file
# 3: File is a directory
# 4: No python interpreter
# Quoting gets complex here. We're writing a python string that's
# used by a variety of shells on the remote host to invoke a python
# "one-liner".
shell_escaped_path = shlex_quote(path)
test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) # NOQA
csums = [
u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python > 2.4 (including python3)
u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python == 2.4
]
cmd = (" %s " % self._SHELL_OR).join(csums)
cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path)
return cmd
|
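checksum() above chains shell tests so that the last failing test determines the error code before any hashing is attempted. A standalone sketch of that rc-chaining idea, run through /bin/sh; it uses the stdlib shlex.quote where the plugin uses six.moves.shlex_quote, and the example path is arbitrary:

# Hedged sketch, not the plugin's API: every test runs, and the last one to
# fail sets rc; if none fail we fall through to the success branch.
import subprocess
from shlex import quote

path = quote('/etc/hostname')  # arbitrary example path
fmt = ("rc=flag; [ -r %(p)s ] || rc=2; [ -f %(p)s ] || rc=1; "
       "[ -d %(p)s ] && rc=3; "
       "[ \"x$rc\" != xflag ] && echo \"rc=$rc\" || echo ok")
cmd = fmt % {'p': path}
result = subprocess.run(['/bin/sh', '-c', cmd], capture_output=True, text=True)
print(result.stdout.strip())  # "ok" for a readable regular file, otherwise rc=<code>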
FedericoCeratto/debian-pymongo | refs/heads/master | pymongo/monitor.py | 19 | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Class to monitor a MongoDB server on a background thread."""
import weakref
from bson.codec_options import DEFAULT_CODEC_OPTIONS
from pymongo import common, helpers, message, periodic_executor
from pymongo.server_type import SERVER_TYPE
from pymongo.ismaster import IsMaster
from pymongo.monotonic import time as _time
from pymongo.read_preferences import MovingAverage
from pymongo.server_description import ServerDescription
class Monitor(object):
def __init__(
self,
server_description,
topology,
pool,
topology_settings):
"""Class to monitor a MongoDB server on a background thread.
Pass an initial ServerDescription, a Topology, a Pool, and
TopologySettings.
The Topology is weakly referenced. The Pool must be exclusive to this
Monitor.
"""
self._server_description = server_description
self._pool = pool
self._settings = topology_settings
self._avg_round_trip_time = MovingAverage()
# We strongly reference the executor and it weakly references us via
# this closure. When the monitor is freed, stop the executor soon.
def target():
monitor = self_ref()
if monitor is None:
return False # Stop the executor.
Monitor._run(monitor)
return True
executor = periodic_executor.PeriodicExecutor(
condition_class=self._settings.condition_class,
interval=common.HEARTBEAT_FREQUENCY,
min_interval=common.MIN_HEARTBEAT_INTERVAL,
target=target)
self._executor = executor
# Avoid cycles. When self or topology is freed, stop executor soon.
self_ref = weakref.ref(self, executor.close)
self._topology = weakref.proxy(topology, executor.close)
def open(self):
"""Start monitoring, or restart after a fork.
Multiple calls have no effect.
"""
self._executor.open()
def close(self):
"""Close and stop monitoring.
open() restarts the monitor after closing.
"""
self._executor.close()
# Increment the pool_id and maybe close the socket. If the executor
# thread has the socket checked out, it will be closed when checked in.
self._pool.reset()
def join(self, timeout=None):
self._executor.join(timeout)
def request_check(self):
"""If the monitor is sleeping, wake and check the server soon."""
self._executor.wake()
def _run(self):
try:
self._server_description = self._check_with_retry()
self._topology.on_change(self._server_description)
except ReferenceError:
# Topology was garbage-collected.
self.close()
def _check_with_retry(self):
"""Call ismaster once or twice. Reset server's pool on error.
Returns a ServerDescription.
"""
# According to the spec, if an ismaster call fails we reset the
# server's pool. If a server was once connected, change its type
# to Unknown only after retrying once.
address = self._server_description.address
retry = self._server_description.server_type != SERVER_TYPE.Unknown
try:
return self._check_once()
except ReferenceError:
raise
except Exception as error:
self._topology.reset_pool(address)
default = ServerDescription(address, error=error)
if not retry:
self._avg_round_trip_time.reset()
# Server type defaults to Unknown.
return default
# Try a second and final time. If it fails return original error.
try:
return self._check_once()
except ReferenceError:
raise
except Exception:
self._avg_round_trip_time.reset()
return default
def _check_once(self):
"""A single attempt to call ismaster.
Returns a ServerDescription, or raises an exception.
"""
with self._pool.get_socket({}) as sock_info:
response, round_trip_time = self._check_with_socket(sock_info)
self._avg_round_trip_time.add_sample(round_trip_time)
sd = ServerDescription(
address=self._server_description.address,
ismaster=response,
round_trip_time=self._avg_round_trip_time.get())
return sd
def _check_with_socket(self, sock_info):
"""Return (IsMaster, round_trip_time).
Can raise ConnectionFailure or OperationFailure.
"""
start = _time()
request_id, msg, max_doc_size = message.query(
0, 'admin.$cmd', 0, -1, {'ismaster': 1},
None, DEFAULT_CODEC_OPTIONS)
# TODO: use sock_info.command()
sock_info.send_message(msg, max_doc_size)
raw_response = sock_info.receive_message(1, request_id)
result = helpers._unpack_response(raw_response)
return IsMaster(result['data'][0]), _time() - start
|
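Monitor.__init__ above relies on a reference pattern that is easy to miss: the monitor strongly references its executor, while the executor's target only reaches the monitor through a weakref, so the background work stops once the monitor is garbage-collected. A minimal standalone sketch of that pattern; none of the names below come from pymongo:

# Standalone illustration of the strong-owner / weak-back-reference pattern.
import weakref

class Worker(object):
    def __init__(self, target):
        self.target = target

    def tick(self):
        return self.target()  # False means "stop"

class Owner(object):
    def __init__(self):
        def target():
            owner = self_ref()
            if owner is None:
                return False  # owner was garbage-collected: stop working
            return True
        self.worker = Worker(target)  # strong reference to the worker
        self_ref = weakref.ref(self)  # the worker reaches us only weakly

owner = Owner()
worker = owner.worker
print(worker.tick())  # True while the owner is alive
del owner
print(worker.tick())  # False once the owner has been collected (CPython refcounting)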
HydrelioxGitHub/home-assistant | refs/heads/dev | tests/util/test_package.py | 13 | """Test Home Assistant package util methods."""
import asyncio
import logging
import os
import sys
from subprocess import PIPE
from unittest.mock import MagicMock, call, patch
import pytest
import homeassistant.util.package as package
TEST_NEW_REQ = 'pyhelloworld3==1.0.0'
@pytest.fixture
def mock_sys():
"""Mock sys."""
with patch('homeassistant.util.package.sys', spec=object) as sys_mock:
sys_mock.executable = 'python3'
yield sys_mock
@pytest.fixture
def deps_dir():
"""Return path to deps directory."""
return os.path.abspath('/deps_dir')
@pytest.fixture
def lib_dir(deps_dir):
"""Return path to lib directory."""
return os.path.join(deps_dir, 'lib_dir')
@pytest.fixture
def mock_popen(lib_dir):
"""Return a Popen mock."""
with patch('homeassistant.util.package.Popen') as popen_mock:
popen_mock.return_value.communicate.return_value = (
bytes(lib_dir, 'utf-8'), b'error')
popen_mock.return_value.returncode = 0
yield popen_mock
@pytest.fixture
def mock_env_copy():
"""Mock os.environ.copy."""
with patch('homeassistant.util.package.os.environ.copy') as env_copy:
env_copy.return_value = {}
yield env_copy
@pytest.fixture
def mock_venv():
"""Mock homeassistant.util.package.is_virtual_env."""
with patch('homeassistant.util.package.is_virtual_env') as mock:
mock.return_value = True
yield mock
@asyncio.coroutine
def mock_async_subprocess():
"""Return an async Popen mock."""
async_popen = MagicMock()
@asyncio.coroutine
def communicate(input=None):
"""Communicate mock."""
stdout = bytes('/deps_dir/lib_dir', 'utf-8')
return (stdout, None)
async_popen.communicate = communicate
return async_popen
def test_install(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install attempt on a package that doesn't exist."""
env = mock_env_copy()
assert package.install_package(TEST_NEW_REQ, False)
assert mock_popen.call_count == 1
assert (
mock_popen.call_args ==
call([
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ
], stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_upgrade(
mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an upgrade attempt on a package."""
env = mock_env_copy()
assert package.install_package(TEST_NEW_REQ)
assert mock_popen.call_count == 1
assert (
mock_popen.call_args ==
call([
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ, '--upgrade'
], stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_target(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install with a target."""
target = 'target_folder'
env = mock_env_copy()
env['PYTHONUSERBASE'] = os.path.abspath(target)
mock_venv.return_value = False
mock_sys.platform = 'linux'
args = [
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ, '--user', '--prefix=']
assert package.install_package(TEST_NEW_REQ, False, target=target)
assert mock_popen.call_count == 1
assert (
mock_popen.call_args ==
call(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_target_venv(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install with a target in a virtual environment."""
target = 'target_folder'
with pytest.raises(AssertionError):
package.install_package(TEST_NEW_REQ, False, target=target)
def test_install_error(caplog, mock_sys, mock_popen, mock_venv):
"""Test an install with a target."""
caplog.set_level(logging.WARNING)
mock_popen.return_value.returncode = 1
assert not package.install_package(TEST_NEW_REQ)
assert len(caplog.records) == 1
for record in caplog.records:
assert record.levelname == 'ERROR'
def test_install_constraint(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test install with constraint file on not installed package."""
env = mock_env_copy()
constraints = 'constraints_file.txt'
assert package.install_package(
TEST_NEW_REQ, False, constraints=constraints)
assert mock_popen.call_count == 1
assert (
mock_popen.call_args ==
call([
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ, '--constraint', constraints
], stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
)
assert mock_popen.return_value.communicate.call_count == 1
@asyncio.coroutine
def test_async_get_user_site(mock_env_copy):
"""Test async get user site directory."""
deps_dir = '/deps_dir'
env = mock_env_copy()
env['PYTHONUSERBASE'] = os.path.abspath(deps_dir)
args = [sys.executable, '-m', 'site', '--user-site']
with patch('homeassistant.util.package.asyncio.create_subprocess_exec',
return_value=mock_async_subprocess()) as popen_mock:
ret = yield from package.async_get_user_site(deps_dir)
assert popen_mock.call_count == 1
assert popen_mock.call_args == call(
*args, stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.DEVNULL,
env=env)
assert ret == os.path.join(deps_dir, 'lib_dir')
|
ScatterHQ/eliot | refs/heads/master | eliot/_action.py | 2 | """
Support for actions and tasks.
Actions have a beginning and an eventual end, and can be nested. Tasks are
top-level actions.
"""
from __future__ import unicode_literals, absolute_import
import threading
from uuid import uuid4
from contextlib import contextmanager
from functools import partial
from inspect import getcallargs
from contextvars import ContextVar
from pyrsistent import field, PClass, optional, pmap_field, pvector
from boltons.funcutils import wraps
from six import text_type as unicode, PY3
from ._message import (
WrittenMessage,
EXCEPTION_FIELD,
REASON_FIELD,
TASK_UUID_FIELD,
MESSAGE_TYPE_FIELD,
)
from ._util import safeunicode
from ._errors import _error_extraction
ACTION_STATUS_FIELD = "action_status"
ACTION_TYPE_FIELD = "action_type"
STARTED_STATUS = "started"
SUCCEEDED_STATUS = "succeeded"
FAILED_STATUS = "failed"
VALID_STATUSES = (STARTED_STATUS, SUCCEEDED_STATUS, FAILED_STATUS)
_ACTION_CONTEXT = ContextVar("eliot.action")
from ._message import TIMESTAMP_FIELD, TASK_LEVEL_FIELD
def current_action():
"""
@return: The current C{Action} in context, or C{None} if none were set.
"""
return _ACTION_CONTEXT.get(None)
class TaskLevel(object):
"""
The location of a message within the tree of actions of a task.
@ivar level: A pvector of integers. Each item indicates a child
relationship, and the value indicates message count. E.g. C{[2,
3]} indicates this is the third message within an action which is
the second item in the task.
"""
def __init__(self, level):
self._level = level
def as_list(self):
"""Return the current level.
@return: List of integers.
"""
return self._level[:]
# Backwards compatibility:
@property
def level(self):
return pvector(self._level)
def __lt__(self, other):
return self._level < other._level
def __le__(self, other):
return self._level <= other._level
def __gt__(self, other):
return self._level > other._level
def __ge__(self, other):
return self._level >= other._level
def __eq__(self, other):
if other.__class__ != TaskLevel:
return False
return self._level == other._level
def __ne__(self, other):
if other.__class__ != TaskLevel:
return True
return self._level != other._level
def __hash__(self):
return hash(tuple(self._level))
@classmethod
def fromString(cls, string):
"""
Convert a serialized Unicode string to a L{TaskLevel}.
@param string: Output of L{TaskLevel.toString}.
@return: L{TaskLevel} parsed from the string.
"""
return cls(level=[int(i) for i in string.split("/") if i])
def toString(self):
"""
Convert to a Unicode string, for serialization purposes.
@return: L{unicode} representation of the L{TaskLevel}.
"""
return "/" + "/".join(map(unicode, self._level))
def next_sibling(self):
"""
Return the next L{TaskLevel}, that is a task at the same level as this
one, but one after.
@return: L{TaskLevel} which follows this one.
"""
new_level = self._level[:]
new_level[-1] += 1
return TaskLevel(level=new_level)
def child(self):
"""
Return a child of this L{TaskLevel}.
@return: L{TaskLevel} which is the first child of this one.
"""
new_level = self._level[:]
new_level.append(1)
return TaskLevel(level=new_level)
def parent(self):
"""
Return the parent of this L{TaskLevel}, or C{None} if it doesn't have
one.
@return: L{TaskLevel} which is the parent of this one.
"""
if not self._level:
return None
return TaskLevel(level=self._level[:-1])
def is_sibling_of(self, task_level):
"""
Is this task a sibling of C{task_level}?
"""
return self.parent() == task_level.parent()
# PEP 8 compatibility:
from_string = fromString
to_string = toString
_TASK_ID_NOT_SUPPLIED = object()
import time
class Action(object):
"""
Part of a nested hierarchy of ongoing actions.
An action has a start and an end; a message is logged for each.
Actions should only be used from a single thread, by implication the
thread where they were created.
@ivar _identification: Fields identifying this action.
@ivar _successFields: Fields to be included in successful finish message.
@ivar _finished: L{True} if the L{Action} has finished, otherwise L{False}.
"""
def __init__(self, logger, task_uuid, task_level, action_type, serializers=None):
"""
Initialize the L{Action} and log the start message.
You probably do not want to use this API directly: use L{start_action}
or L{startTask} instead.
@param logger: The L{eliot.ILogger} to which to write
messages.
@param task_uuid: The uuid of the top-level task, e.g. C{"123525"}.
@param task_level: The action's level in the task.
@type task_level: L{TaskLevel}
@param action_type: The type of the action,
e.g. C{"yourapp:subsystem:dosomething"}.
@param serializers: Either a L{eliot._validation._ActionSerializers}
instance or C{None}. In the latter case no validation or
serialization will be done for messages generated by the
L{Action}.
"""
self._successFields = {}
self._logger = _output._DEFAULT_LOGGER if (logger is None) else logger
self._task_level = task_level
self._last_child = None
self._identification = {
TASK_UUID_FIELD: task_uuid,
ACTION_TYPE_FIELD: action_type,
}
self._serializers = serializers
self._finished = False
@property
def task_uuid(self):
"""
@return str: the current action's task UUID.
"""
return self._identification[TASK_UUID_FIELD]
def serialize_task_id(self):
"""
Create a unique identifier for the current location within the task.
The format is C{b"<task_uuid>@<task_level>"}.
@return: L{bytes} encoding the current location within the task.
"""
return "{}@{}".format(
self._identification[TASK_UUID_FIELD], self._nextTaskLevel().toString()
).encode("ascii")
@classmethod
def continue_task(cls, logger=None, task_id=_TASK_ID_NOT_SUPPLIED):
"""
Start a new action which is part of a serialized task.
@param logger: The L{eliot.ILogger} to which to write
messages, or C{None} if the default one should be used.
@param task_id: A serialized task identifier, the output of
L{Action.serialize_task_id}, either ASCII-encoded bytes or unicode
string. Required.
@return: The new L{Action} instance.
"""
if task_id is _TASK_ID_NOT_SUPPLIED:
raise RuntimeError("You must supply a task_id keyword argument.")
if isinstance(task_id, bytes):
task_id = task_id.decode("ascii")
uuid, task_level = task_id.split("@")
action = cls(
logger, uuid, TaskLevel.fromString(task_level), "eliot:remote_task"
)
action._start({})
return action
# Backwards-compat variants:
serializeTaskId = serialize_task_id
continueTask = continue_task
def _nextTaskLevel(self):
"""
Return the next C{task_level} for messages within this action.
Called whenever a message is logged within the context of an action.
@return: The message's C{task_level}.
"""
if not self._last_child:
self._last_child = self._task_level.child()
else:
self._last_child = self._last_child.next_sibling()
return self._last_child
def _start(self, fields):
"""
Log the start message.
The action identification fields, and any additional given fields,
will be logged.
In general you shouldn't call this yourself, instead using a C{with}
block or L{Action.finish}.
"""
fields[ACTION_STATUS_FIELD] = STARTED_STATUS
fields[TIMESTAMP_FIELD] = time.time()
fields.update(self._identification)
fields[TASK_LEVEL_FIELD] = self._nextTaskLevel().as_list()
if self._serializers is None:
serializer = None
else:
serializer = self._serializers.start
self._logger.write(fields, serializer)
def finish(self, exception=None):
"""
Log the finish message.
The action identification fields, and any additional given fields,
will be logged.
In general you shouldn't call this yourself, instead using a C{with}
block or L{Action.finish}.
@param exception: C{None}, in which case the fields added with
L{Action.addSuccessFields} are used. Or an L{Exception}, in
which case an C{"exception"} field is added with the given
L{Exception} type and C{"reason"} with its contents.
"""
if self._finished:
return
self._finished = True
serializer = None
if exception is None:
fields = self._successFields
fields[ACTION_STATUS_FIELD] = SUCCEEDED_STATUS
if self._serializers is not None:
serializer = self._serializers.success
else:
fields = _error_extraction.get_fields_for_exception(self._logger, exception)
fields[EXCEPTION_FIELD] = "%s.%s" % (
exception.__class__.__module__,
exception.__class__.__name__,
)
fields[REASON_FIELD] = safeunicode(exception)
fields[ACTION_STATUS_FIELD] = FAILED_STATUS
if self._serializers is not None:
serializer = self._serializers.failure
fields[TIMESTAMP_FIELD] = time.time()
fields.update(self._identification)
fields[TASK_LEVEL_FIELD] = self._nextTaskLevel().as_list()
self._logger.write(fields, serializer)
def child(self, logger, action_type, serializers=None):
"""
Create a child L{Action}.
Rather than calling this directly, you can use L{start_action} to
create child L{Action} using the execution context.
@param logger: The L{eliot.ILogger} to which to write
messages.
@param action_type: The type of this action,
e.g. C{"yourapp:subsystem:dosomething"}.
@param serializers: Either a L{eliot._validation._ActionSerializers}
instance or C{None}. In the latter case no validation or
serialization will be done for messages generated by the
L{Action}.
"""
newLevel = self._nextTaskLevel()
return self.__class__(
logger,
self._identification[TASK_UUID_FIELD],
newLevel,
action_type,
serializers,
)
def run(self, f, *args, **kwargs):
"""
Run the given function with this L{Action} as its execution context.
"""
parent = _ACTION_CONTEXT.set(self)
try:
return f(*args, **kwargs)
finally:
_ACTION_CONTEXT.reset(parent)
def addSuccessFields(self, **fields):
"""
Add fields to be included in the result message when the action
finishes successfully.
@param fields: Additional fields to add to the result message.
"""
self._successFields.update(fields)
# PEP 8 variant:
add_success_fields = addSuccessFields
@contextmanager
def context(self):
"""
Create a context manager that ensures code runs within action's context.
The action does NOT finish when the context is exited.
"""
parent = _ACTION_CONTEXT.set(self)
try:
yield self
finally:
_ACTION_CONTEXT.reset(parent)
# Python context manager implementation:
def __enter__(self):
"""
Push this action onto the execution context.
"""
self._parent_token = _ACTION_CONTEXT.set(self)
return self
def __exit__(self, type, exception, traceback):
"""
Pop this action off the execution context, log finish message.
"""
_ACTION_CONTEXT.reset(self._parent_token)
self._parent_token = None
self.finish(exception)
## Message logging
def log(self, message_type, **fields):
"""Log individual message."""
fields[TIMESTAMP_FIELD] = time.time()
fields[TASK_UUID_FIELD] = self._identification[TASK_UUID_FIELD]
fields[TASK_LEVEL_FIELD] = self._nextTaskLevel().as_list()
fields[MESSAGE_TYPE_FIELD] = message_type
self._logger.write(fields, fields.pop("__eliot_serializer__", None))
class WrongTask(Exception):
"""
Tried to add a message to an action, but the message was from another
task.
"""
def __init__(self, action, message):
Exception.__init__(
self,
"Tried to add {} to {}. Expected task_uuid = {}, got {}".format(
message, action, action.task_uuid, message.task_uuid
),
)
class WrongTaskLevel(Exception):
"""
Tried to add a message to an action, but the task level of the message
indicated that it was not a direct child.
"""
def __init__(self, action, message):
Exception.__init__(
self,
"Tried to add {} to {}, but {} is not a sibling of {}".format(
message, action, message.task_level, action.task_level
),
)
class WrongActionType(Exception):
"""
Tried to end a message with a different action_type than the beginning.
"""
def __init__(self, action, message):
error_msg = "Tried to end {} with {}. Expected action_type = {}, got {}"
Exception.__init__(
self,
error_msg.format(
action,
message,
action.action_type,
message.contents.get(ACTION_TYPE_FIELD, "<undefined>"),
),
)
class InvalidStatus(Exception):
"""
Tried to end a message with an invalid status.
"""
def __init__(self, action, message):
error_msg = "Tried to end {} with {}. Expected status {} or {}, got {}"
Exception.__init__(
self,
error_msg.format(
action,
message,
SUCCEEDED_STATUS,
FAILED_STATUS,
message.contents.get(ACTION_STATUS_FIELD, "<undefined>"),
),
)
class DuplicateChild(Exception):
"""
Tried to add a child to an action that already had a child at that task
level.
"""
def __init__(self, action, message):
Exception.__init__(
self,
"Tried to add {} to {}, but already had child at {}".format(
message, action, message.task_level
),
)
class InvalidStartMessage(Exception):
"""
Tried to start an action with an invalid message.
"""
def __init__(self, message, reason):
Exception.__init__(self, "Invalid start message {}: {}".format(message, reason))
@classmethod
def wrong_status(cls, message):
return cls(message, 'must have status "STARTED"')
@classmethod
def wrong_task_level(cls, message):
return cls(message, "first message must have task level ending in 1")
class WrittenAction(PClass):
"""
An Action that has been logged.
This class is intended to provide a definition within Eliot of what an
action actually is, and a means of constructing actions that are known to
be valid.
@ivar WrittenMessage start_message: A start message whose task UUID and
level match this action, or C{None} if it is not yet set on the
action.
@ivar WrittenMessage end_message: An end message whose task UUID and
level match this action. Can be C{None} if the action is
unfinished.
@ivar TaskLevel task_level: The action's task level, e.g. if start
message has level C{[2, 3, 1]} it will be
C{TaskLevel(level=[2, 3])}.
@ivar UUID task_uuid: The UUID of the task to which this action belongs.
@ivar _children: A L{pmap} from L{TaskLevel} to the L{WrittenAction} and
L{WrittenMessage} objects that make up this action.
"""
start_message = field(type=optional(WrittenMessage), mandatory=True, initial=None)
end_message = field(type=optional(WrittenMessage), mandatory=True, initial=None)
task_level = field(type=TaskLevel, mandatory=True)
task_uuid = field(type=unicode, mandatory=True, factory=unicode)
# Pyrsistent doesn't support pmap_field with recursive types.
_children = pmap_field(TaskLevel, object)
@classmethod
def from_messages(cls, start_message=None, children=pvector(), end_message=None):
"""
Create a C{WrittenAction} from C{WrittenMessage}s and other
C{WrittenAction}s.
@param WrittenMessage start_message: A message that has
C{ACTION_STATUS_FIELD}, C{ACTION_TYPE_FIELD}, and a C{task_level}
that ends in C{1}, or C{None} if unavailable.
@param children: An iterable of C{WrittenMessage} and C{WrittenAction}
@param WrittenMessage end_message: A message that has the same
C{action_type} as this action.
@raise WrongTask: If C{end_message} has a C{task_uuid} that differs
from C{start_message.task_uuid}.
@raise WrongTaskLevel: If any child message or C{end_message} has a
C{task_level} that means it is not a direct child.
@raise WrongActionType: If C{end_message} has an C{ACTION_TYPE_FIELD}
that differs from the C{ACTION_TYPE_FIELD} of C{start_message}.
@raise InvalidStatus: If C{end_message} doesn't have an
C{action_status}, or has one that is not C{SUCCEEDED_STATUS} or
C{FAILED_STATUS}.
@raise InvalidStartMessage: If C{start_message} does not have a
C{ACTION_STATUS_FIELD} of C{STARTED_STATUS}, or if it has a
C{task_level} indicating that it is not the first message of an
action.
@return: A new C{WrittenAction}.
"""
actual_message = [
message
for message in [start_message, end_message] + list(children)
if message
][0]
action = cls(
task_level=actual_message.task_level.parent(),
task_uuid=actual_message.task_uuid,
)
if start_message:
action = action._start(start_message)
for child in children:
if action._children.get(child.task_level, child) != child:
raise DuplicateChild(action, child)
action = action._add_child(child)
if end_message:
action = action._end(end_message)
return action
@property
def action_type(self):
"""
The type of this action, e.g. C{"yourapp:subsystem:dosomething"}.
"""
if self.start_message:
return self.start_message.contents[ACTION_TYPE_FIELD]
elif self.end_message:
return self.end_message.contents[ACTION_TYPE_FIELD]
else:
return None
@property
def status(self):
"""
One of C{STARTED_STATUS}, C{SUCCEEDED_STATUS}, C{FAILED_STATUS} or
C{None}.
"""
message = self.end_message if self.end_message else self.start_message
if message:
return message.contents[ACTION_STATUS_FIELD]
else:
return None
@property
def start_time(self):
"""
The Unix timestamp of when the action started, or C{None} if there has
been no start message added so far.
"""
if self.start_message:
return self.start_message.timestamp
@property
def end_time(self):
"""
The Unix timestamp of when the action ended, or C{None} if there has been
no end message.
"""
if self.end_message:
return self.end_message.timestamp
@property
def exception(self):
"""
If the action failed, the name of the exception that was raised to cause
it to fail. If the action succeeded, or hasn't finished yet, then
C{None}.
"""
if self.end_message:
return self.end_message.contents.get(EXCEPTION_FIELD, None)
@property
def reason(self):
"""
The reason the action failed. If the action succeeded, or hasn't finished
yet, then C{None}.
"""
if self.end_message:
return self.end_message.contents.get(REASON_FIELD, None)
@property
def children(self):
"""
The list of child messages and actions sorted by task level, excluding the
start and end messages.
"""
return pvector(sorted(self._children.values(), key=lambda m: m.task_level))
def _validate_message(self, message):
"""
Is C{message} a valid direct child of this action?
@param message: Either a C{WrittenAction} or a C{WrittenMessage}.
@raise WrongTask: If C{message} has a C{task_uuid} that differs from the
action's C{task_uuid}.
@raise WrongTaskLevel: If C{message} has a C{task_level} that means
it's not a direct child.
"""
if message.task_uuid != self.task_uuid:
raise WrongTask(self, message)
if not message.task_level.parent() == self.task_level:
raise WrongTaskLevel(self, message)
def _add_child(self, message):
"""
Return a new action with C{message} added as a child.
Assumes C{message} is not an end message.
@param message: Either a C{WrittenAction} or a C{WrittenMessage}.
@raise WrongTask: If C{message} has a C{task_uuid} that differs from the
action's C{task_uuid}.
@raise WrongTaskLevel: If C{message} has a C{task_level} that means
it's not a direct child.
@return: A new C{WrittenAction}.
"""
self._validate_message(message)
level = message.task_level
return self.transform(("_children", level), message)
def _start(self, start_message):
"""
Start this action given its start message.
@param WrittenMessage start_message: A start message that has the
same level as this action.
@raise InvalidStartMessage: If C{start_message} does not have a
C{ACTION_STATUS_FIELD} of C{STARTED_STATUS}, or if it has a
C{task_level} indicating that it is not the first message of an
action.
"""
if start_message.contents.get(ACTION_STATUS_FIELD, None) != STARTED_STATUS:
raise InvalidStartMessage.wrong_status(start_message)
if start_message.task_level.level[-1] != 1:
raise InvalidStartMessage.wrong_task_level(start_message)
return self.set(start_message=start_message)
def _end(self, end_message):
"""
End this action with C{end_message}.
Assumes that the action has not already been ended.
@param WrittenMessage end_message: An end message that has the
same level as this action.
@raise WrongTask: If C{end_message} has a C{task_uuid} that differs
from the action's C{task_uuid}.
@raise WrongTaskLevel: If C{end_message} has a C{task_level} that means
it's not a direct child.
@raise InvalidStatus: If C{end_message} doesn't have an
C{action_status}, or has one that is not C{SUCCEEDED_STATUS} or
C{FAILED_STATUS}.
@return: A new, completed C{WrittenAction}.
"""
action_type = end_message.contents.get(ACTION_TYPE_FIELD, None)
if self.action_type not in (None, action_type):
raise WrongActionType(self, end_message)
self._validate_message(end_message)
status = end_message.contents.get(ACTION_STATUS_FIELD, None)
if status not in (FAILED_STATUS, SUCCEEDED_STATUS):
raise InvalidStatus(self, end_message)
return self.set(end_message=end_message)
def start_action(logger=None, action_type="", _serializers=None, **fields):
"""
Create a child L{Action}, figuring out the parent L{Action} from execution
context, and log the start message.
You can use the result as a Python context manager, or use the
L{Action.finish} API to explicitly finish it.
with start_action(logger, "yourapp:subsystem:dosomething",
entry=x) as action:
do(x)
result = something(x * 2)
action.addSuccessFields(result=result)
Or alternatively:
action = start_action(logger, "yourapp:subsystem:dosomething",
entry=x)
with action.context():
do(x)
result = something(x * 2)
action.addSuccessFields(result=result)
action.finish()
@param logger: The L{eliot.ILogger} to which to write messages, or
C{None} to use the default one.
@param action_type: The type of this action,
e.g. C{"yourapp:subsystem:dosomething"}.
@param _serializers: Either a L{eliot._validation._ActionSerializers}
instance or C{None}. In the latter case no validation or serialization
will be done for messages generated by the L{Action}.
@param fields: Additional fields to add to the start message.
@return: A new L{Action}.
"""
parent = current_action()
if parent is None:
return startTask(logger, action_type, _serializers, **fields)
else:
action = parent.child(logger, action_type, _serializers)
action._start(fields)
return action
def startTask(logger=None, action_type="", _serializers=None, **fields):
"""
Like L{action}, but creates a new top-level L{Action} with no parent.
@param logger: The L{eliot.ILogger} to which to write messages, or
C{None} to use the default one.
@param action_type: The type of this action,
e.g. C{"yourapp:subsystem:dosomething"}.
@param _serializers: Either a L{eliot._validation._ActionSerializers}
instance or C{None}. In the latter case no validation or serialization
will be done for messages generated by the L{Action}.
@param fields: Additional fields to add to the start message.
@return: A new L{Action}.
"""
action = Action(
logger, unicode(uuid4()), TaskLevel(level=[]), action_type, _serializers
)
action._start(fields)
return action
class TooManyCalls(Exception):
"""
The callable was called more than once.
This typically indicates a coding bug: the result of
C{preserve_context} should only be called once, and
C{preserve_context} should therefore be called each time you want to
pass the callable to a thread.
"""
def preserve_context(f):
"""
Package up the given function with the current Eliot context, and then
restore context and call given function when the resulting callable is
run. This allows continuing the action context within a different thread.
The result should only be used once, since it relies on
L{Action.serialize_task_id} whose results should only be deserialized
once.
@param f: A callable.
@return: One-time use callable that calls given function in context of
a child of current Eliot action.
"""
action = current_action()
if action is None:
return f
task_id = action.serialize_task_id()
called = threading.Lock()
def restore_eliot_context(*args, **kwargs):
# Make sure the function has not already been called:
if not called.acquire(False):
raise TooManyCalls(f)
with Action.continue_task(task_id=task_id):
return f(*args, **kwargs)
return restore_eliot_context
def log_call(
wrapped_function=None, action_type=None, include_args=None, include_result=True
):
"""Decorator/decorator factory that logs inputs and the return result.
If used with inputs (i.e. as a decorator factory), it accepts the following
parameters:
@param action_type: The action type to use. If not given the function name
will be used.
@param include_args: If given, should be a list of strings, the arguments to log.
@param include_result: True by default. If False, the return result isn't logged.
"""
if wrapped_function is None:
return partial(
log_call,
action_type=action_type,
include_args=include_args,
include_result=include_result,
)
if action_type is None:
if PY3:
action_type = "{}.{}".format(
wrapped_function.__module__, wrapped_function.__qualname__
)
else:
action_type = wrapped_function.__name__
if PY3 and include_args is not None:
from inspect import signature
sig = signature(wrapped_function)
if set(include_args) - set(sig.parameters):
raise ValueError(
(
"include_args ({}) lists arguments not in the " "wrapped function"
).format(include_args)
)
@wraps(wrapped_function)
def logging_wrapper(*args, **kwargs):
callargs = getcallargs(wrapped_function, *args, **kwargs)
# Remove self if it's included:
if "self" in callargs:
callargs.pop("self")
# Filter arguments to log, if necessary:
if include_args is not None:
callargs = {k: callargs[k] for k in include_args}
with start_action(action_type=action_type, **callargs) as ctx:
result = wrapped_function(*args, **kwargs)
if include_result:
ctx.add_success_fields(result=result)
return result
return logging_wrapper
def log_message(message_type, **fields):
"""Log a message in the context of the current action.
If there is no current action, a new UUID will be generated.
"""
# Loggers will hopefully go away...
logger = fields.pop("__eliot_logger__", None)
action = current_action()
if action is None:
action = Action(logger, str(uuid4()), TaskLevel(level=[]), "")
action.log(message_type, **fields)
from . import _output
|
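One piece of the module above that the docstrings describe but do not show end to end is preserve_context(), which packages the current task id so a worker thread can continue the same action tree. A small hedged sketch, assuming eliot is installed and using to_file() as a stand-in destination; the action type string is made up:

# Hedged sketch: continue the current Eliot action inside a thread.
import sys
import threading
from eliot import start_action, preserve_context, to_file

to_file(sys.stdout)  # send log messages somewhere visible

def work(x):
    return x * 2

with start_action(action_type="demo:parent"):
    # preserve_context() wraps work() with the current task id; the returned
    # callable may only be called once (a second call raises TooManyCalls).
    t = threading.Thread(target=preserve_context(work), args=(21,))
    t.start()
    t.join()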
jsgf/xen | refs/heads/master | tools/xm-test/tests/console/02_console_baddom_neg.py | 42 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Li Ge <[email protected]>
# Test Description:
# Negative Tests:
# Test attempting to connect to a non-existent domname and domid. Verify failure.
import re
from XmTestLib import *
status, output = traceCommand("xm console 5000")
eyecatcher = "Error"
where = output.find(eyecatcher)
if status == 0:
FAIL("xm console returned invalid %i != 0" % status)
elif where == -1:
FAIL("xm console failed to report error on bad domid")
status, output = traceCommand("xm console NON_EXIST")
eyecatcher = "Error"
where = output.find(eyecatcher)
if status == 0:
FAIL("xm console returned invalid %i != 0" % status)
elif where == -1:
FAIL("xm console failed to report error on bad domname")
|
lmazuel/azure-sdk-for-python | refs/heads/master | azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/network_profile.py | 1 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkProfile(Model):
"""Specifies the network interfaces of the virtual machine.
:param network_interfaces: Specifies the list of resource Ids for the
network interfaces associated with the virtual machine.
:type network_interfaces:
list[~azure.mgmt.compute.v2016_04_30_preview.models.NetworkInterfaceReference]
"""
_attribute_map = {
'network_interfaces': {'key': 'networkInterfaces', 'type': '[NetworkInterfaceReference]'},
}
def __init__(self, **kwargs):
super(NetworkProfile, self).__init__(**kwargs)
self.network_interfaces = kwargs.get('network_interfaces', None)
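# A hedged construction sketch (the resource id is a placeholder, and
# NetworkInterfaceReference is assumed to be importable from the same models package):
#
#     profile = NetworkProfile(network_interfaces=[
#         NetworkInterfaceReference(id='/subscriptions/<sub>/resourceGroups/<rg>/'
#                                      'providers/Microsoft.Network/networkInterfaces/<nic>')
#     ])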
|
svebk/DeepSentiBank_memex | refs/heads/master | scripts/precompute_similar_images_parallel.py | 1 | import os
import sys
import time
import datetime
import happybase
# parallel
#from multiprocessing import JoinableQueue as Queue
from multiprocessing import Queue
from multiprocessing import Process
sys.path.append('..')
import cu_image_search
from cu_image_search.search import searcher_hbaseremote
nb_workers = 20
time_sleep = 60
queue_timeout = 600
# should we try/except main loop of producer, consumer and finalizer?
def end_producer(queueIn):
print "[producer-pid({}): log] ending producer at {}".format(os.getpid(), get_now())
for i in range(nb_workers):
# sentinel value, one for each worker
queueIn.put((None, None, None))
def producer(global_conf_file, queueIn, queueProducer):
print "[producer-pid({}): log] Started a producer worker at {}".format(os.getpid(), get_now())
sys.stdout.flush()
searcher_producer = searcher_hbaseremote.Searcher(global_conf_file)
print "[producer-pid({}): log] Producer worker ready at {}".format(os.getpid(), get_now())
queueProducer.put("Producer ready")
while True:
try:
start_get_batch = time.time()
update_id, str_list_sha1s = searcher_producer.indexer.get_next_batch_precomp_sim()
#queueProducer.put("Producer got batch")
print "[producer-pid({}): log] Got batch in {}s at {}".format(os.getpid(), time.time() - start_get_batch, get_now())
sys.stdout.flush()
if update_id is None:
print "[producer-pid({}): log] No more update to process.".format(os.getpid())
return end_producer(queueIn)
else:
start_precomp = time.time()
# check that sha1s of batch have no precomputed similarities already in sha1_infos table
valid_sha1s, not_indexed_sha1s, precomp_sim_sha1s = check_indexed_noprecomp(searcher_producer, str_list_sha1s.split(','))
# should we split valid_sha1s in batches of 100 or something smaller than 10K currently?
searcher_producer.indexer.write_batch([(update_id, {searcher_producer.indexer.precomp_start_marker: 'True'})], searcher_producer.indexer.table_updateinfos_name)
# push updates to be processed in queueIn
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue.qsize
# qsize raises NotImplemented Error on OS X...
#print "[producer: log] Pushing update {} in queue containing {} items at {}.".format(update_id, queueIn.qsize(), get_now())
print "[producer-pid({}): log] Pushing update {} at {}.".format(os.getpid(), update_id, get_now())
sys.stdout.flush()
queueIn.put((update_id, valid_sha1s, start_precomp))
print "[producer-pid({}): log] Pushed update {} to queueIn at {}.".format(os.getpid(), update_id, get_now())
sys.stdout.flush()
except Exception as inst:
print "[producer-pid({}): error] Error at {}. Leaving. Error was: {}".format(os.getpid(), get_now(), inst)
return end_producer(queueIn)
def end_consumer(queueIn, queueOut):
print "[consumer-pid({}): log] ending consumer at {}".format(os.getpid(), get_now())
#queueIn.task_done()
queueOut.put((None, None, None, None, None, None))
def consumer(global_conf_file, queueIn, queueOut, queueConsumer):
print "[consumer-pid({}): log] Started a consumer worker at {}".format(os.getpid(), get_now())
sys.stdout.flush()
searcher_consumer = searcher_hbaseremote.Searcher(global_conf_file)
print "[consumer-pid({}): log] Consumer worker ready at {}".format(os.getpid(), get_now())
queueConsumer.put("Consumer ready")
sys.stdout.flush()
while True:
try:
## reads from queueIn
print "[consumer-pid({}): log] Consumer worker waiting for update at {}".format(os.getpid(), get_now())
sys.stdout.flush()
update_id, valid_sha1s, start_precomp = queueIn.get(True, queue_timeout)
if update_id is None:
# declare worker ended
print "[consumer-pid({}): log] Consumer worker ending at {}".format(os.getpid(), get_now())
return end_consumer(queueIn, queueOut)
## search
print "[consumer-pid({}): log] Consumer worker computing similarities for {} valid sha1s of update {} at {}".format(os.getpid(), len(valid_sha1s), update_id, get_now())
sys.stdout.flush()
start_search = time.time()
# precompute similarities using searcher
# for v1 check_indexed_noprecomp
#simname, corrupted = searcher_consumer.search_from_sha1_list_get_simname(valid_sha1s, update_id)
simname, corrupted = searcher_consumer.search_from_listid_get_simname(valid_sha1s, update_id, check_already_computed=True)
elapsed_search = time.time() - start_search
print "[consumer-pid({}): log] Consumer worker processed update {} at {}. Search performed in {}s.".format(os.getpid(), update_id, get_now(), elapsed_search)
sys.stdout.flush()
## push to queueOut
#queueIn.task_done()
start_push = time.time()
queueOut.put((update_id, simname, valid_sha1s, corrupted, start_precomp, elapsed_search))
print "[consumer-pid({}): log] Consumer worker pushed update {} to queueOut in {}s at {}.".format(os.getpid(), update_id, time.time()-start_push, get_now())
sys.stdout.flush()
except Exception as inst:
print "[consumer-pid({}): error] Consumer worker caught error at {}. Error was {}".format(os.getpid(), get_now(), inst)
#return end_consumer(queueIn, queueOut)
def end_finalizer(queueOut, queueFinalizer):
print "[finalizer-pid({}): log] ending finalizer at {}".format(os.getpid(), get_now())
queueFinalizer.put("Finalizer ended")
#queueOut.close()
def finalizer(global_conf_file, queueOut, queueFinalizer):
print "[finalizer-pid({}): log] Started a finalizer worker at {}".format(os.getpid(), get_now())
sys.stdout.flush()
searcher_finalizer = searcher_hbaseremote.Searcher(global_conf_file)
print "[finalizer-pid({}): log] Finalizer worker ready at {}".format(os.getpid(), get_now())
queueFinalizer.put("Finalizer ready")
count_workers_ended = 0
while True:
try:
## Read from queueOut
print "[finalizer-pid({}): log] Finalizer worker waiting for an update at {}".format(os.getpid(), get_now())
sys.stdout.flush()
            # This seems to block (or fail to get update info) even when items have already been pushed to queueOut??
update_id, simname, valid_sha1s, corrupted, start_precomp, elapsed_search = queueOut.get(block=True, timeout=queue_timeout)
if update_id is None:
count_workers_ended += 1
print "[finalizer-pid({}): log] {} consumer workers ended out of {} at {}.".format(os.getpid(), count_workers_ended, nb_workers, get_now())
#queueOut.task_done()
if count_workers_ended == nb_workers:
# fully done
print "[finalizer-pid({}): log] All consumer workers ended at {}. Leaving.".format(os.getpid(), get_now())
return end_finalizer(queueOut, queueFinalizer)
continue
print "[finalizer-pid({}): log] Finalizer worker got update {} from queueOut to finalize at {}".format(os.getpid(), update_id, get_now())
sys.stdout.flush()
## Check if update was not already finished by another finalizer?
## Push computed similarities
print simname
# format for saving in HBase:
# - batch_sim: should be a list of sha1 row key, dict of "s:similar_sha1": dist_value
# - batch_mark_precomp_sim: should be a list of sha1 row key, dict of precomp_sim_column: True
batch_sim, batch_mark_precomp_sim = format_batch_sim(simname, valid_sha1s, corrupted, searcher_finalizer)
# push similarities to HBI_table_sim (escorts_images_similar_row_dev) using searcher.indexer.write_batch
if batch_sim:
searcher_finalizer.indexer.write_batch(batch_sim, searcher_finalizer.indexer.table_sim_name)
# push to weekly update table for Amandeep to integrate in DIG
week, year = get_week_year()
weekly_sim_table_name = searcher_finalizer.indexer.table_sim_name+"_Y{}W{}".format(year, week)
print "[finalizer-pid({}): log] weekly table name: {}".format(os.getpid(), weekly_sim_table_name)
weekly_sim_table = searcher_finalizer.indexer.get_create_table(weekly_sim_table_name, families={'s': dict()})
searcher_finalizer.indexer.write_batch(batch_sim, weekly_sim_table_name)
## Mark as done
# mark precomp_sim true in escorts_images_sha1_infos_dev
searcher_finalizer.indexer.write_batch(batch_mark_precomp_sim, searcher_finalizer.indexer.table_sha1infos_name)
# mark info:precomp_finish in escorts_images_updates_dev
if not corrupted: # do not mark finished if we faced some issue? mark as corrupted?
searcher_finalizer.indexer.write_batch([(update_id, {searcher_finalizer.indexer.precomp_end_marker: 'True'})],
searcher_finalizer.indexer.table_updateinfos_name)
print "[finalizer-pid({}): log] Finalize update {} at {} in {}s total.".format(os.getpid(), update_id, get_now(), time.time() - start_precomp)
sys.stdout.flush()
## Cleanup
if simname:
try:
# remove simname
os.remove(simname)
# remove features file
featfirst = simname.split('sim')[0]
featfn = featfirst[:-1]+'.dat'
#print "[process_one_update: log] Removing file {}".format(featfn)
os.remove(featfn)
except Exception as inst:
print "[finalizer-pid({}): error] Could not cleanup. Error was: {}".format(os.getpid(), inst)
#queueOut.task_done()
except Exception as inst:
#[finalizer: error] Caught error at 2017-04-14:04.29.23. Leaving. Error was: list index out of range
print "[finalizer-pid({}): error] Caught error at {}. Error {} was: {}".format(os.getpid(), get_now(), type(inst), inst)
# now we catch timeout too, so we are no longer leaving...
#return end_finalizer(queueOut, queueFinalizer)
def get_now():
return datetime.datetime.now().strftime("%Y-%m-%d:%H.%M.%S")
def get_week(today=None):
    # Note: a datetime.datetime.now() default argument is evaluated once at import
    # time, which would freeze the date for this long-running script; resolve it at
    # call time instead.
    today = today or datetime.datetime.now()
    return today.strftime("%W")
def get_year(today=None):
    today = today or datetime.datetime.now()
    return today.strftime("%Y")
def get_week_year(today=None):
    today = today or datetime.datetime.now()
week = get_week(today)
year = get_year(today)
return week, year
def check_indexed_noprecomp(searcher, list_sha1s):
print "[check_indexed_noprecomp: log] verifying validity of list_sha1s."
sys.stdout.flush()
columns_check = [searcher.indexer.cu_feat_id_column, searcher.indexer.precomp_sim_column]
# Is this blocking in parallel mode?
rows = searcher.indexer.get_columns_from_sha1_rows(list_sha1s, columns=columns_check)
not_indexed_sha1s = []
precomp_sim_sha1s = []
valid_sha1s = []
for row in rows:
#print row
# check searcher.indexer.cu_feat_id_column exists
if searcher.indexer.cu_feat_id_column not in row[1]:
not_indexed_sha1s.append(str(row[0]))
print "[check_indexed_noprecomp: log] found unindexed image {}".format(str(row[0]))
sys.stdout.flush()
continue
# check searcher.indexer.precomp_sim_column does not exist
if searcher.indexer.precomp_sim_column in row[1]:
precomp_sim_sha1s.append(str(row[0]))
#print "[check_indexed_noprecomp: log] found image {} with already precomputed similar images".format(str(row[0]))
#sys.stdout.flush()
continue
valid_sha1s.append((long(row[1][searcher.indexer.cu_feat_id_column]), str(row[0])))
# v1 was:
#valid_sha1s = list(set(list_sha1s) - set(not_indexed_sha1s) - set(precomp_sim_sha1s))
msg = "{} valid sha1s, {} not indexed sha1s, {} already precomputed similarities sha1s."
print("[check_indexed_noprecomp: log] "+msg.format(len(valid_sha1s), len(not_indexed_sha1s), len(precomp_sim_sha1s)))
sys.stdout.flush()
return valid_sha1s, not_indexed_sha1s, precomp_sim_sha1s
def read_sim_precomp(simname, nb_query, searcher):
    # initialization
sim = []
sim_score = []
if simname is not None:
# read similar images
count = 0
f = open(simname);
for line in f:
#sim_index.append([])
nums = line.replace(' \n','').split(' ')
#filter near duplicate here
nums = searcher.filter_near_dup(nums, searcher.near_dup_th)
#print nums
onum = len(nums)/2
n = onum
#print n
if onum==0: # no returned images, e.g. no near duplicate
sim.append(())
sim_score.append([])
continue
# get the sha1s of similar images
sim_infos = [searcher.indexer.sha1_featid_mapping[int(i)] for i in nums[0:n]]
# beware, need to make sure sim and sim_score are still aligned
#print("[read_sim] got {} sim_infos from {} samples".format(len(sim_infos), n))
sim.append(sim_infos)
sim_score.append(nums[onum:onum+n])
count = count + 1
if count == nb_query:
break
f.close()
return sim, sim_score
def format_batch_sim(simname, valid_sha1s, corrupted, searcher):
# format similarities for HBase output
nb_query = len(valid_sha1s) - len(corrupted)
sim, sim_score = read_sim_precomp(simname, nb_query, searcher)
# batch_sim: should be a list of sha1 row key, dict of all "s:similar_sha1": dist_value
batch_sim = []
# batch_mark_precomp_sim: should be a list of sha1 row key, dict of precomp_sim_column: True
batch_mark_precomp_sim = []
if sim:
if len(sim) != len(valid_sha1s) or len(sim_score) != len(valid_sha1s):
print "[format_batch_sim: warning] similarities and queries count are different."
print "[format_batch_sim: warning] corrupted is: {}.".format(corrupted)
return
# deal with corrupted
i_img = 0
for i,id_sha1 in enumerate(valid_sha1s):
# now valid_sha1s is a list of id and sha1 tuples
img_id, sha1 = id_sha1
if sha1 in corrupted:
continue
sim_columns = dict()
for i_sim,sim_img in enumerate(sim[i_img]):
sim_columns["s:"+str(sim_img)] = str(sim_score[i_img][i_sim])
sim_reverse = dict()
sim_reverse["s:"+sha1] = str(sim_score[i_img][i_sim])
batch_sim.append((str(sim_img), sim_reverse))
sim_row = (sha1, sim_columns)
batch_sim.append(sim_row)
batch_mark_precomp_sim.append((sha1,{searcher.indexer.precomp_sim_column: 'True'}))
i_img += 1
#print batch_sim
#print batch_mark_precomp_sim
return batch_sim, batch_mark_precomp_sim
def parallel_precompute(global_conf_file):
# Define queues
queueIn = Queue(nb_workers+2)
queueOut = Queue(nb_workers+8)
queueProducer = Queue()
queueFinalizer = Queue()
queueConsumer = Queue(nb_workers)
# Start finalizer
t = Process(target=finalizer, args=(global_conf_file, queueOut, queueFinalizer))
t.daemon = True
t.start()
# Start consumers
for i in range(nb_workers):
t = Process(target=consumer, args=(global_conf_file, queueIn, queueOut, queueConsumer))
t.daemon = True
t.start()
# Start producer
t = Process(target=producer, args=(global_conf_file, queueIn, queueProducer))
t.daemon = True
t.start()
# Wait for everything to be started properly
producerOK = queueProducer.get()
#queueProducer.task_done()
finalizerOK = queueFinalizer.get()
#queueFinalizer.task_done()
for i in range(nb_workers):
consumerOK = queueConsumer.get()
#queueConsumer.task_done()
print "[parallel_precompute: log] All workers are ready."
sys.stdout.flush()
# Wait for everything to be finished
finalizerEnded = queueFinalizer.get()
print finalizerEnded
return
if __name__ == "__main__":
""" Run precompute similar images based on `conf_file` given as parameter
"""
if len(sys.argv)<2:
print "python precompute_similar_images_parallel.py conf_file"
exit(-1)
global_conf_file = sys.argv[1]
while True:
parallel_precompute(global_conf_file)
print "[precompute_similar_images_parallel: log] Nothing to compute. Sleeping for {}s.".format(time_sleep)
sys.stdout.flush()
time.sleep(time_sleep)
|
atosorigin/ansible | refs/heads/devel | lib/ansible/plugins/lookup/__init__.py | 89 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import abstractmethod
from ansible.errors import AnsibleFileNotFound
from ansible.plugins import AnsiblePlugin
from ansible.utils.display import Display
display = Display()
__all__ = ['LookupBase']
class LookupBase(AnsiblePlugin):
def __init__(self, loader=None, templar=None, **kwargs):
super(LookupBase, self).__init__()
self._loader = loader
self._templar = templar
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
def get_basedir(self, variables):
if 'role_path' in variables:
return variables['role_path']
else:
return self._loader.get_basedir()
@staticmethod
def _flatten(terms):
ret = []
for term in terms:
if isinstance(term, (list, tuple)):
ret.extend(term)
else:
ret.append(term)
return ret
@staticmethod
def _combine(a, b):
results = []
for x in a:
for y in b:
results.append(LookupBase._flatten([x, y]))
return results
@staticmethod
def _flatten_hash_to_list(terms):
ret = []
for key in terms:
ret.append({'key': key, 'value': terms[key]})
return ret
@abstractmethod
def run(self, terms, variables=None, **kwargs):
"""
When the playbook specifies a lookup, this method is run. The
arguments to the lookup become the arguments to this method. One
additional keyword argument named ``variables`` is added to the method
call. It contains the variables available to ansible at the time the
lookup is templated. For instance::
"{{ lookup('url', 'https://toshio.fedorapeople.org/one.txt', validate_certs=True) }}"
would end up calling the lookup plugin named url's run method like this::
run(['https://toshio.fedorapeople.org/one.txt'], variables=available_variables, validate_certs=True)
Lookup plugins can be used within playbooks for looping. When this
happens, the first argument is a list containing the terms. Lookup
plugins can also be called from within playbooks to return their
values into a variable or parameter. If the user passes a string in
this case, it is converted into a list.
Errors encountered during execution should be returned by raising
AnsibleError() with a message describing the error.
Any strings returned by this method that could ever contain non-ascii
must be converted into python's unicode type as the strings will be run
through jinja2 which has this requirement. You can use::
from ansible.module_utils._text import to_text
result_string = to_text(result_string)
"""
pass
def find_file_in_search_path(self, myvars, subdir, needle, ignore_missing=False):
'''
Return a file (needle) in the task's expected search path.
'''
if 'ansible_search_path' in myvars:
paths = myvars['ansible_search_path']
else:
paths = [self.get_basedir(myvars)]
result = None
try:
result = self._loader.path_dwim_relative_stack(paths, subdir, needle)
except AnsibleFileNotFound:
if not ignore_missing:
self._display.warning("Unable to find '%s' in expected paths (use -vvvvv to see paths)" % needle)
return result
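# A minimal lookup plugin sketch built on LookupBase (an illustrative example, not part
# of this file; a real plugin lives in a lookup_plugins/ directory and would import
# to_text as described in the run() docstring above):
#
#     class LookupModule(LookupBase):
#         def run(self, terms, variables=None, **kwargs):
#             # echo each term back upper-cased
#             return [to_text(term).upper() for term in terms]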
|
openhatch/oh-mainline | refs/heads/master | vendor/packages/Django/tests/regressiontests/urlpatterns_reverse/tests.py | 17 | # -*- coding: utf-8 -*-
"""
Unit tests for reverse URL lookups.
"""
from __future__ import absolute_import, unicode_literals
import sys
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.core.urlresolvers import (reverse, resolve, get_callable,
get_resolver, NoReverseMatch, Resolver404, ResolverMatch, RegexURLResolver,
RegexURLPattern)
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import redirect
from django.test import TestCase
from django.utils import unittest, six
from . import urlconf_outer, middleware, views
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/view_class/42/37/', 'view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/normal/42/37/', 'inc-normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/view_class/42/37/', 'inc-view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
('/included/mixed_args/42/37/', 'inc-mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
# Unnamed views will be resolved to the function/class name
('/unnamed/normal/42/37/', 'regressiontests.urlpatterns_reverse.views.empty_view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/unnamed/view_class/42/37/', 'regressiontests.urlpatterns_reverse.views.ViewClass', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', None, '', views.empty_view, ('42','37'), {}),
('/included/no_kwargs/42/37/', 'inc-no-kwargs', None, '', views.empty_view, ('42','37'), {}),
# Namespaces
('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/normal/42/37/', 'inc-normal-view', None, 'inc-ns1', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
# Nested namespaces
('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
# Namespaces capturing variables
('/inc70/', 'inner-nothing', None, 'inc-ns5', views.empty_view, tuple(), {'outer': '70'}),
('/inc78/extra/foobar/', 'inner-extra', None, 'inc-ns5', views.empty_view, tuple(), {'outer':'78', 'extra':'foobar'}),
)
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')),
('special', r'/special_chars/+%5C$*/', [r'+\$*'], {}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
('test', '/test/1', [], {}),
('test2', '/test/2', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Regression for #9038
# These views are resolved by method name. Each method is deployed twice -
# once with an explicit argument, and once using the default value on
# the method. This is potentially ambiguous, as you have to pick the
# correct view for the arguments provided.
('kwargs_view', '/arg_view/', [], {}),
('kwargs_view', '/arg_view/10/', [], {'arg1':10}),
('regressiontests.urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/', [], {}),
('regressiontests.urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/10/', [], {'arg1':10}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
)
class NoURLPatternsTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.no_urls'
def test_no_urls_exception(self):
"""
RegexURLResolver should raise an exception when no urlpatterns exist.
"""
resolver = RegexURLResolver(r'^$', self.urls)
self.assertRaisesMessage(ImproperlyConfigured,
"The included urlconf regressiontests.urlpatterns_reverse.no_urls "\
"doesn't have any patterns in it", getattr, resolver, 'url_patterns')
class URLPatternReverse(TestCase):
urls = 'regressiontests.urlpatterns_reverse.urls'
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
self.assertRaises(NoReverseMatch, reverse, None)
def test_prefix_braces(self):
self.assertEqual('/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include', prefix='/{{invalid}}/'))
def test_prefix_parenthesis(self):
self.assertEqual('/bogus%29/includes/non_path_include/',
reverse('non_path_include', prefix='/bogus)/'))
def test_prefix_format_char(self):
self.assertEqual('/bump%2520map/includes/non_path_include/',
reverse('non_path_include', prefix='/bump%20map/'))
class ResolverTests(unittest.TestCase):
def test_resolver_repr(self):
"""
Test repr of RegexURLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced urlconf
resolver = get_resolver('regressiontests.urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<RegexURLPattern list>', repr(sub_resolver))
def test_non_regex(self):
"""
Verifies that we raise a Resolver404 if what we are resolving doesn't
meet the basic requirements of a path to match - i.e., at the very
least, it matches the root pattern '^/'. We must never return None
from resolve, or we will get a TypeError further down the line.
Regression for #10834.
"""
self.assertRaises(Resolver404, resolve, '')
self.assertRaises(Resolver404, resolve, 'a')
self.assertRaises(Resolver404, resolve, '\\')
self.assertRaises(Resolver404, resolve, '.')
def test_404_tried_urls_have_names(self):
"""
Verifies that the list of URLs that come back from a Resolver404
exception contains a list in the right format for printing out in
the DEBUG 404 page with both the patterns and URL names, if available.
"""
urls = 'regressiontests.urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a non-existent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/non-existent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
try:
resolve('/included/non-existent-url', urlconf=urls)
self.fail('resolve did not raise a 404')
except Resolver404 as e:
# make sure we at least matched the root ('/') url resolver:
self.assertTrue('tried' in e.args[0])
tried = e.args[0]['tried']
self.assertEqual(len(e.args[0]['tried']), len(url_types_names), 'Wrong number of tried URLs returned. Expected %s, got %s.' % (len(url_types_names), len(e.args[0]['tried'])))
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
self.assertTrue(isinstance(t, e['type']), str('%s is not an instance of %s') % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertTrue(t.name is None, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(t.name, e['name'], 'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name))
class ReverseLazyTest(TestCase):
urls = 'regressiontests.urlpatterns_reverse.reverse_lazy_urls'
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=301)
def test_user_permission_with_lazy_reverse(self):
user = User.objects.create_user('alfred', '[email protected]', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.login(username='alfred', password='testpw')
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
class ReverseShortcutTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.urls'
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj(object):
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertTrue(isinstance(res, HttpResponseRedirect))
self.assertEqual(res['Location'], '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertTrue(isinstance(res, HttpResponsePermanentRedirect))
self.assertEqual(res['Location'], '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res['Location'], '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res['Location'], '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res['Location'], '/headlines/2008.02.17/')
self.assertRaises(NoReverseMatch, redirect, 'not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res['Location'], '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res['Location'], 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res['Location'], '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
        # (previously, the below would result in a UnicodeEncodeError from __import__)
res = redirect('/æøå.abc/')
self.assertEqual(res['Location'], '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res['Location'], 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_reverse_by_path_nested(self):
# Views that are added to urlpatterns using include() should be
        # reversible by dotted path.
self.assertEqual(reverse('regressiontests.urlpatterns_reverse.views.nested_view'), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res['Location'], '/absolute_arg_view/')
self.assertRaises(NoReverseMatch, redirect, absolute_kwargs_view, wrong_argument=None)
class NamespaceTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.namespace_urls'
def test_ambiguous_object(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', args=[37,42])
self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', kwargs={'arg1':42, 'arg2':37})
def test_ambiguous_urlpattern(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing')
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', args=[37,42])
self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', kwargs={'arg1':42, 'arg2':37})
def test_non_existent_namespace(self):
"Non-existent namespaces raise errors"
self.assertRaises(NoReverseMatch, reverse, 'blahblah:urlobject-view')
self.assertRaises(NoReverseMatch, reverse, 'test-ns1:blahblah:urlobject-view')
def test_normal_name(self):
"Normal lookups work as expected"
self.assertEqual('/normal/', reverse('normal-view'))
self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37,42]))
self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/+%5C$*/', reverse('special-view'))
def test_simple_included_name(self):
"Normal lookups work on names included from other patterns"
self.assertEqual('/included/normal/', reverse('inc-normal-view'))
self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37,42]))
self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/included/+%5C$*/', reverse('inc-special-view'))
def test_namespace_object(self):
"Dynamic URL objects can be found using a namespace"
self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37,42]))
self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
def test_embedded_namespace_object(self):
"Namespaces can be installed anywhere in the URL pattern tree"
self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view'))
self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37,42]))
self.assertEqual('/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view'))
def test_namespace_pattern(self):
"Namespaces can be applied to include()'d urlpatterns"
self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37,42]))
self.assertEqual('/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
def test_namespace_pattern_with_variable_prefix(self):
"When using a include with namespaces when there is a regex variable in front of it"
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer':42}))
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', kwargs={'outer':42, 'arg1': 37, 'arg2': 4}))
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer':42}))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
def test_multiple_namespace_pattern(self):
"Namespaces can be embedded"
self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37,42]))
self.assertEqual('/ns-included1/test3/inner/42/37/', reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
def test_nested_namespace_pattern(self):
"Namespaces can be nested"
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/37/42/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37,42]))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/42/37/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view'))
def test_app_lookup_object(self):
"A default application namespace can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37,42]))
self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
def test_app_lookup_object_with_default(self):
"A default application namespace is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
self.assertEqual('/included/test3/inner/37/42/', reverse('testapp:urlobject-view', args=[37,42], current_app='test-ns3'))
self.assertEqual('/included/test3/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1':42, 'arg2':37}, current_app='test-ns3'))
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3'))
def test_app_lookup_object_without_default(self):
"An application namespace without a default is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37,42]))
self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
self.assertEqual('/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37,42], current_app='other-ns1'))
self.assertEqual('/other1/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1':42, 'arg2':37}, current_app='other-ns1'))
self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
def test_special_chars_namespace(self):
self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view'))
self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37,42]))
self.assertEqual('/+%5C$*/included/normal/42/37/', reverse('special:inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view'))
def test_namespaces_with_variables(self):
"Namespace prefixes can capture variables: see #15900"
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer':'78', 'extra':'foobar'}))
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78','foobar']))
class RequestURLconfTests(TestCase):
def setUp(self):
self.root_urlconf = settings.ROOT_URLCONF
self.middleware_classes = settings.MIDDLEWARE_CLASSES
settings.ROOT_URLCONF = urlconf_outer.__name__
def tearDown(self):
settings.ROOT_URLCONF = self.root_urlconf
settings.MIDDLEWARE_CLASSES = self.middleware_classes
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,'
b'inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
def test_urlconf_overridden(self):
settings.MIDDLEWARE_CLASSES += (
'%s.ChangeURLconfMiddleware' % middleware.__name__,
)
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
def test_urlconf_overridden_with_null(self):
settings.MIDDLEWARE_CLASSES += (
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
)
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
class ErrorHandlerResolutionTests(TestCase):
"""Tests for handler404 and handler500"""
def setUp(self):
from django.core.urlresolvers import RegexURLResolver
urlconf = 'regressiontests.urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'regressiontests.urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = RegexURLResolver(r'^$', urlconf)
self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
def test_named_handlers(self):
from .views import empty_view
handler = (empty_view, {})
self.assertEqual(self.resolver.resolve404(), handler)
self.assertEqual(self.resolver.resolve500(), handler)
def test_callable_handers(self):
from .views import empty_view
handler = (empty_view, {})
self.assertEqual(self.callable_resolver.resolve404(), handler)
self.assertEqual(self.callable_resolver.resolve500(), handler)
class DefaultErrorHandlerTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.urls_without_full_import'
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
try:
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 404 handler")
try:
self.assertRaises(ValueError, self.client.get, '/bad_view/')
except AttributeError:
self.fail("Shouldn't get an AttributeError due to undefined 500 handler")
class NoRootUrlConfTests(TestCase):
"""Tests for handler404 and handler500 if urlconf is None"""
urls = None
def test_no_handler_exception(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
class ResolverMatchTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.namespace_urls'
def test_urlpattern_resolve(self):
for path, name, app_name, namespace, func, args, kwargs in resolve_test_data:
# Test legacy support for extracting "function, args, kwargs"
match_func, match_args, match_kwargs = resolve(path)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# Test ResolverMatch capabilities.
match = resolve(path)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, name)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.func, func)
# ... and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
class ErroneousViewTests(TestCase):
urls = 'regressiontests.urlpatterns_reverse.erroneous_urls'
def test_erroneous_resolve(self):
self.assertRaises(ImportError, self.client.get, '/erroneous_inner/')
self.assertRaises(ImportError, self.client.get, '/erroneous_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_inner/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_outer/')
self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable/')
def test_erroneous_reverse(self):
"""
Ensure that a useful exception is raised when a regex is invalid in the
URLConf.
Refs #6170.
"""
# The regex error will be hit before NoReverseMatch can be raised
self.assertRaises(ImproperlyConfigured, reverse, 'whatever blah blah')
class ViewLoadingTests(TestCase):
def test_view_loading(self):
# A missing view (identified by an AttributeError) should raise
# ViewDoesNotExist, ...
six.assertRaisesRegex(self, ViewDoesNotExist, ".*View does not exist in.*",
get_callable,
'regressiontests.urlpatterns_reverse.views.i_should_not_exist')
# ... but if the AttributeError is caused by something else don't
# swallow it.
self.assertRaises(AttributeError, get_callable,
'regressiontests.urlpatterns_reverse.views_broken.i_am_broken')
|
ravindrapanda/tensorflow | refs/heads/master | tensorflow/python/summary/summary_test.py | 75 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary as summary_lib
class ScalarSummaryTest(test.TestCase):
def testScalarSummary(self):
with self.test_session() as s:
i = constant_op.constant(3)
with ops.name_scope('outer'):
im = summary_lib.scalar('inner', i)
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 1)
self.assertEqual(values[0].tag, 'outer/inner')
self.assertEqual(values[0].simple_value, 3.0)
def testScalarSummaryWithFamily(self):
with self.test_session() as s:
i = constant_op.constant(7)
with ops.name_scope('outer'):
im1 = summary_lib.scalar('inner', i, family='family')
self.assertEquals(im1.op.name, 'outer/family/inner')
im2 = summary_lib.scalar('inner', i, family='family')
self.assertEquals(im2.op.name, 'outer/family/inner_1')
sm1, sm2 = s.run([im1, im2])
summary = summary_pb2.Summary()
summary.ParseFromString(sm1)
values = summary.value
self.assertEqual(len(values), 1)
self.assertEqual(values[0].tag, 'family/outer/family/inner')
self.assertEqual(values[0].simple_value, 7.0)
summary.ParseFromString(sm2)
values = summary.value
self.assertEqual(len(values), 1)
self.assertEqual(values[0].tag, 'family/outer/family/inner_1')
self.assertEqual(values[0].simple_value, 7.0)
def testSummarizingVariable(self):
with self.test_session() as s:
c = constant_op.constant(42.0)
v = variables.Variable(c)
ss = summary_lib.scalar('summary', v)
init = variables.global_variables_initializer()
s.run(init)
summ_str = s.run(ss)
summary = summary_pb2.Summary()
summary.ParseFromString(summ_str)
self.assertEqual(len(summary.value), 1)
value = summary.value[0]
self.assertEqual(value.tag, 'summary')
self.assertEqual(value.simple_value, 42.0)
def testImageSummary(self):
with self.test_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
im = summary_lib.image('inner', i, max_outputs=3)
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('outer/inner/image/{}'.format(i) for i in xrange(3))
self.assertEqual(tags, expected)
def testImageSummaryWithFamily(self):
with self.test_session() as s:
i = array_ops.ones((5, 2, 3, 1))
with ops.name_scope('outer'):
im = summary_lib.image('inner', i, max_outputs=3, family='family')
self.assertEquals(im.op.name, 'outer/family/inner')
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('family/outer/family/inner/image/{}'.format(i)
for i in xrange(3))
self.assertEqual(tags, expected)
def testHistogramSummary(self):
with self.test_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
summ_op = summary_lib.histogram('inner', i)
summary_str = s.run(summ_op)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
self.assertEqual(len(summary.value), 1)
self.assertEqual(summary.value[0].tag, 'outer/inner')
def testHistogramSummaryWithFamily(self):
with self.test_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
summ_op = summary_lib.histogram('inner', i, family='family')
self.assertEquals(summ_op.op.name, 'outer/family/inner')
summary_str = s.run(summ_op)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
self.assertEqual(len(summary.value), 1)
self.assertEqual(summary.value[0].tag, 'family/outer/family/inner')
def testAudioSummary(self):
with self.test_session() as s:
i = array_ops.ones((5, 3, 4))
with ops.name_scope('outer'):
aud = summary_lib.audio('inner', i, 0.2, max_outputs=3)
summary_str = s.run(aud)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('outer/inner/audio/{}'.format(i) for i in xrange(3))
self.assertEqual(tags, expected)
def testAudioSummaryWithFamily(self):
with self.test_session() as s:
i = array_ops.ones((5, 3, 4))
with ops.name_scope('outer'):
aud = summary_lib.audio('inner', i, 0.2, max_outputs=3, family='family')
self.assertEquals(aud.op.name, 'outer/family/inner')
summary_str = s.run(aud)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('family/outer/family/inner/audio/{}'.format(i)
for i in xrange(3))
self.assertEqual(tags, expected)
def testSummaryNameConversion(self):
c = constant_op.constant(3)
s = summary_lib.scalar('name with spaces', c)
self.assertEqual(s.op.name, 'name_with_spaces')
s2 = summary_lib.scalar('name with many $#illegal^: characters!', c)
self.assertEqual(s2.op.name, 'name_with_many___illegal___characters_')
s3 = summary_lib.scalar('/name/with/leading/slash', c)
self.assertEqual(s3.op.name, 'name/with/leading/slash')
def testSummaryWithFamilyMetaGraphExport(self):
with ops.name_scope('outer'):
i = constant_op.constant(11)
summ = summary_lib.scalar('inner', i)
self.assertEquals(summ.op.name, 'outer/inner')
summ_f = summary_lib.scalar('inner', i, family='family')
self.assertEquals(summ_f.op.name, 'outer/family/inner')
metagraph_def, _ = meta_graph.export_scoped_meta_graph(export_scope='outer')
with ops.Graph().as_default() as g:
meta_graph.import_scoped_meta_graph(metagraph_def, graph=g,
import_scope='new_outer')
# The summaries should exist, but with outer scope renamed.
new_summ = g.get_tensor_by_name('new_outer/inner:0')
new_summ_f = g.get_tensor_by_name('new_outer/family/inner:0')
# However, the tags are unaffected.
with self.test_session() as s:
new_summ_str, new_summ_f_str = s.run([new_summ, new_summ_f])
new_summ_pb = summary_pb2.Summary()
new_summ_pb.ParseFromString(new_summ_str)
self.assertEquals('outer/inner', new_summ_pb.value[0].tag)
new_summ_f_pb = summary_pb2.Summary()
new_summ_f_pb.ParseFromString(new_summ_f_str)
self.assertEquals('family/outer/family/inner',
new_summ_f_pb.value[0].tag)
if __name__ == '__main__':
test.main()
|
KingOfBanana/SocialNetworkAI | refs/heads/master | page_get/basic.py | 1 | # coding:utf-8
import os
import time
import signal
import requests
from headers import headers
from db.redis_db import Urls
from db.redis_db import Cookies
from logger.log import crawler, other
from db.login_info import freeze_account
from utils.email_warning import send_email
from page_parse.basic import is_403, is_404, is_complete
from decorators.decorator import timeout_decorator, timeout
from config.conf import get_timeout, get_crawl_interal, get_excp_interal, get_max_retries
time_out = get_timeout()
interal = get_crawl_interal()
max_retries = get_max_retries()
excp_interal = get_excp_interal()
def is_banned(url):
if 'unfreeze' in url or 'accessdeny' in url or 'userblock' in url:
return True
return False
@timeout(200)
@timeout_decorator
def get_page(url, user_verify=True, need_login=True, proxys={}):
"""
:param url: url to be crawled
    :param user_verify: False if it's an ajax url, True otherwise
    :param need_login: True if the url requires login, False otherwise
    :return: '' if an exception happens or status_code != 200
"""
crawler.info('the crawling url is {url}'.format(url=url))
count = 0
while count < max_retries:
if need_login:
name_cookies = Cookies.fetch_cookies()
if name_cookies is None:
crawler.warning('no cookies in cookies pool, please find out the reason')
send_email()
os.kill(os.getppid(), signal.SIGTERM)
try:
if need_login:
resp = requests.get(url, headers=headers, cookies=name_cookies[1], timeout=time_out, verify=False)
if "$CONFIG['islogin'] = '0'" in resp.text:
crawler.warning('account {} has been banned'.format(name_cookies[0]))
freeze_account(name_cookies[0], 0)
Cookies.delete_cookies(name_cookies[0])
continue
else:
# resp = requests.get(url, headers=headers, timeout=time_out, verify=False)
# test for proxy
# resp = requests.get(url, headers=headers, timeout=time_out, verify=False, proxies=proxys)
resp = requests.get(url, headers=headers, timeout=time_out, proxies=proxys)
# end
page = resp.text
if page:
page = page.encode('utf-8', 'ignore').decode('utf-8')
else:
continue
            # slow down to avoid being banned
time.sleep(interal)
if user_verify:
if is_banned(resp.url) or is_403(page):
crawler.warning('account {} has been banned'.format(name_cookies[0]))
freeze_account(name_cookies[0], 0)
Cookies.delete_cookies(name_cookies[0])
count += 1
continue
if 'verifybmobile' in resp.url:
crawler.warning('account {} has been locked,you should use your phone to unlock it'.
format(name_cookies[0]))
freeze_account(name_cookies[0], -1)
Cookies.delete_cookies(name_cookies[0])
continue
if not is_complete(page):
count += 1
continue
if is_404(page):
crawler.warning('{url} seems to be 404'.format(url=url))
return ''
except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError, AttributeError) as e:
            crawler.warning('exceptions happen when crawling {}, specific info: {}'.format(url, e))
count += 1
time.sleep(excp_interal)
else:
# Urls.store_crawl_url(url, 1)
return page
crawler.warning('max tries for {},check the url in redis db2'.format(url))
# Urls.store_crawl_url(url, 0)
return ''
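# Illustrative usage sketch (not part of the original module; assumes the redis cookie
# pool, config values and loggers imported above are already configured):
#   html = get_page('http://weibo.com/p/1005051234567890/info', user_verify=True)
#   if html:
#       pass  # hand the markup to the parsers in page_parse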
__all__ = ['get_page'] |
fjruizruano/ngs-protocols | refs/heads/master | massive_phylogeny_raxml_support.py | 1 | #! /usr/bin/python
from subprocess import call
from Bio import AlignIO
import sys
print "massive_phylogeny_raxml_support.py FastaFile NumberSearches NumberBootstrap NumberThreads Name"
try:
file = sys.argv[1]
except:
file = raw_input("FASTA file name: ")
try:
trees = sys.argv[2]
except:
trees = raw_input("Number of searches: ")
try:
bootstrap = sys.argv[3]
except:
bootstrap = raw_input("Number of bootstrap: ")
try:
threads = sys.argv[4]
except:
threads = raw_input("Number of threads: ")
try:
name = sys.argv[5]
except:
name = raw_input("Introduce_number: ")
AlignIO.convert(file, "fasta", file+".phy", "phylip-relaxed")
file_phy = file + ".phy"
try:
print "raxmlHPC-PTHREADS-AVX -T %s -m GTRCAT -p 12345 -# %s -s %s -n run1" % (threads, trees, file_phy)
call("raxmlHPC-PTHREADS-AVX -T %s -m GTRCAT -p 12345 -# %s -s %s -n run1" % (threads, trees, file_phy), shell=True)
except:
print "IT IS NOT GOOD. PLEASE, CHECK YOUR INPUT FILE(S)"
sys.exit()
call("raxmlHPC-PTHREADS-AVX -T %s -m GTRCAT -p 12345 -b 12345 -# %s -s %s -n run2" % (threads, bootstrap, file_phy), shell=True)
try:
call("raxmlHPC -m GTRCAT -p 12345 -f b -t RAxML_bestTree.run1 -z RAxML_bootstrap.run2 -n %s.run3" % (name), shell=True)
call("rm *.run1*", shell=True)
call("rm *.run2*", shell=True)
print "AND... HERE WE ARE!"
except:
print "SOMETHING HAS GONE BAD. PLEASE, CHECK YOUR INPUT FILE(S)"
sys.exit()
|
ntucllab/striatum | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
import os
from setuptools import setup
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# read the docs could not compile numpy and c extensions
if on_rtd:
setup_requires = []
install_requires = []
else:
setup_requires = [
'nose',
'coverage',
]
install_requires = [
'six',
'numpy',
'scipy',
'matplotlib',
]
long_description = ("See `github <https://github.com/ntucllab/striatum>`_ "
"for more information.")
setup(
name='striatum',
version='0.2.5',
description='Contextual bandit in python',
long_description=long_description,
author='Y.-A. Lin, Y.-Y. Yang',
author_email='[email protected], [email protected]',
url='https://github.com/ntucllab/striatum',
setup_requires=setup_requires,
install_requires=install_requires,
classifiers=[
'Topic :: Scientific/Engineering',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
test_suite='nose.collector',
packages=[
'striatum',
'striatum.bandit',
'striatum.storage',
'striatum.utils',
],
package_dir={
'striatum': 'striatum',
'striatum.bandit': 'striatum/bandit',
'striatum.storage': 'striatum/storage',
'striatum.utils': 'striatum/utils',
},
)
|
3dfxsoftware/cbss-addons | refs/heads/master | base_iban/__openerp__.py | 125 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'IBAN Bank Accounts',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
This module installs the base for IBAN (International Bank Account Number) bank accounts and checks their validity.
======================================================================================================================
It provides the ability to extract the correctly represented local accounts from IBAN accounts
with a single statement.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
    'data': ['base_iban_data.xml', 'base_iban_view.xml'],
'installable': True,
'auto_install': False,
'images': ['images/base_iban1.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
bazz-erp/erpnext | refs/heads/master | erpnext/schools/doctype/academic_year/test_academic_year.py | 54 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Academic Year')
class TestAcademicYear(unittest.TestCase):
pass
|
JoeLaMartina/aima-python | refs/heads/master | submissions/McLean/runPuzzles.py | 18 | import search
import submissions.McLean.puzzles as pz
def compare_searchers(problems, header, searchers=[]):
def do(searcher, problem):
p = search.InstrumentedProblem(problem)
goalNode = searcher(p)
return p, goalNode.path_cost
table = [[search.name(s)] + [do(s, p) for p in problems] for s in searchers]
search.print_table(table, header)
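# Each table cell below pairs an InstrumentedProblem with the solution's path cost; the
# instrumented problem's repr (in aima-python's search module) prints roughly
# <successor-calls/goal-tests/states-generated/first-found-state>, which is what the
# '(<succ/goal/stat/fina>, cost)' header refers to.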
compare_searchers(
problems=pz.myPuzzles,
header=['Searcher',
'(<succ/goal/stat/fina>, cost)'
],
searchers=[
search.breadth_first_search,
search.depth_first_graph_search,
]
) |
jbassen/edx-platform | refs/heads/master | lms/djangoapps/branding/__init__.py | 1 | from xmodule.modulestore.django import modulestore
from xmodule.course_module import CourseDescriptor
from django.conf import settings
from branding_stanford.models import TileConfiguration
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from microsite_configuration import microsite
from staticfiles.storage import staticfiles_storage
def get_visible_courses():
"""
Return the set of CourseDescriptors that should be visible in this branded instance
"""
# In the event we don't want any course tiles displayed
if not getattr(settings, 'DISPLAY_COURSE_TILES', False):
return []
filtered_by_org = microsite.get_value('course_org_filter')
if filtered_by_org:
_courses = modulestore().get_courses(org=filtered_by_org)
else:
_courses = modulestore().get_courses()
courses = [c for c in _courses
if isinstance(c, CourseDescriptor)]
courses = sorted(courses, key=lambda course: course.number)
subdomain = microsite.get_value('subdomain', 'default')
# See if we have filtered course listings in this domain
filtered_visible_ids = None
# this is legacy format which is outside of the microsite feature -- also handle dev case, which should not filter
if hasattr(settings, 'COURSE_LISTINGS') and subdomain in settings.COURSE_LISTINGS and not settings.DEBUG:
filtered_visible_ids = frozenset([SlashSeparatedCourseKey.from_deprecated_string(c) for c in settings.COURSE_LISTINGS[subdomain]])
filtered_by_db = TileConfiguration.objects.filter(
enabled=True,
).values('course_id').order_by('-change_date')
if filtered_by_db:
filtered_by_db_ids = [course['course_id'] for course in filtered_by_db]
filtered_by_db_keys = frozenset([SlashSeparatedCourseKey.from_string(c) for c in filtered_by_db_ids])
return [course for course in courses if course.id in filtered_by_db_keys]
if filtered_by_org:
return [course for course in courses if course.location.org == filtered_by_org]
if filtered_visible_ids:
return [course for course in courses if course.id in filtered_visible_ids]
else:
# Let's filter out any courses in an "org" that has been declared to be
# in a Microsite
org_filter_out_set = microsite.get_all_orgs()
return [course for course in courses if course.location.org not in org_filter_out_set]
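# Filtering precedence in get_visible_courses() above: an enabled TileConfiguration row
# wins first, then the microsite 'course_org_filter', then the legacy per-subdomain
# COURSE_LISTINGS setting, and otherwise courses belonging to any microsite org are
# filtered out.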
def get_university_for_request():
"""
Return the university name specified for the domain, or None
if no university was specified
"""
return microsite.get_value('university')
def get_logo_url():
"""
Return the url for the branded logo image to be used
"""
# if the MicrositeConfiguration has a value for the logo_image_url
# let's use that
image_url = microsite.get_value('logo_image_url')
if image_url:
return '{static_url}{image_url}'.format(
static_url=settings.STATIC_URL,
image_url=image_url
)
# otherwise, use the legacy means to configure this
university = microsite.get_value('university')
if university is None and settings.FEATURES.get('IS_EDX_DOMAIN', False):
return staticfiles_storage.url('images/edx-theme/edx-logo-77x36.png')
elif university:
return staticfiles_storage.url('images/{uni}-on-edx-logo.png'.format(uni=university))
else:
return staticfiles_storage.url('images/default-theme/logo.png')
|
samdmarshall/xcparse | refs/heads/develop | xcparse/Helpers/path_helper.py | 1 | import os
class path_helper(object):
"""
This is a path object to allow for root, base, and full path storage to create relative paths
"""
# base_path = '';
# obj_path = '';
# root_path = '';
def __init__(self, path, root):
self.obj_path = os.path.normpath(path);
self.base_path = os.path.dirname(self.obj_path);
if root == '':
self.root_path = self.obj_path;
else:
self.root_path = os.path.join(self.obj_path, root);
def __attrs(self):
return (self.obj_path, self.base_path, self.root_path);
def __eq__(self, other):
return isinstance(other, path_helper) and self.__attrs() == other.__attrs();
def __hash__(self):
return hash(self.__attrs());
def __repr__(self):
return '(%s : %s : %s)' % (type(self), self.base_path, self.obj_path);
@classmethod
def create_directories(cls, path):
if os.path.exists(path) == False:
os.makedirs(path); |
dmitry-sobolev/ansible | refs/heads/devel | test/units/playbook/test_playbook.py | 290 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook import Playbook
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
class TestPlaybook(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_playbook(self):
fake_loader = DictDataLoader({})
p = Playbook(loader=fake_loader)
def test_basic_playbook(self):
fake_loader = DictDataLoader({
"test_file.yml":"""
- hosts: all
""",
})
p = Playbook.load("test_file.yml", loader=fake_loader)
plays = p.get_plays()
def test_bad_playbook_files(self):
fake_loader = DictDataLoader({
# represents a playbook which is not a list of plays
"bad_list.yml": """
foo: bar
""",
# represents a playbook where a play entry is mis-formatted
"bad_entry.yml": """
-
- "This should be a mapping..."
""",
})
vm = VariableManager()
self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
|
40223134/w16b_test | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/unittest/test/test_discovery.py | 785 | import os
import re
import sys
import unittest
class TestableTestProgram(unittest.TestProgram):
module = '__main__'
exit = True
defaultTest = failfast = catchbreak = buffer = None
verbosity = 1
progName = ''
testRunner = testLoader = None
def __init__(self):
pass
class TestDiscovery(unittest.TestCase):
# Heavily mocked tests so I can avoid hitting the filesystem
def test_get_name_from_path(self):
loader = unittest.TestLoader()
loader._top_level_dir = '/foo'
name = loader._get_name_from_path('/foo/bar/baz.py')
self.assertEqual(name, 'bar.baz')
if not __debug__:
# asserts are off
return
with self.assertRaises(AssertionError):
loader._get_name_from_path('/bar/baz.py')
def test_find_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['test1.py', 'test2.py', 'not_a_test.py', 'test_dir',
'test.foo', 'test-not-a-module.py', 'another_dir'],
['test3.py', 'test4.py', ]]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
def isdir(path):
return path.endswith('dir')
os.path.isdir = isdir
self.addCleanup(restore_isdir)
def isfile(path):
# another_dir is not a package and so shouldn't be recursed into
return not path.endswith('dir') and not 'another_dir' in path
os.path.isfile = isfile
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
loader.loadTestsFromModule = lambda module: module + ' tests'
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
expected = [name + ' module tests' for name in
('test1', 'test2')]
expected.extend([('test_dir.%s' % name) + ' module tests' for name in
('test3', 'test4')])
self.assertEqual(suite, expected)
def test_find_tests_with_package(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return 'load_tests'
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
def loadTestsFromModule(module, use_load_tests):
if use_load_tests:
raise self.failureException('use_load_tests should be False for packages')
return module.path + ' module tests'
loader.loadTestsFromModule = loadTestsFromModule
loader._top_level_dir = '/foo'
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests('/foo', 'test*'))
# We should have loaded tests from the test_directory package by calling load_tests
# and directly from the test_directory2 package
self.assertEqual(suite,
['load_tests', 'test_directory2' + ' module tests'])
self.assertEqual(Module.paths, ['test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
self.assertEqual(Module.load_tests_args,
[(loader, 'test_directory' + ' module tests', 'test*')])
def test_discover(self):
loader = unittest.TestLoader()
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def restore_isfile():
os.path.isfile = original_isfile
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
orig_sys_path = sys.path[:]
def restore_path():
sys.path[:] = orig_sys_path
self.addCleanup(restore_path)
full_path = os.path.abspath(os.path.normpath('/foo'))
with self.assertRaises(ImportError):
loader.discover('/foo/bar', top_level_dir='/foo')
self.assertEqual(loader._top_level_dir, full_path)
self.assertIn(full_path, sys.path)
os.path.isfile = lambda path: True
os.path.isdir = lambda path: True
def restore_isdir():
os.path.isdir = original_isdir
self.addCleanup(restore_isdir)
_find_tests_args = []
def _find_tests(start_dir, pattern):
_find_tests_args.append((start_dir, pattern))
return ['tests']
loader._find_tests = _find_tests
loader.suiteClass = str
suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')
top_level_dir = os.path.abspath('/foo/bar')
start_dir = os.path.abspath('/foo/bar/baz')
self.assertEqual(suite, "['tests']")
self.assertEqual(loader._top_level_dir, top_level_dir)
self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
self.assertIn(top_level_dir, sys.path)
def test_discover_with_modules_that_fail_to_import(self):
loader = unittest.TestLoader()
listdir = os.listdir
os.listdir = lambda _: ['test_this_does_not_exist.py']
isfile = os.path.isfile
os.path.isfile = lambda _: True
orig_sys_path = sys.path[:]
def restore():
os.path.isfile = isfile
os.listdir = listdir
sys.path[:] = orig_sys_path
self.addCleanup(restore)
suite = loader.discover('.')
self.assertIn(os.getcwd(), sys.path)
self.assertEqual(suite.countTestCases(), 1)
test = list(list(suite)[0])[0] # extract test from suite
with self.assertRaises(ImportError):
test.test_this_does_not_exist()
def test_command_line_handling_parseArgs(self):
program = TestableTestProgram()
args = []
def do_discovery(argv):
args.extend(argv)
program._do_discovery = do_discovery
program.parseArgs(['something', 'discover'])
self.assertEqual(args, [])
program.parseArgs(['something', 'discover', 'foo', 'bar'])
self.assertEqual(args, ['foo', 'bar'])
def test_command_line_handling_discover_by_default(self):
program = TestableTestProgram()
program.module = None
self.called = False
def do_discovery(argv):
self.called = True
self.assertEqual(argv, [])
program._do_discovery = do_discovery
program.parseArgs(['something'])
self.assertTrue(self.called)
def test_command_line_handling_discover_by_default_with_options(self):
program = TestableTestProgram()
program.module = None
args = ['something', '-v', '-b', '-v', '-c', '-f']
self.called = False
def do_discovery(argv):
self.called = True
self.assertEqual(argv, args[1:])
program._do_discovery = do_discovery
program.parseArgs(args)
self.assertTrue(self.called)
def test_command_line_handling_do_discovery_too_many_arguments(self):
class Stop(Exception):
pass
def usageExit():
raise Stop
program = TestableTestProgram()
program.usageExit = usageExit
with self.assertRaises(Stop):
# too many args
program._do_discovery(['one', 'two', 'three', 'four'])
def test_command_line_handling_do_discovery_calls_loader(self):
program = TestableTestProgram()
class Loader(object):
args = []
def discover(self, start_dir, pattern, top_level_dir):
self.args.append((start_dir, pattern, top_level_dir))
return 'tests'
program._do_discovery(['-v'], Loader=Loader)
self.assertEqual(program.verbosity, 2)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['--verbose'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery([], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-s', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-t', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'fish', None)])
self.assertFalse(program.failfast)
self.assertFalse(program.catchbreak)
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v', '-f', '-c'],
Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
self.assertEqual(program.verbosity, 2)
self.assertTrue(program.failfast)
self.assertTrue(program.catchbreak)
def test_detect_module_clash(self):
class Module(object):
__file__ = 'bar/foo.py'
sys.modules['foo'] = Module
full_path = os.path.abspath('foo')
original_listdir = os.listdir
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def cleanup():
os.listdir = original_listdir
os.path.isfile = original_isfile
os.path.isdir = original_isdir
del sys.modules['foo']
if full_path in sys.path:
sys.path.remove(full_path)
self.addCleanup(cleanup)
def listdir(_):
return ['foo.py']
def isfile(_):
return True
def isdir(_):
return True
os.listdir = listdir
os.path.isfile = isfile
os.path.isdir = isdir
loader = unittest.TestLoader()
mod_dir = os.path.abspath('bar')
expected_dir = os.path.abspath('foo')
msg = re.escape(r"'foo' module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?" % (mod_dir, expected_dir))
self.assertRaisesRegex(
ImportError, '^%s$' % msg, loader.discover,
start_dir='foo', pattern='foo.py'
)
self.assertEqual(sys.path[0], full_path)
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
if __name__ == '__main__':
unittest.main()
|
IEMLdev/ieml-api | refs/heads/master | ieml/usl/distance/sort.py | 2 | import numpy as np
from ieml.usl import Usl  # assumed import path; Usl is referenced by the isinstance check in QuerySort below
def square_order_matrix(usl_list):
"""
    Compute, for every usl in the list, the ordering of the whole list by proximity to
    that usl, and return the matrix m such that m[i, j] is the rank of usl_list[j] in
    the ordering computed from usl_list[i].
:param usl_list: a list of usls
:return: a (len(usl_list), len(usl_list)) np.array
"""
usl_list = list(usl_list)
indexes = {
u: i for i, u in enumerate(usl_list)
}
order_mat = np.zeros(shape=(len(usl_list), len(usl_list)), dtype=int)
for u in usl_list:
sorted_list = QuerySort(u).sort(collection=usl_list)
for i, u_s in enumerate(sorted_list):
order_mat[indexes[u], indexes[u_s]] = i
return order_mat
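# Interpretation sketch (illustrative): with m = square_order_matrix([a, b, c]),
#   m[i, j]            is the rank of usl j in the ordering computed from usl i, and
#   np.argsort(m[i])   lists the column indices from first- to last-ranked for usl i.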
class QuerySort:
"""order a collection of usl from a specific usl"""
def __init__(self, usl):
self.usl = usl
assert isinstance(usl, Usl)
def sort(self, collection):
def sort_key(u):
return self._proximity(u, lambda u: u.semes)
# self._proximity(u, lambda u: u.topics)
return sorted(collection, key=sort_key)
def _proximity(self, u, get_set):
s0 = get_set(self.usl)
s1 = get_set(u)
sym_diff = len(s0 ^ s1)
if sym_diff != 0:
return len(s0.intersection(s1)) / sym_diff
else:
            # identical seme sets: return a proximity strictly greater than any
            # possible len(s0 & s1) / len(s0 ^ s1) ratio between distinct sets
return len(s0) + 1
|
niavlys/kivy | refs/heads/master | kivy/base.py | 4 | # pylint: disable=W0611
'''
Kivy Base
=========
This module contains core Kivy functionality and is not intended for end users.
Feel free to look though it, but calling any of these methods directly may well
result in unpredicatable behavior.
Event loop management
---------------------
'''
__all__ = (
'EventLoop',
'EventLoopBase',
'ExceptionHandler',
'ExceptionManagerBase',
'ExceptionManager',
'runTouchApp',
'stopTouchApp',
)
import sys
from kivy.config import Config
from kivy.logger import Logger
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.lang import Builder
from kivy.context import register_context
# private vars
EventLoop = None
class ExceptionHandler(object):
'''Base handler that catches exceptions in :func:`runTouchApp`.
You can subclass and extend it as follows::
class E(ExceptionHandler):
def handle_exception(self, inst):
                Logger.exception('Exception caught by ExceptionHandler')
return ExceptionManager.PASS
ExceptionManager.add_handler(E())
All exceptions will be set to PASS, and logged to the console!
'''
def __init__(self):
pass
def handle_exception(self, exception):
        '''Handle one exception, defaults to returning
        ExceptionManager.RAISE.
'''
return ExceptionManager.RAISE
class ExceptionManagerBase:
    '''ExceptionManager manages exception handlers.'''
RAISE = 0
PASS = 1
def __init__(self):
self.handlers = []
self.policy = ExceptionManagerBase.RAISE
def add_handler(self, cls):
'''Add a new exception handler to the stack.'''
if not cls in self.handlers:
self.handlers.append(cls)
def remove_handler(self, cls):
'''Remove a exception handler from the stack.'''
if cls in self.handlers:
self.handlers.remove(cls)
def handle_exception(self, inst):
        '''Called when an exception occurred in the runTouchApp() main loop.'''
ret = self.policy
for handler in self.handlers:
r = handler.handle_exception(inst)
if r == ExceptionManagerBase.PASS:
ret = r
return ret
#: Instance of a :class:`ExceptionManagerBase` implementation.
ExceptionManager = register_context('ExceptionManager', ExceptionManagerBase)
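# Behaviour sketch (illustrative): handle_exception() starts from self.policy (RAISE by
# default); any registered handler that returns PASS downgrades the result to PASS, which
# lets _run_mainloop() below swallow the exception instead of stopping and re-raising.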
class EventLoopBase(EventDispatcher):
'''Main event loop. This loop handles the updating of input and
dispatching events.
'''
__events__ = ('on_start', 'on_pause', 'on_stop')
def __init__(self):
super(EventLoopBase, self).__init__()
self.quit = False
self.input_events = []
self.postproc_modules = []
self.status = 'idle'
self.input_providers = []
self.input_providers_autoremove = []
self.event_listeners = []
self.window = None
self.me_list = []
@property
def touches(self):
'''Return the list of all touches currently in down or move states.
'''
return self.me_list
def ensure_window(self):
'''Ensure that we have a window.
'''
import kivy.core.window # NOQA
if not self.window:
Logger.critical('App: Unable to get a Window, abort.')
sys.exit(1)
def set_window(self, window):
'''Set the window used for the event loop.
'''
self.window = window
def add_input_provider(self, provider, auto_remove=False):
'''Add a new input provider to listen for touch events.
'''
if provider not in self.input_providers:
self.input_providers.append(provider)
if auto_remove:
self.input_providers_autoremove.append(provider)
def remove_input_provider(self, provider):
'''Remove an input provider.
'''
if provider in self.input_providers:
self.input_providers.remove(provider)
def add_event_listener(self, listener):
'''Add a new event listener for getting touch events.
'''
if not listener in self.event_listeners:
self.event_listeners.append(listener)
def remove_event_listener(self, listener):
'''Remove an event listener from the list.
'''
if listener in self.event_listeners:
self.event_listeners.remove(listener)
def start(self):
'''Must be called only once before run().
This starts all configured input providers.'''
self.status = 'started'
self.quit = False
for provider in self.input_providers:
provider.start()
self.dispatch('on_start')
def close(self):
'''Exit from the main loop and stop all configured
input providers.'''
self.quit = True
self.stop()
self.status = 'closed'
def stop(self):
        '''Stop all input providers and dispatch the `on_stop` event.'''
# XXX stop in reverse order that we started them!! (like push
# pop), very important because e.g. wm_touch and WM_PEN both
# store old window proc and the restore, if order is messed big
# problem happens, crashing badly without error
for provider in reversed(self.input_providers[:]):
provider.stop()
if provider in self.input_providers_autoremove:
self.input_providers_autoremove.remove(provider)
self.input_providers.remove(provider)
# ensure any restart will not break anything later.
self.input_events = []
self.status = 'stopped'
self.dispatch('on_stop')
def add_postproc_module(self, mod):
        '''Add a postproc input module (DoubleTap, TripleTap, DeJitter,
RetainTouch are defaults).'''
if mod not in self.postproc_modules:
self.postproc_modules.append(mod)
def remove_postproc_module(self, mod):
'''Remove a postproc module.'''
if mod in self.postproc_modules:
self.postproc_modules.remove(mod)
def post_dispatch_input(self, etype, me):
'''This function is called by dispatch_input() when we want to dispatch
an input event. The event is dispatched to all listeners and if
grabbed, it's dispatched to grabbed widgets.
'''
# update available list
if etype == 'begin':
self.me_list.append(me)
elif etype == 'end':
if me in self.me_list:
self.me_list.remove(me)
# dispatch to listeners
if not me.grab_exclusive_class:
for listener in self.event_listeners:
listener.dispatch('on_motion', etype, me)
# dispatch grabbed touch
me.grab_state = True
for _wid in me.grab_list[:]:
# it's a weakref, call it!
wid = _wid()
if wid is None:
# object is gone, stop.
me.grab_list.remove(_wid)
continue
root_window = wid.get_root_window()
if wid != root_window and root_window is not None:
me.push()
w, h = root_window.system_size
kheight = root_window.keyboard_height
smode = root_window.softinput_mode
me.scale_for_screen(w, h, rotation=root_window.rotation,
smode=smode, kheight=kheight)
parent = wid.parent
# and do to_local until the widget
try:
if parent:
me.apply_transform_2d(parent.to_widget)
else:
me.apply_transform_2d(wid.to_widget)
me.apply_transform_2d(wid.to_parent)
except AttributeError:
                    # when using an inner window, an app may have grabbed the touch
                    # but the app was then removed; the touch can no longer reach one
                    # of its parents (i.e. self.parent will be None) and the bug happens.
me.pop()
continue
me.grab_current = wid
wid._context.push()
if etype == 'begin':
                # don't dispatch the touch again in on_touch_down;
                # a down event is nearly unique here.
# wid.dispatch('on_touch_down', touch)
pass
elif etype == 'update':
if wid._context.sandbox:
with wid._context.sandbox:
wid.dispatch('on_touch_move', me)
else:
wid.dispatch('on_touch_move', me)
elif etype == 'end':
if wid._context.sandbox:
with wid._context.sandbox:
wid.dispatch('on_touch_up', me)
else:
wid.dispatch('on_touch_up', me)
wid._context.pop()
me.grab_current = None
if wid != root_window and root_window is not None:
me.pop()
me.grab_state = False
def _dispatch_input(self, *ev):
# remove the save event for the touch if exist
if ev in self.input_events:
self.input_events.remove(ev)
self.input_events.append(ev)
def dispatch_input(self):
'''Called by idle() to read events from input providers, pass events to
postproc, and dispatch final events.
'''
        # first, acquire input events
for provider in self.input_providers:
provider.update(dispatch_fn=self._dispatch_input)
# execute post-processing modules
for mod in self.postproc_modules:
self.input_events = mod.process(events=self.input_events)
# real dispatch input
input_events = self.input_events
pop = input_events.pop
post_dispatch_input = self.post_dispatch_input
while input_events:
post_dispatch_input(*pop(0))
def idle(self):
'''This function is called after every frame. By default:
* it "ticks" the clock to the next frame.
* it reads all input and dispatches events.
* it dispatches `on_update`, `on_draw` and `on_flip` events to the
window.
'''
# update dt
Clock.tick()
# read and dispatch input from providers
self.dispatch_input()
# flush all the canvas operation
Builder.sync()
# tick before draw
Clock.tick_draw()
# flush all the canvas operation
Builder.sync()
window = self.window
if window and window.canvas.needs_redraw:
window.dispatch('on_draw')
window.dispatch('on_flip')
# don't loop if we don't have listeners !
if len(self.event_listeners) == 0:
Logger.error('Base: No event listeners have been created')
Logger.error('Base: Application will leave')
self.exit()
return False
return self.quit
def run(self):
'''Main loop'''
while not self.quit:
self.idle()
self.exit()
def exit(self):
'''Close the main loop and close the window.'''
self.close()
if self.window:
self.window.close()
def on_stop(self):
'''Event handler for `on_stop` events which will be fired right
after all input providers have been stopped.'''
pass
def on_pause(self):
'''Event handler for `on_pause` which will be fired when
the event loop is paused.'''
pass
def on_start(self):
'''Event handler for `on_start` which will be fired right
after all input providers have been started.'''
pass
#: EventLoop instance
EventLoop = EventLoopBase()
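# Minimal usage sketch (illustrative; real applications usually go through kivy.app.App,
# which drives the same machinery):
#   from kivy.uix.button import Button
#   runTouchApp(Button(text='hello'))   # blocks until stopTouchApp() / window close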
def _run_mainloop():
'''If no window has been created, this will be the executed mainloop.'''
while True:
try:
EventLoop.run()
stopTouchApp()
break
except BaseException as inst:
# use exception manager first
r = ExceptionManager.handle_exception(inst)
if r == ExceptionManager.RAISE:
stopTouchApp()
raise
else:
pass
def runTouchApp(widget=None, slave=False):
'''Static main function that starts the application loop.
You can access some magic via the following arguments:
:Parameters:
`<empty>`
To make dispatching work, you need at least one
input listener. If not, application will leave.
(MTWindow act as an input listener)
`widget`
If you pass only a widget, a MTWindow will be created
and your widget will be added to the window as the root
widget.
`slave`
No event dispatching is done. This will be your job.
`widget + slave`
No event dispatching is done. This will be your job but
we try to get the window (must be created by you beforehand)
            and add the widget to it. Very useful for embedding Kivy
            in another toolkit. (like Qt, check kivy-designer)
'''
from kivy.input import MotionEventFactory, kivy_postproc_modules
# Ok, we got one widget, and we are not in slave mode
    # so if the user didn't create the window, let's create it for them!
if widget:
EventLoop.ensure_window()
# Instance all configured input
for key, value in Config.items('input'):
Logger.debug('Base: Create provider from %s' % (str(value)))
# split value
args = str(value).split(',', 1)
if len(args) == 1:
args.append('')
provider_id, args = args
provider = MotionEventFactory.get(provider_id)
if provider is None:
Logger.warning('Base: Unknown <%s> provider' % str(provider_id))
continue
# create provider
p = provider(key, args)
if p:
EventLoop.add_input_provider(p, True)
# add postproc modules
for mod in list(kivy_postproc_modules.values()):
EventLoop.add_postproc_module(mod)
# add main widget
if widget and EventLoop.window:
if widget not in EventLoop.window.children:
EventLoop.window.add_widget(widget)
# start event loop
Logger.info('Base: Start application main loop')
EventLoop.start()
# we are in a slave mode, don't do dispatching.
if slave:
return
    # in non-slave mode, there are 2 issues
#
# 1. if user created a window, call the mainloop from window.
    #    This is due to glut: it needs to be called with
    #    glutMainLoop(). Only FreeGLUT has a glutMainLoopEvent().
# So, we are executing the dispatching function inside
# a redisplay event.
#
    # 2. if no window is created, we are dispatching the event loop
    #    ourselves (previous behavior.)
#
try:
if EventLoop.window is None:
_run_mainloop()
else:
EventLoop.window.mainloop()
finally:
stopTouchApp()
def stopTouchApp():
'''Stop the current application by leaving the main loop'''
if EventLoop is None:
return
if EventLoop.status != 'started':
return
Logger.info('Base: Leaving application in progress...')
EventLoop.close()
|
SHA2017-badge/micropython-esp32 | refs/heads/master | tests/basics/int_divmod_intbig.py | 45 | # test integer floor division and modulo
# this tests bignum modulo
a = 987654321987987987987987987987
b = 19
print(a % b)
print(a % -b)
print(-a % b)
print(-a % -b)
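# For reference: Python's % takes the sign of the divisor (floor-division semantics),
# so the four results above are non-negative, non-positive, non-negative and
# non-positive respectively, each with magnitude below 19.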
|
nayanshah/python | refs/heads/master | ml.py | 1 | import json
import nltk
import operator as op
import numpy as np
from sys import stdin
from sklearn.ensemble import RandomForestRegressor
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
class ML:
"""Toolkit for simplifying machine learning tasks"""
X = y = X_ = y_ = y_pred = []
clf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
def __init__(self, clf=None):
self.clf = clf or self.clf
def split(self, size=.2):
"""Generate test data from training data"""
self.X, self.X_, self.y, self.y_ = train_test_split(self.X, self.y, test_size=size)
return self
def params(self, train_data, X_=[], y_=[], split=True, size=.2):
"""Set the data for the model and generate test data if required"""
self.X, self.y = train_data
self.X, self.y = np.array(self.X), np.array(self.y)
self.X_, self.y_ = np.array(X_), np.array(y_)
if not self.y_.size and split:
self.split(size)
return self
def p(self, x):
"""Predict the target for some data given"""
return self.clf.predict(x)[0]
def run(self, override=None):
"""Train the classifier and run it on test values if given"""
self.clf.fit(self.X, self.y)
if self.X_.any():
self.y_pred = self.clf.predict(self.X_)
if self.y_.any():
print score(self.y_, self.y_pred)
def optimize(self, params, override=None, cv=3):
"""Find optimal parameters for the model using GridSearch"""
self.clf = override or self.clf
grid = GridSearchCV(self.clf, params, score_func=score)
grid.fit(self.X, self.y, cv=cv)
print grid.best_params_, grid.best_score_
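# Usage sketch (illustrative, mirroring the __main__ block below): the chaining API is
#   m = ML(clf).params((X, y), X_=X_test, split=False)
#   m.optimize({'max_depth': range(1, 11)})   # grid-search hyper-parameters
#   m.run()                                   # fit, then predictions land in m.y_pred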
def score(y_true, y_pred):
"""Calculate the score of predicted values againt ground truth"""
# print zip(y_true, y_pred)
from sklearn.metrics import precision_score
return precision_score(y_true, y_pred)
def read(f):
"""Return the training and target data"""
N, M = map(int, f.readline().split())
X, y = [], []
for _ in range(N):
line = f.readline().split()
y.append(int(line[1]))
X.append(map(lambda x: float(x.split(':')[1]), line[2:]))
X, y = np.array(X), np.array(y)
T = int(f.readline())
ans, X_ = [], []
for _ in range(T):
line = f.readline().split()
ans.append(line[0])
X_.append(map(lambda x: float(x.split(':')[1]), line[1:]))
X_ = np.array(X_)
return X, y, X_, ans
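# Input layout expected by read() above (derived from the parsing code; M is read but
# not otherwise used):
#   line 1:        "N M"
#   next N lines:  "<id> <label> 1:<v1> 2:<v2> ..."   -> training rows
#   next line:     "T"
#   next T lines:  "<id> 1:<v1> 2:<v2> ..."           -> rows to predict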
if __name__ == '__main__':
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=10, max_depth= 7, random_state=0)
params = { 'n_estimators': range(8,16), 'max_depth':range(1,11) }
from sklearn import tree
clf = tree.DecisionTreeClassifier(random_state=0)
params = { 'max_depth':range(1,11) }
X, y, X_, ans = read(stdin)
    m = ML(clf).params((X, y), X_=X_, split=False)
m.optimize(params)
# m.run(clf)
for i in zip(ans, m.y_pred):
print '%s %+d' % i
|
SpaceKatt/CSPLN | refs/heads/master | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/numpy/core/tests/test_regression.py | 5 | from StringIO import StringIO
import pickle
import sys
import platform
import gc
import copy
from os import path
from numpy.testing import *
from numpy.testing.utils import _assert_valid_refcount, WarningManager
from numpy.compat import asbytes, asunicode, asbytes_nested
import warnings
import tempfile
import numpy as np
if sys.version_info[0] >= 3:
import io
StringIO = io.BytesIO
rlevel = 1
class TestRegression(TestCase):
def test_invalid_round(self,level=rlevel):
"""Ticket #3"""
v = 4.7599999999999998
assert_array_equal(np.array([v]),np.array(v))
def test_mem_empty(self,level=rlevel):
"""Ticket #7"""
np.empty((1,),dtype=[('x',np.int64)])
def test_pickle_transposed(self,level=rlevel):
"""Ticket #16"""
a = np.transpose(np.array([[2,9],[7,0],[3,8]]))
f = StringIO()
pickle.dump(a,f)
f.seek(0)
b = pickle.load(f)
f.close()
assert_array_equal(a,b)
def test_typeNA(self,level=rlevel):
"""Ticket #31"""
assert_equal(np.typeNA[np.int64],'Int64')
assert_equal(np.typeNA[np.uint64],'UInt64')
def test_dtype_names(self,level=rlevel):
"""Ticket #35"""
dt = np.dtype([(('name','label'),np.int32,3)])
def test_reduce(self,level=rlevel):
"""Ticket #40"""
assert_almost_equal(np.add.reduce([1.,.5],dtype=None), 1.5)
def test_zeros_order(self,level=rlevel):
"""Ticket #43"""
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_sort_bigendian(self,level=rlevel):
"""Ticket #47"""
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self,level=rlevel):
"""Ticket #49"""
c = np.arange(125).reshape((5,5,5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self,level=rlevel):
"""Ticket #50"""
f = StringIO()
ca = np.char.array(np.arange(1000,1010),itemsize=4)
ca.dump(f)
f.seek(0)
ca = np.load(f)
f.close()
def test_noncontiguous_fill(self,level=rlevel):
"""Ticket #58."""
a = np.zeros((5,3))
b = a[:,:2,]
def rs():
b.shape = (10,)
self.assertRaises(AttributeError,rs)
def test_bool(self,level=rlevel):
"""Ticket #60"""
x = np.bool_(1)
def test_indexing1(self,level=rlevel):
"""Ticket #64"""
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j,4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self,level=rlevel):
"""Ticket #65"""
descr = [('x', 'i4', (2,))]
buffer = ([3,2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self,level=rlevel):
"""Ticket #67"""
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self,level=rlevel):
"""Ticket #72"""
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False,True]))
self.assertTrue(a[1] == 'auto')
self.assertTrue(a[0] != 'auto')
b = np.linspace(0, 10, 11)
self.assertTrue(b != 'auto')
self.assertTrue(b[0] != 'auto')
def test_unicode_swapping(self,level=rlevel):
"""Ticket #79"""
ulen = 1
ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua2 = ua.newbyteorder()
def test_object_array_fill(self,level=rlevel):
"""Ticket #86"""
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self,level=rlevel):
"""Ticket #93"""
self.assertRaises(TypeError,np.dtype,
{'names':['a'],'formats':['foo']},align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
(sys.platform == "win32" and platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
def test_intp(self,level=rlevel):
"""Ticket #99"""
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width,16)
self.assertRaises(OverflowError,np.intp,'0x' + 'f'*(i_width+1),16)
self.assertRaises(ValueError,np.intp,'0x1',32)
assert_equal(255,np.intp('0xFF',16))
assert_equal(1024,np.intp(1024))
def test_endian_bool_indexing(self,level=rlevel):
"""Ticket #105"""
a = np.arange(10.,dtype='>f8')
b = np.arange(10.,dtype='<f8')
xa = np.where((a>2) & (a<6))
xb = np.where((b>2) & (b<6))
ya = ((a>2) & (a<6))
yb = ((b>2) & (b<6))
assert_array_almost_equal(xa,ya.nonzero())
assert_array_almost_equal(xb,yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_mem_dot(self,level=rlevel):
"""Ticket #106"""
x = np.random.randn(0,1)
y = np.random.randn(10,1)
z = np.dot(x, np.transpose(y))
def test_arange_endian(self,level=rlevel):
"""Ticket #111"""
ref = np.arange(10)
x = np.arange(10,dtype='<f8')
assert_array_equal(ref,x)
x = np.arange(10,dtype='>f8')
assert_array_equal(ref,x)
# Longfloat support is not consistent enough across
# platforms for this test to be meaningful.
# def test_longfloat_repr(self,level=rlevel):
# """Ticket #112"""
# if np.longfloat(0).itemsize > 8:
# a = np.exp(np.array([1000],dtype=np.longfloat))
# assert_(str(a)[1:9] == str(a[0])[:8])
def test_argmax(self,level=rlevel):
"""Ticket #119"""
a = np.random.normal(0,1,(4,5,6,7,8))
for i in xrange(a.ndim):
aargmax = a.argmax(i)
def test_mem_divmod(self,level=rlevel):
"""Ticket #126"""
for i in range(10):
divmod(np.array([i])[0],10)
def test_hstack_invalid_dims(self,level=rlevel):
"""Ticket #128"""
x = np.arange(9).reshape((3,3))
y = np.array([0,0,0])
self.assertRaises(ValueError,np.hstack,(x,y))
def test_squeeze_type(self,level=rlevel):
"""Ticket #133"""
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self,level=rlevel):
"""Ticket #143"""
assert_equal(0,np.add.identity)
def test_binary_repr_0(self,level=rlevel):
"""Ticket #151"""
assert_equal('0',np.binary_repr(0))
def test_rec_iterate(self,level=rlevel):
"""Ticket #160"""
descr = np.dtype([('i',int),('f',float),('s','|S3')])
x = np.rec.array([(1,1.1,'1.0'),
(2,2.2,'2.0')],dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self,level=rlevel):
"""Ticket #190"""
a = np.array('hello',np.unicode_)
b = np.array('world')
a == b
def test_tostring_FORTRANORDER_discontiguous(self,level=rlevel):
"""Fix in r2836"""
# Create discontiguous Fortran-ordered array
x = np.array(np.random.rand(3,3),order='F')[:,:2]
assert_array_almost_equal(x.ravel(),np.fromstring(x.tostring()))
def test_flat_assignment(self,level=rlevel):
"""Correct behaviour of ticket #194"""
x = np.empty((3,1))
x.flat = np.arange(3)
assert_array_almost_equal(x,[[0],[1],[2]])
x.flat = np.arange(3,dtype=float)
assert_array_almost_equal(x,[[0],[1],[2]])
def test_broadcast_flat_assignment(self,level=rlevel):
"""Ticket #194"""
x = np.empty((3,1))
def bfa(): x[:] = np.arange(3)
def bfb(): x[:] = np.arange(3,dtype=float)
self.assertRaises(ValueError, bfa)
self.assertRaises(ValueError, bfb)
def test_unpickle_dtype_with_object(self,level=rlevel):
"""Implemented in r2840"""
dt = np.dtype([('x',int),('y',np.object_),('z','O')])
f = StringIO()
pickle.dump(dt,f)
f.seek(0)
dt_ = pickle.load(f)
f.close()
assert_equal(dt,dt_)
def test_mem_array_creation_invalid_specification(self,level=rlevel):
"""Ticket #196"""
dt = np.dtype([('x',int),('y',np.object_)])
# Wrong way
self.assertRaises(ValueError, np.array, [1,'object'], dt)
# Correct way
np.array([(1,'object')],dt)
def test_recarray_single_element(self,level=rlevel):
"""Ticket #202"""
a = np.array([1,2,3],dtype=np.int32)
b = a.copy()
r = np.rec.array(a,shape=1,formats=['3i4'],names=['d'])
assert_array_equal(a,b)
assert_equal(a,r[0][0])
def test_zero_sized_array_indexing(self,level=rlevel):
"""Ticket #205"""
tmp = np.array([])
def index_tmp(): tmp[np.array(10)]
self.assertRaises(IndexError, index_tmp)
def test_chararray_rstrip(self,level=rlevel):
"""Ticket #222"""
x = np.chararray((1,),5)
x[0] = asbytes('a ')
x = x.rstrip()
assert_equal(x[0], asbytes('a'))
def test_object_array_shape(self,level=rlevel):
"""Ticket #239"""
assert_equal(np.array([[1,2],3,4],dtype=object).shape, (3,))
assert_equal(np.array([[1,2],[3,4]],dtype=object).shape, (2,2))
assert_equal(np.array([(1,2),(3,4)],dtype=object).shape, (2,2))
assert_equal(np.array([],dtype=object).shape, (0,))
assert_equal(np.array([[],[],[]],dtype=object).shape, (3,0))
assert_equal(np.array([[3,4],[5,6],None],dtype=object).shape, (3,))
def test_mem_around(self,level=rlevel):
"""Ticket #243"""
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y),decimal) <= 10.0**(-decimal)
def test_character_array_strip(self,level=rlevel):
"""Ticket #246"""
x = np.char.array(("x","x ","x "))
for c in x: assert_equal(c,"x")
def test_lexsort(self,level=rlevel):
"""Lexsort memory error"""
v = np.array([1,2,3,4,5,6,7,8,9,10])
assert_equal(np.lexsort(v),0)
def test_pickle_dtype(self,level=rlevel):
"""Ticket #251"""
import pickle
pickle.dumps(np.float)
def test_swap_real(self, level=rlevel):
"""Ticket #265"""
assert_equal(np.arange(4,dtype='>c8').imag.max(),0.0)
assert_equal(np.arange(4,dtype='<c8').imag.max(),0.0)
assert_equal(np.arange(4,dtype='>c8').real.max(),3.0)
assert_equal(np.arange(4,dtype='<c8').real.max(),3.0)
def test_object_array_from_list(self, level=rlevel):
"""Ticket #270"""
a = np.array([1,'A',None])
def test_multiple_assign(self, level=rlevel):
"""Ticket #273"""
a = np.zeros((3,1),int)
a[[1,2]] = 1
def test_empty_array_type(self, level=rlevel):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self, level=rlevel):
dt = np.dtype([('one', '<i4'),('two', '<i4')])
x = np.array((1,2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self, level=rlevel):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8,7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
assert abs(arr-res2).max() < 1e-8, func
else:
assert abs(res1-res2).max() < 1e-8, func
for func in funcs2:
arr1 = np.random.rand(8,7)
arr2 = np.random.rand(8,7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert abs(res1-res2).max() < 1e-8, func
def test_mem_lexsort_strings(self, level=rlevel):
"""Ticket #298"""
lst = ['abc','cde','fgh']
np.lexsort((lst,))
def test_fancy_index(self, level=rlevel):
"""Ticket #302"""
x = np.array([1,2])[np.array([0])]
assert_equal(x.shape,(1,))
def test_recarray_copy(self, level=rlevel):
"""Ticket #312"""
dt = [('x',np.int16),('y',np.float64)]
ra = np.array([(1,2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert ra['x'] != rb['x']
def test_rec_fromarray(self, level=rlevel):
"""Ticket #322"""
x1 = np.array([[1,2],[3,4],[5,6]])
x2 = np.array(['a','dd','xyz'])
x3 = np.array([1.1,2,3])
np.rec.fromarrays([x1,x2,x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self, level=rlevel):
x = np.empty((2,2),object)
x.flat[2] = (1,2,3)
assert_equal(x.flat[2],(1,2,3))
def test_ndmin_float64(self, level=rlevel):
"""Ticket #324"""
x = np.array([1,2,3],dtype=np.float64)
assert_equal(np.array(x,dtype=np.float32,ndmin=2).ndim,2)
assert_equal(np.array(x,dtype=np.float64,ndmin=2).ndim,2)
def test_mem_axis_minimization(self, level=rlevel):
"""Ticket #327"""
data = np.arange(5)
data = np.add.outer(data,data)
def test_mem_float_imag(self, level=rlevel):
"""Ticket #330"""
np.float64(1.0).imag
def test_dtype_tuple(self, level=rlevel):
"""Ticket #334"""
assert np.dtype('i4') == np.dtype(('i4',()))
def test_dtype_posttuple(self, level=rlevel):
"""Ticket #335"""
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self, level=rlevel):
"""Ticket #341"""
assert_equal(np.array(['X'], 'c'), asbytes('X'))
def test_string_array_size(self, level=rlevel):
"""Ticket #342"""
self.assertRaises(ValueError,
np.array,[['X'],['X','X','X']],'|S1')
def test_dtype_repr(self, level=rlevel):
"""Ticket #344"""
dt1=np.dtype(('uint32', 2))
dt2=np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self, level=rlevel):
"""Make sure reshape order works."""
a = np.arange(6).reshape(2,3,order='F')
assert_equal(a,[[0,2,4],[1,3,5]])
a = np.array([[1,2],[3,4],[5,6],[7,8]])
b = a[:,1]
assert_equal(b.reshape(2,2,order='F'), [[2,6],[4,8]])
def test_repeat_discont(self, level=rlevel):
"""Ticket #352"""
a = np.arange(12).reshape(4,3)[:,2]
assert_equal(a.repeat(3), [2,2,2,5,5,5,8,8,8,11,11,11])
def test_array_index(self, level=rlevel):
"""Make sure optimization is not called in this case."""
a = np.array([1,2,3])
a2 = np.array([[1,2,3]])
assert_equal(a[np.where(a==3)], a2[np.where(a2==3)])
def test_object_argmax(self, level=rlevel):
a = np.array([1,2,3],dtype=object)
assert a.argmax() == 2
def test_recarray_fields(self, level=rlevel):
"""Ticket #372"""
dt0 = np.dtype([('f0','i4'),('f1','i4')])
dt1 = np.dtype([('f0','i8'),('f1','i8')])
for a in [np.array([(1,2),(3,4)],"i4,i4"),
np.rec.array([(1,2),(3,4)],"i4,i4"),
np.rec.array([(1,2),(3,4)]),
np.rec.fromarrays([(1,2),(3,4)],"i4,i4"),
np.rec.fromarrays([(1,2),(3,4)])]:
assert_(a.dtype in [dt0,dt1])
def test_random_shuffle(self, level=rlevel):
"""Ticket #374"""
a = np.arange(5).reshape((5,1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0),a)
def test_refcount_vdot(self, level=rlevel):
"""Changeset #3443"""
_assert_valid_refcount(np.vdot)
def test_startswith(self, level=rlevel):
ca = np.char.array(['Hi','There'])
assert_equal(ca.startswith('H'),[True,False])
def test_noncommutative_reduce_accumulate(self, level=rlevel):
"""Ticket #413"""
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self, level=rlevel):
"""Convolve should raise an error for empty input array."""
self.assertRaises(ValueError,np.convolve,[],[1])
self.assertRaises(ValueError,np.convolve,[1],[])
def test_multidim_byteswap(self, level=rlevel):
"""Ticket #449"""
r=np.array([(1,(0,1,2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256,(0,256,512))],r.dtype))
def test_string_NULL(self, level=rlevel):
"""Changeset 3557"""
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self, level=rlevel):
"""Ticket #483"""
r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
assert asbytes(r['var1'][0][0]) == asbytes('abc')
def test_take_output(self, level=rlevel):
"""Ensure that 'take' honours output parameter."""
x = np.arange(12).reshape((3,4))
a = np.take(x,[0,2],axis=1)
b = np.zeros_like(a)
np.take(x,[0,2],axis=1,out=b)
assert_array_equal(a,b)
def test_array_str_64bit(self, level=rlevel):
"""Ticket #501"""
s = np.array([1, np.nan],dtype=np.float64)
errstate = np.seterr(all='raise')
try:
sstr = np.array_str(s)
finally:
np.seterr(**errstate)
def test_frompyfunc_endian(self, level=rlevel):
"""Ticket #503"""
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self, level=rlevel):
"""Ticket #514"""
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s ))
def test_arr_transpose(self, level=rlevel):
"""Ticket #516"""
x = np.random.rand(*(2,)*16)
y = x.transpose(range(16))
def test_string_mergesort(self, level=rlevel):
"""Ticket #540"""
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self, level=rlevel):
"""Ticket #546"""
a = np.arange(3, dtype='>f')
assert a[a.argmax()] == a.max()
def test_rand_seed(self, level=rlevel):
"""Ticket #555"""
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self, level=rlevel):
"""Ticket #562"""
a = np.zeros(5,dtype=float)
b = np.array(a,dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
self.assertRaises(ValueError, np.fromiter, [['12',''],['13','']], str)
def test_dot_negative_stride(self, level=rlevel):
"""Ticket #588"""
x = np.array([[1,5,25,125.,625]])
y = np.array([[20.],[160.],[640.],[1280.],[1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x,z),np.dot(x,y2))
def test_object_casting(self, level=rlevel):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484,286])
y = np.zeros([484,286])
x |= y
self.assertRaises(TypeError,rs)
def test_unicode_scalar(self, level=rlevel):
"""Ticket #600"""
import cPickle
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = cPickle.loads(cPickle.dumps(el))
assert_equal(new, el)
def test_arange_non_native_dtype(self, level=rlevel):
"""Ticket #616"""
for T in ('>f4','<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0,dtype=dt).dtype,dt)
assert_equal(np.arange(0.5,dtype=dt).dtype,dt)
assert_equal(np.arange(5,dtype=dt).dtype,dt)
def test_bool_indexing_invalid_nr_elements(self, level=rlevel):
s = np.ones(10,dtype=float)
x = np.array((15,),dtype=float)
def ia(x,s): x[(s>0)]=1.0
self.assertRaises(ValueError,ia,x,s)
def test_mem_scalar_indexing(self, level=rlevel):
"""Ticket #603"""
x = np.array([0],dtype=float)
index = np.array(0,dtype=np.int32)
x[index]
def test_binary_repr_0_width(self, level=rlevel):
assert_equal(np.binary_repr(0,width=3),'000')
def test_fromstring(self, level=rlevel):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12,9,9])
def test_searchsorted_variable_length(self, level=rlevel):
x = np.array(['a','aa','b'])
y = np.array(['d','e'])
assert_equal(x.searchsorted(y), [3,3])
def test_string_argsort_with_zeros(self, level=rlevel):
"""Check argsort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1,0]))
assert_array_equal(x.argsort(kind='q'), np.array([1,0]))
def test_string_sort_with_zeros(self, level=rlevel):
"""Check sort for strings containing zeros."""
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_flat_byteorder(self, level=rlevel):
"""Ticket #657"""
x = np.arange(10)
assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))
def test_uint64_from_negative(self, level=rlevel) :
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
def test_sign_bit(self, level=rlevel):
x = np.array([0,-0.0,0])
assert_equal(str(np.abs(x)),'[ 0. 0. 0.]')
def test_flat_index_byteswap(self, level=rlevel):
for dt in (np.dtype('<i4'),np.dtype('>i4')):
x = np.array([-1,0,1],dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self, level=rlevel):
"""Ticket #658"""
np.indices((0,3,4)).T.reshape(-1,3)
def test_copy_detection_corner_case2(self, level=rlevel):
"""Ticket #771: strides are not set correctly when reshaping 0-sized
arrays"""
b = np.indices((0,3,4)).T.reshape(-1,3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self, level=rlevel):
"""Ticket #633"""
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1d broadcasted slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1d -> 2d broadcasted slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert cnt(a) == cnt0_a + 1
arr0[1] = b
assert cnt(b) == cnt0_b + 1
arr[:,:] = arr0
assert cnt(a) == cnt0_a + 6
assert cnt(b) == cnt0_b + 6
arr[:,0] = None
assert cnt(a) == cnt0_a + 1
del arr, arr0
# -- 2d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:,0] = a
arr[:,1] = b
assert cnt(a) == cnt0_a + 5
assert cnt(b) == cnt0_b + 5
arr2 = arr.copy()
assert cnt(a) == cnt0_a + 10
assert cnt(b) == cnt0_b + 10
arr2 = arr[:,0].copy()
assert cnt(a) == cnt0_a + 10
assert cnt(b) == cnt0_b + 5
arr2 = arr.flatten()
assert cnt(a) == cnt0_a + 10
assert cnt(b) == cnt0_b + 10
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert cnt(a) == cnt0_a + 5
assert cnt(b) == cnt0_b + 5
arr3 = np.concatenate((arr1, arr2))
assert cnt(a) == cnt0_a + 5 + 5
assert cnt(b) == cnt0_b + 5 + 5
arr3 = arr1.repeat(3, axis=0)
assert cnt(a) == cnt0_a + 5 + 3*5
arr3 = arr1.take([1,2,3], axis=0)
assert cnt(a) == cnt0_a + 5 + 3
x = np.array([[0],[1],[0],[1],[1]], int)
arr3 = x.choose(arr1, arr2)
assert cnt(a) == cnt0_a + 5 + 2
assert cnt(b) == cnt0_b + 5 + 3
def test_mem_custom_float_to_array(self, level=rlevel):
"""Ticket 702"""
class MyFloat:
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp2 = tmp.astype(float)
def test_object_array_refcount_self_assign(self, level=rlevel):
"""Ticket #711"""
class VictimObject(object):
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert not arr[0].deleted
arr[:] = arr # trying to induce a segfault by doing it again...
assert not arr[0].deleted
def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
x = [1,2,3]
self.assertRaises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self, level=rlevel):
"""Ticket #713"""
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert not np.any(a)
np.setbufsize(oldsize)
def test_mem_0d_array_index(self, level=rlevel):
"""Ticket #714"""
np.zeros(10)[np.array(0)]
def test_floats_from_string(self, level=rlevel):
"""Ticket #640, floats from string"""
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_complex_dtype_printing(self, level=rlevel):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(str(dt),
"[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])]")
def test_nonnative_endian_fill(self, level=rlevel):
""" Non-native endian arrays were incorrectly filled with scalars before
r5034.
"""
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self, level=rlevel):
"""Test for ticket #551, changeset r5140"""
x = np.zeros((30,40))
y = pickle.loads(pickle.dumps(x))
        # y is now typically not aligned on an 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self, level=rlevel):
"""Ticket #788, changeset r5155"""
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
xp = pickle.load(open(filename, 'rb'), encoding='latin1')
else:
xp = pickle.load(open(filename))
xpd = xp.astype(np.float64)
assert (xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0])
def test_compress_small_type(self, level=rlevel):
"""Ticket #789, changeset 5217.
"""
        # compress with an out argument segfaulted if it could not cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype = np.single)
try:
a.compress([True, False], axis = 1, out = b)
raise AssertionError("compress with an out which cannot be " \
"safely casted should not return "\
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
"""Ticket #791
"""
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1,2,3,4],[5,6,7,8]],'jubba')
assert_(dat.info == 'jubba')
dat.resize((4,2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2,3,4],[6,3,4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32,0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2,7).info == 'jubba')
assert_(dat.compress([0,1,1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0],'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32,0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.nonzero()[0].info == 'jubba')
assert_(dat.nonzero()[1].info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2,4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0,1).info == 'jubba')
assert_(dat.take([2,3,5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
def test_recarray_tolist(self, level=rlevel):
"""Ticket #793, changeset r5215
"""
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1','2','3']))
assert_equal(a,b)
def test_unaligned_unicode_access(self, level=rlevel) :
"""Ticket #825"""
for i in range(1,9) :
msg = 'unicode offset: %d chars'%i
t = np.dtype([('a','S%d'%i),('b','U2')])
x = np.array([(asbytes('a'),u'b')], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
"""Ticket 794."""
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self, level=rlevel):
"""Ticket #674"""
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert a.dtype.names[0] == "notfoo"
assert a.dtype.names[1] == "bar"
def test_for_object_scalar_creation(self, level=rlevel):
"""Ticket #816"""
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4,5])
d = np.object_([None, {}, []])
assert a is None
assert type(b) is int
assert type(b2) is float
assert type(c) is np.ndarray
assert c.dtype == object
assert d.dtype == object
def test_array_resize_method_system_error(self):
"""Ticket #840 - order should be an invalid keyword."""
x = np.array([[0,1],[2,3]])
self.assertRaises(TypeError, x.resize, (2,2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
old_err = np.seterr(all="ignore")
try:
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert n_before >= n_after, (n_before, n_after)
finally:
np.seterr(**old_err)
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert arr[0][0] == 'john'
assert arr[0][1] == 4
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in xrange(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
                    # `dot` should just return a zero (m, n) matrix
z = np.dot(x, y)
assert np.all(z == 0)
assert z.shape == (m, n)
def test_zeros(self):
"""Regression test for #1061."""
        # Set a size which cannot fit into a 64-bit signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
"""Regression test for #1062."""
        # Set a size which cannot fit into a 64-bit signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
a = np.arange(sz)
            self.assertTrue(a.size == sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9]))
assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9]))
def test_array_from_sequence_scalar_array(self):
"""Ticket #1078: segfaults when creating an array with a sequence of 0d
arrays."""
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
"""Ticket #1081: weird array with strange input..."""
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
"""Ticket #1080."""
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
"""Ticket #1106."""
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
"""Ticket #1217, don't segfault."""
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
"""Ticket #1235."""
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
a = np.array([[u'abc', u'\u03a3'], [u'asdf', u'erw']], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
assert a.itemsize == 16
a = np.array([u'123', '1234'])
assert a.itemsize == 16
a = np.array(['1234', u'123', '12345'])
assert a.itemsize == 20
a = np.array([u'123', '1234', u'12345'])
assert a.itemsize == 20
a = np.array([u'123', '1234', u'1234'])
assert a.itemsize == 16
def test_misaligned_objects_segfault(self):
"""Ticket #1198 and #1267"""
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
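        # Exercise repr, argmax, item assignment, astype, nonzero, sort and
        # deepcopy on the misaligned object column; none of these should crash.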
r = repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
a3 = np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
"""Ticket #1267"""
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
"""Ticket #1267"""
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
"""Ticket #1259"""
z = np.array([-1j], '<c8')
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype='>c8'))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype='<c8'))
def test_structured_arrays_with_objects1(self):
"""Ticket #1299"""
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert x[0,1] == x[0,0]
def test_structured_arrays_with_objects2(self):
"""Ticket #1299 second test"""
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert sys.getrefcount(strb) == numb
assert sys.getrefcount(stra) == numa + 2
def test_duplicate_title_and_name(self):
"""Ticket #1254"""
def func():
x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')])
self.assertRaises(ValueError, func)
def test_signed_integer_division_overflow(self):
"""Ticket #1317."""
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
old_err = np.seterr(divide="ignore")
try:
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
finally:
np.seterr(**old_err)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1,2,3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_numeric_handleError(self):
"""Ticket #1405"""
from numpy import numarray
# Just make sure this doesn't throw an exception
numarray.handleError(0, "")
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
        # Check that log1p behaves correctly on 32-bit Intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9]))
assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9]))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timeinteger))]
a = np.array([], dtypes[0])
failures = []
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError, e:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1,2,3),(4,5,6),(7,8,9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls,i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1,2,3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=np.float)
a.shape = (4,4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
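        # Whether or not take() raises below, the refcount of the out= array
        # must be unchanged afterwards.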
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
        # On Python 3, tofile/fromfile used to get the Python file
        # handle out of sync (#1610)
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tostring())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
ctx = WarningManager()
ctx.__enter__()
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
ctx.__exit__()
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in xrange(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
rep = repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([],['?','?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
assert_equal(np.dtype('S10').itemsize, 10)
A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
# This was throwing an exception because in ctors.c,
# discover_itemsize was calling PyObject_Length without checking
# the return code. This failed to get the length of the number 2,
# and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1,0]), [])
def test_objectarray_setfield(self):
# Setfield directly manipulates the raw array data,
# so is invalid for object arrays.
x = np.array([1,2,3], dtype=object)
assert_raises(RuntimeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
@dec.knownfailureif(sys.version_info[0] >= 3,
"a.dtype is U5 for Py 3.x. Knownfail for 1.6.x")
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1],[s2],[s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype('str')
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
"""Ticket #1756 """
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1,17):
a1 = np.array(a, "|S%d"%i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
r=np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
def test_structured_type_to_object(self):
a_rec = np.array([(0,1), (3,2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0,1)
a_obj[1] = (3,2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2,2), dtype=object)
a[...] = [[1,2]]
assert_equal(a, [[1,2], [1,2]])
if __name__ == "__main__":
run_module_suite()
|
ASinanSaglam/westpa | refs/heads/master | lib/examples/wca-dimer_openmm/we_custom/sampling.py | 4 | import simtk
import simtk.unit as units
import simtk.openmm as openmm
def minimize(platform, system, positions):
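    """Relax the given positions with OpenMM's LocalEnergyMinimizer and return
    the minimized coordinates, printing the initial and final potential energies."""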
# Create a Context.
timestep = 1.0 * units.femtoseconds
integrator = openmm.VerletIntegrator(timestep)
context = openmm.Context(system, integrator, platform)
# Set coordinates.
context.setPositions(positions)
# Compute initial energy.
state = context.getState(getEnergy=True)
initial_potential = state.getPotentialEnergy()
print "initial potential: %12.3f kcal/mol" % (initial_potential / units.kilocalories_per_mole)
# Minimize.
openmm.LocalEnergyMinimizer.minimize(context)
# Compute final energy.
state = context.getState(getEnergy=True, getPositions=True)
final_potential = state.getPotentialEnergy()
positions = state.getPositions(asNumpy=True)
# Report
print "final potential : %12.3f kcal/mol" % (final_potential / units.kilocalories_per_mole)
return positions
|
rubyinhell/brython | refs/heads/master | www/src/Lib/test/test_importlib/source/test_file_loader.py | 28 | from importlib import machinery
import importlib
import importlib.abc
from .. import abc
from .. import util
from . import util as source_util
import errno
import imp
import marshal
import os
import py_compile
import shutil
import stat
import sys
import unittest
from test.support import make_legacy_pyc
class SimpleTest(unittest.TestCase):
"""Should have no issue importing a source module [basic]. And if there is
a syntax error, it should raise a SyntaxError [syntax error].
"""
def test_load_module_API(self):
        # If fullname is not specified then assume self.name is desired.
class TesterMixin(importlib.abc.Loader):
def load_module(self, fullname): return fullname
def module_repr(self, module): return '<module>'
class Tester(importlib.abc.FileLoader, TesterMixin):
def get_code(self, _): pass
def get_source(self, _): pass
def is_package(self, _): pass
name = 'mod_name'
loader = Tester(name, 'some_path')
self.assertEqual(name, loader.load_module())
self.assertEqual(name, loader.load_module(None))
self.assertEqual(name, loader.load_module(name))
with self.assertRaises(ImportError):
loader.load_module(loader.name + 'XXX')
def test_get_filename_API(self):
# If fullname is not set then assume self.path is desired.
class Tester(importlib.abc.FileLoader):
def get_code(self, _): pass
def get_source(self, _): pass
def is_package(self, _): pass
def module_repr(self, _): pass
path = 'some_path'
name = 'some_name'
loader = Tester(name, path)
self.assertEqual(path, loader.get_filename(name))
self.assertEqual(path, loader.get_filename())
self.assertEqual(path, loader.get_filename(None))
with self.assertRaises(ImportError):
loader.get_filename(name + 'XXX')
# [basic]
def test_module(self):
with source_util.create_modules('_temp') as mapping:
loader = machinery.SourceFileLoader('_temp', mapping['_temp'])
module = loader.load_module('_temp')
self.assertIn('_temp', sys.modules)
check = {'__name__': '_temp', '__file__': mapping['_temp'],
'__package__': ''}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def test_package(self):
with source_util.create_modules('_pkg.__init__') as mapping:
loader = machinery.SourceFileLoader('_pkg',
mapping['_pkg.__init__'])
module = loader.load_module('_pkg')
self.assertIn('_pkg', sys.modules)
check = {'__name__': '_pkg', '__file__': mapping['_pkg.__init__'],
'__path__': [os.path.dirname(mapping['_pkg.__init__'])],
'__package__': '_pkg'}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def test_lacking_parent(self):
with source_util.create_modules('_pkg.__init__', '_pkg.mod')as mapping:
loader = machinery.SourceFileLoader('_pkg.mod',
mapping['_pkg.mod'])
module = loader.load_module('_pkg.mod')
self.assertIn('_pkg.mod', sys.modules)
check = {'__name__': '_pkg.mod', '__file__': mapping['_pkg.mod'],
'__package__': '_pkg'}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def fake_mtime(self, fxn):
"""Fake mtime to always be higher than expected."""
return lambda name: fxn(name) + 1
def test_module_reuse(self):
with source_util.create_modules('_temp') as mapping:
loader = machinery.SourceFileLoader('_temp', mapping['_temp'])
module = loader.load_module('_temp')
module_id = id(module)
module_dict_id = id(module.__dict__)
with open(mapping['_temp'], 'w') as file:
file.write("testing_var = 42\n")
module = loader.load_module('_temp')
self.assertIn('testing_var', module.__dict__,
"'testing_var' not in "
"{0}".format(list(module.__dict__.keys())))
self.assertEqual(module, sys.modules['_temp'])
self.assertEqual(id(module), module_id)
self.assertEqual(id(module.__dict__), module_dict_id)
def test_state_after_failure(self):
# A failed reload should leave the original module intact.
attributes = ('__file__', '__path__', '__package__')
value = '<test>'
name = '_temp'
with source_util.create_modules(name) as mapping:
orig_module = imp.new_module(name)
for attr in attributes:
setattr(orig_module, attr, value)
with open(mapping[name], 'w') as file:
file.write('+++ bad syntax +++')
loader = machinery.SourceFileLoader('_temp', mapping['_temp'])
with self.assertRaises(SyntaxError):
loader.load_module(name)
for attr in attributes:
self.assertEqual(getattr(orig_module, attr), value)
# [syntax error]
def test_bad_syntax(self):
with source_util.create_modules('_temp') as mapping:
with open(mapping['_temp'], 'w') as file:
file.write('=')
loader = machinery.SourceFileLoader('_temp', mapping['_temp'])
with self.assertRaises(SyntaxError):
loader.load_module('_temp')
self.assertNotIn('_temp', sys.modules)
def test_file_from_empty_string_dir(self):
# Loading a module found from an empty string entry on sys.path should
# not only work, but keep all attributes relative.
file_path = '_temp.py'
with open(file_path, 'w') as file:
file.write("# test file for importlib")
try:
with util.uncache('_temp'):
loader = machinery.SourceFileLoader('_temp', file_path)
mod = loader.load_module('_temp')
self.assertEqual(file_path, mod.__file__)
self.assertEqual(imp.cache_from_source(file_path),
mod.__cached__)
finally:
os.unlink(file_path)
pycache = os.path.dirname(imp.cache_from_source(file_path))
if os.path.exists(pycache):
shutil.rmtree(pycache)
def test_timestamp_overflow(self):
# When a modification timestamp is larger than 2**32, it should be
# truncated rather than raise an OverflowError.
with source_util.create_modules('_temp') as mapping:
source = mapping['_temp']
compiled = imp.cache_from_source(source)
with open(source, 'w') as f:
f.write("x = 5")
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno != getattr(errno, 'EOVERFLOW', None):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
loader = machinery.SourceFileLoader('_temp', mapping['_temp'])
mod = loader.load_module('_temp')
# Sanity checks.
self.assertEqual(mod.__cached__, compiled)
self.assertEqual(mod.x, 5)
# The pyc file was created.
os.stat(compiled)
class BadBytecodeTest(unittest.TestCase):
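    # Shared helpers for the bytecode tests below: manipulate_bytecode()
    # rewrites a module's .pyc through a callable, and the _test_* methods
    # import the result to check how the loader reacts.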
def import_(self, file, module_name):
loader = self.loader(module_name, file)
module = loader.load_module(module_name)
self.assertIn(module_name, sys.modules)
def manipulate_bytecode(self, name, mapping, manipulator, *,
del_source=False):
"""Manipulate the bytecode of a module by passing it into a callable
that returns what to use as the new bytecode."""
try:
del sys.modules['_temp']
except KeyError:
pass
py_compile.compile(mapping[name])
if not del_source:
bytecode_path = imp.cache_from_source(mapping[name])
else:
os.unlink(mapping[name])
bytecode_path = make_legacy_pyc(mapping[name])
if manipulator:
with open(bytecode_path, 'rb') as file:
bc = file.read()
new_bc = manipulator(bc)
with open(bytecode_path, 'wb') as file:
if new_bc is not None:
file.write(new_bc)
return bytecode_path
def _test_empty_file(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: b'',
del_source=del_source)
test('_temp', mapping, bc_path)
@source_util.writes_bytecode_files
def _test_partial_magic(self, test, *, del_source=False):
        # When there are fewer than 4 bytes to a .pyc, regenerate it if
# possible, else raise ImportError.
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:3],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_magic_only(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:4],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_partial_timestamp(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:7],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_partial_size(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:11],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_no_marshal(self, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:12],
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bc_path
with self.assertRaises(EOFError):
self.import_(file_path, '_temp')
def _test_non_code_marshal(self, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bytecode_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:12] + marshal.dumps(b'abcd'),
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bytecode_path
with self.assertRaises(ImportError) as cm:
self.import_(file_path, '_temp')
self.assertEqual(cm.exception.name, '_temp')
self.assertEqual(cm.exception.path, bytecode_path)
def _test_bad_marshal(self, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bytecode_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:12] + b'<test>',
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bytecode_path
with self.assertRaises(EOFError):
self.import_(file_path, '_temp')
def _test_bad_magic(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: b'\x00\x00\x00\x00' + bc[4:])
test('_temp', mapping, bc_path)
class SourceLoaderBadBytecodeTest(BadBytecodeTest):
loader = machinery.SourceFileLoader
@source_util.writes_bytecode_files
def test_empty_file(self):
# When a .pyc is empty, regenerate it if possible, else raise
# ImportError.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 12)
self._test_empty_file(test)
def test_partial_magic(self):
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 12)
self._test_partial_magic(test)
@source_util.writes_bytecode_files
def test_magic_only(self):
# When there is only the magic number, regenerate the .pyc if possible,
# else raise EOFError.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 12)
self._test_magic_only(test)
@source_util.writes_bytecode_files
def test_bad_magic(self):
# When the magic number is different, the bytecode should be
# regenerated.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as bytecode_file:
self.assertEqual(bytecode_file.read(4), imp.get_magic())
self._test_bad_magic(test)
@source_util.writes_bytecode_files
def test_partial_timestamp(self):
# When the timestamp is partial, regenerate the .pyc, else
# raise EOFError.
def test(name, mapping, bc_path):
self.import_(mapping[name], name)
with open(bc_path, 'rb') as file:
self.assertGreater(len(file.read()), 12)
self._test_partial_timestamp(test)
@source_util.writes_bytecode_files
def test_partial_size(self):
# When the size is partial, regenerate the .pyc, else
# raise EOFError.
def test(name, mapping, bc_path):
self.import_(mapping[name], name)
with open(bc_path, 'rb') as file:
self.assertGreater(len(file.read()), 12)
self._test_partial_size(test)
@source_util.writes_bytecode_files
def test_no_marshal(self):
# When there is only the magic number and timestamp, raise EOFError.
self._test_no_marshal()
@source_util.writes_bytecode_files
def test_non_code_marshal(self):
self._test_non_code_marshal()
# XXX ImportError when sourceless
# [bad marshal]
@source_util.writes_bytecode_files
def test_bad_marshal(self):
        # Bad marshal data should raise an EOFError.
self._test_bad_marshal()
# [bad timestamp]
@source_util.writes_bytecode_files
def test_old_timestamp(self):
# When the timestamp is older than the source, bytecode should be
# regenerated.
zeros = b'\x00\x00\x00\x00'
with source_util.create_modules('_temp') as mapping:
py_compile.compile(mapping['_temp'])
bytecode_path = imp.cache_from_source(mapping['_temp'])
with open(bytecode_path, 'r+b') as bytecode_file:
bytecode_file.seek(4)
bytecode_file.write(zeros)
self.import_(mapping['_temp'], '_temp')
source_mtime = os.path.getmtime(mapping['_temp'])
source_timestamp = importlib._w_long(source_mtime)
with open(bytecode_path, 'rb') as bytecode_file:
bytecode_file.seek(4)
self.assertEqual(bytecode_file.read(4), source_timestamp)
# [bytecode read-only]
@source_util.writes_bytecode_files
def test_read_only_bytecode(self):
# When bytecode is read-only but should be rewritten, fail silently.
with source_util.create_modules('_temp') as mapping:
# Create bytecode that will need to be re-created.
py_compile.compile(mapping['_temp'])
bytecode_path = imp.cache_from_source(mapping['_temp'])
with open(bytecode_path, 'r+b') as bytecode_file:
bytecode_file.seek(0)
bytecode_file.write(b'\x00\x00\x00\x00')
# Make the bytecode read-only.
os.chmod(bytecode_path,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
try:
# Should not raise IOError!
self.import_(mapping['_temp'], '_temp')
finally:
# Make writable for eventual clean-up.
os.chmod(bytecode_path, stat.S_IWUSR)
class SourcelessLoaderBadBytecodeTest(BadBytecodeTest):
loader = machinery.SourcelessFileLoader
def test_empty_file(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError) as cm:
self.import_(bytecode_path, name)
self.assertEqual(cm.exception.name, name)
self.assertEqual(cm.exception.path, bytecode_path)
self._test_empty_file(test, del_source=True)
def test_partial_magic(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError) as cm:
self.import_(bytecode_path, name)
self.assertEqual(cm.exception.name, name)
self.assertEqual(cm.exception.path, bytecode_path)
self._test_partial_magic(test, del_source=True)
def test_magic_only(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_magic_only(test, del_source=True)
def test_bad_magic(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError) as cm:
self.import_(bytecode_path, name)
self.assertEqual(cm.exception.name, name)
self.assertEqual(cm.exception.path, bytecode_path)
self._test_bad_magic(test, del_source=True)
def test_partial_timestamp(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_partial_timestamp(test, del_source=True)
def test_partial_size(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_partial_size(test, del_source=True)
def test_no_marshal(self):
self._test_no_marshal(del_source=True)
def test_non_code_marshal(self):
self._test_non_code_marshal(del_source=True)
def test_main():
from test.support import run_unittest
run_unittest(SimpleTest,
SourceLoaderBadBytecodeTest,
SourcelessLoaderBadBytecodeTest
)
if __name__ == '__main__':
test_main()
|
ojii/sandlib | refs/heads/master | lib/lib-python/2.7/encodings/utf_16_le.py | 860 | """ Python 'utf-16-le' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_le_encode
def decode(input, errors='strict'):
return codecs.utf_16_le_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_le_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_le_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_le_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_le_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-le',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
romain-dartigues/ansible | refs/heads/devel | test/units/modules/network/ovs/test_openvswitch_port.py | 68 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.ovs import openvswitch_port
from units.modules.utils import set_module_args
from .ovs_module import TestOpenVSwitchModule, load_fixture
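# For each test, the sequence of (rc, stdout fixture, stderr) triples returned
# by successive ovs-vsctl invocations (see load_fixtures below).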
test_name_side_effect_matrix = {
'test_openvswitch_port_absent_idempotent': [
(0, '', '')],
'test_openvswitch_port_absent_removes_port': [
(0, 'list_ports_test_br.cfg', ''),
(0, 'get_port_eth2_tag.cfg', ''),
(0, 'get_port_eth2_external_ids.cfg', ''),
(0, '', '')],
'test_openvswitch_port_present_idempotent': [
(0, 'list_ports_test_br.cfg', ''),
(0, 'get_port_eth2_tag.cfg', ''),
(0, 'get_port_eth2_external_ids.cfg', ''),
(0, '', '')],
'test_openvswitch_port_present_creates_port': [
(0, '', ''),
(0, '', ''),
(0, '', '')],
'test_openvswitch_port_present_changes_tag': [
(0, 'list_ports_test_br.cfg', ''),
(0, 'get_port_eth2_tag.cfg', ''),
(0, 'get_port_eth2_external_ids.cfg', ''),
(0, '', '')],
'test_openvswitch_port_present_changes_external_id': [
(0, 'list_ports_test_br.cfg', ''),
(0, 'get_port_eth2_tag.cfg', ''),
(0, 'get_port_eth2_external_ids.cfg', ''),
(0, '', '')],
'test_openvswitch_port_present_adds_external_id': [
(0, 'list_ports_test_br.cfg', ''),
(0, 'get_port_eth2_tag.cfg', ''),
(0, 'get_port_eth2_external_ids.cfg', ''),
(0, '', '')],
'test_openvswitch_port_present_clears_external_id': [
(0, 'list_ports_test_br.cfg', ''),
(0, 'get_port_eth2_tag.cfg', ''),
(0, 'get_port_eth2_external_ids.cfg', ''),
(0, '', '')],
'test_openvswitch_port_present_runs_set_mode': [
(0, '', ''),
(0, '', ''),
(0, '', '')],
}
class TestOpenVSwitchPortModule(TestOpenVSwitchModule):
module = openvswitch_port
def setUp(self):
super(TestOpenVSwitchPortModule, self).setUp()
self.mock_run_command = (
patch('ansible.module_utils.basic.AnsibleModule.run_command'))
self.run_command = self.mock_run_command.start()
self.mock_get_bin_path = (
patch('ansible.module_utils.basic.AnsibleModule.get_bin_path'))
self.get_bin_path = self.mock_get_bin_path.start()
def tearDown(self):
super(TestOpenVSwitchPortModule, self).tearDown()
self.mock_run_command.stop()
self.mock_get_bin_path.stop()
def load_fixtures(self, test_name):
test_side_effects = []
for s in test_name_side_effect_matrix[test_name]:
rc = s[0]
out = s[1] if s[1] == '' else str(load_fixture(s[1]))
err = s[2]
side_effect_with_fixture_loaded = (rc, out, err)
test_side_effects.append(side_effect_with_fixture_loaded)
self.run_command.side_effect = test_side_effects
self.get_bin_path.return_value = '/usr/bin/ovs-vsctl'
def test_openvswitch_port_absent_idempotent(self):
set_module_args(dict(state='absent',
bridge='test-br',
port='eth2'))
self.execute_module(test_name='test_openvswitch_port_absent_idempotent')
def test_openvswitch_port_absent_removes_port(self):
set_module_args(dict(state='absent',
bridge='test-br',
port='eth2'))
commands = [
'/usr/bin/ovs-vsctl -t 5 del-port test-br eth2',
]
self.execute_module(changed=True, commands=commands,
test_name='test_openvswitch_port_absent_removes_port')
def test_openvswitch_port_present_idempotent(self):
set_module_args(dict(state='present',
bridge='test-br',
port='eth2',
tag=10,
external_ids={'foo': 'bar'}))
self.execute_module(test_name='test_openvswitch_port_present_idempotent')
def test_openvswitch_port_present_creates_port(self):
set_module_args(dict(state='present',
bridge='test-br',
port='eth2',
tag=10,
external_ids={'foo': 'bar'}))
commands = [
'/usr/bin/ovs-vsctl -t 5 add-port test-br eth2 tag=10',
'/usr/bin/ovs-vsctl -t 5 set port eth2 external_ids:foo=bar'
]
self.execute_module(changed=True,
commands=commands,
test_name='test_openvswitch_port_present_creates_port')
def test_openvswitch_port_present_changes_tag(self):
set_module_args(dict(state='present',
bridge='test-br',
port='eth2',
tag=20,
external_ids={'foo': 'bar'}))
commands = [
'/usr/bin/ovs-vsctl -t 5 set port eth2 tag=20'
]
self.execute_module(changed=True,
commands=commands,
test_name='test_openvswitch_port_present_changes_tag')
def test_openvswitch_port_present_changes_external_id(self):
set_module_args(dict(state='present',
bridge='test-br',
port='eth2',
tag=10,
external_ids={'foo': 'baz'}))
commands = [
'/usr/bin/ovs-vsctl -t 5 set port eth2 external_ids:foo=baz'
]
self.execute_module(changed=True,
commands=commands,
test_name='test_openvswitch_port_present_changes_external_id')
def test_openvswitch_port_present_adds_external_id(self):
set_module_args(dict(state='present',
bridge='test-br',
port='eth2',
tag=10,
external_ids={'foo2': 'bar2'}))
commands = [
'/usr/bin/ovs-vsctl -t 5 set port eth2 external_ids:foo2=bar2'
]
self.execute_module(changed=True,
commands=commands,
test_name='test_openvswitch_port_present_adds_external_id')
def test_openvswitch_port_present_clears_external_id(self):
set_module_args(dict(state='present',
bridge='test-br',
port='eth2',
tag=10,
external_ids={'foo': None}))
commands = [
'/usr/bin/ovs-vsctl -t 5 remove port eth2 external_ids foo'
]
self.execute_module(changed=True,
commands=commands,
test_name='test_openvswitch_port_present_clears_external_id')
def test_openvswitch_port_present_runs_set_mode(self):
set_module_args(dict(state='present',
bridge='test-br',
port='eth2',
tag=10,
external_ids={'foo': 'bar'},
set="port eth2 other_config:stp-path-cost=10"))
commands = [
'/usr/bin/ovs-vsctl -t 5 add-port test-br eth2 tag=10 -- set'
' port eth2 other_config:stp-path-cost=10',
'/usr/bin/ovs-vsctl -t 5 set port eth2 external_ids:foo=bar'
]
self.execute_module(changed=True, commands=commands,
test_name='test_openvswitch_port_present_runs_set_mode')
|
sjfloat/youtube-dl | refs/heads/master | devscripts/gh-pages/update-feed.py | 159 | #!/usr/bin/env python3
from __future__ import unicode_literals
import datetime
import io
import json
import textwrap
atom_template = textwrap.dedent("""\
<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<link rel="self" href="http://rg3.github.io/youtube-dl/update/releases.atom" />
<title>youtube-dl releases</title>
<id>https://yt-dl.org/feed/youtube-dl-updates-feed</id>
<updated>@TIMESTAMP@</updated>
@ENTRIES@
</feed>""")
entry_template = textwrap.dedent("""
<entry>
<id>https://yt-dl.org/feed/youtube-dl-updates-feed/youtube-dl-@VERSION@</id>
<title>New version @VERSION@</title>
<link href="http://rg3.github.io/youtube-dl" />
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
Downloads available at <a href="https://yt-dl.org/downloads/@VERSION@/">https://yt-dl.org/downloads/@VERSION@/</a>
</div>
</content>
<author>
<name>The youtube-dl maintainers</name>
</author>
<updated>@TIMESTAMP@</updated>
</entry>
""")
now = datetime.datetime.now()
now_iso = now.isoformat() + 'Z'
atom_template = atom_template.replace('@TIMESTAMP@', now_iso)
versions_info = json.load(open('update/versions.json'))
versions = list(versions_info['versions'].keys())
versions.sort()
entries = []
for v in versions:
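    # Version strings look like YYYY.MM.DD(.patch); a few historical versions
    # encode an impossible calendar date, so walk the day back until it is
    # valid and fold the offset (and any patch level) into the time fields so
    # every release still gets a unique timestamp.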
fields = v.split('.')
year, month, day = map(int, fields[:3])
faked = 0
patchlevel = 0
while True:
try:
datetime.date(year, month, day)
except ValueError:
day -= 1
faked += 1
assert day > 0
continue
break
if len(fields) >= 4:
try:
patchlevel = int(fields[3])
except ValueError:
patchlevel = 1
timestamp = '%04d-%02d-%02dT00:%02d:%02dZ' % (year, month, day, faked, patchlevel)
entry = entry_template.replace('@TIMESTAMP@', timestamp)
entry = entry.replace('@VERSION@', v)
entries.append(entry)
entries_str = textwrap.indent(''.join(entries), '\t')
atom_template = atom_template.replace('@ENTRIES@', entries_str)
with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
atom_file.write(atom_template)
|
xubayer786/namebench | refs/heads/master | nb_third_party/dns/name.py | 228 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Names.
@var root: The DNS root name.
@type root: dns.name.Name object
@var empty: The empty DNS name.
@type empty: dns.name.Name object
"""
import cStringIO
import struct
import sys
if sys.hexversion >= 0x02030000:
import encodings.idna
import dns.exception
NAMERELN_NONE = 0
NAMERELN_SUPERDOMAIN = 1
NAMERELN_SUBDOMAIN = 2
NAMERELN_EQUAL = 3
NAMERELN_COMMONANCESTOR = 4
class EmptyLabel(dns.exception.SyntaxError):
"""Raised if a label is empty."""
pass
class BadEscape(dns.exception.SyntaxError):
"""Raised if an escaped code in a text format name is invalid."""
pass
class BadPointer(dns.exception.FormError):
"""Raised if a compression pointer points forward instead of backward."""
pass
class BadLabelType(dns.exception.FormError):
"""Raised if the label type of a wire format name is unknown."""
pass
class NeedAbsoluteNameOrOrigin(dns.exception.DNSException):
"""Raised if an attempt is made to convert a non-absolute name to
wire when there is also a non-absolute (or missing) origin."""
pass
class NameTooLong(dns.exception.FormError):
"""Raised if a name is > 255 octets long."""
pass
class LabelTooLong(dns.exception.SyntaxError):
"""Raised if a label is > 63 octets long."""
pass
class AbsoluteConcatenation(dns.exception.DNSException):
"""Raised if an attempt is made to append anything other than the
empty name to an absolute name."""
pass
class NoParent(dns.exception.DNSException):
"""Raised if an attempt is made to get the parent of the root name
or the empty name."""
pass
_escaped = {
'"' : True,
'(' : True,
')' : True,
'.' : True,
';' : True,
'\\' : True,
'@' : True,
'$' : True
}
def _escapify(label):
"""Escape the characters in label which need it.
@returns: the escaped string
@rtype: string"""
text = ''
for c in label:
if c in _escaped:
text += '\\' + c
elif ord(c) > 0x20 and ord(c) < 0x7F:
text += c
else:
text += '\\%03d' % ord(c)
return text
def _validate_labels(labels):
"""Check for empty labels in the middle of a label sequence,
labels that are too long, and for too many labels.
@raises NameTooLong: the name as a whole is too long
@raises LabelTooLong: an individual label is too long
@raises EmptyLabel: a label is empty (i.e. the root label) and appears
in a position other than the end of the label sequence"""
l = len(labels)
total = 0
i = -1
j = 0
for label in labels:
ll = len(label)
total += ll + 1
if ll > 63:
raise LabelTooLong
if i < 0 and label == '':
i = j
j += 1
if total > 255:
raise NameTooLong
if i >= 0 and i != l - 1:
raise EmptyLabel
class Name(object):
"""A DNS name.
The dns.name.Name class represents a DNS name as a tuple of labels.
Instances of the class are immutable.
@ivar labels: The tuple of labels in the name. Each label is a string of
up to 63 octets."""
__slots__ = ['labels']
def __init__(self, labels):
"""Initialize a domain name from a list of labels.
@param labels: the labels
@type labels: any iterable whose values are strings
"""
super(Name, self).__setattr__('labels', tuple(labels))
_validate_labels(self.labels)
def __setattr__(self, name, value):
raise TypeError("object doesn't support attribute assignment")
def is_absolute(self):
"""Is the most significant label of this name the root label?
@rtype: bool
"""
return len(self.labels) > 0 and self.labels[-1] == ''
def is_wild(self):
"""Is this name wild? (I.e. Is the least significant label '*'?)
@rtype: bool
"""
return len(self.labels) > 0 and self.labels[0] == '*'
def __hash__(self):
"""Return a case-insensitive hash of the name.
@rtype: int
"""
h = 0L
for label in self.labels:
for c in label:
h += ( h << 3 ) + ord(c.lower())
return int(h % sys.maxint)
def fullcompare(self, other):
"""Compare two names, returning a 3-tuple (relation, order, nlabels).
        I{relation} describes the relationship between the names,
and is one of: dns.name.NAMERELN_NONE,
dns.name.NAMERELN_SUPERDOMAIN, dns.name.NAMERELN_SUBDOMAIN,
dns.name.NAMERELN_EQUAL, or dns.name.NAMERELN_COMMONANCESTOR
I{order} is < 0 if self < other, > 0 if self > other, and ==
0 if self == other. A relative name is always less than an
absolute name. If both names have the same relativity, then
the DNSSEC order relation is used to order them.
I{nlabels} is the number of significant labels that the two names
have in common.
"""
sabs = self.is_absolute()
oabs = other.is_absolute()
if sabs != oabs:
if sabs:
return (NAMERELN_NONE, 1, 0)
else:
return (NAMERELN_NONE, -1, 0)
l1 = len(self.labels)
l2 = len(other.labels)
ldiff = l1 - l2
if ldiff < 0:
l = l1
else:
l = l2
order = 0
nlabels = 0
namereln = NAMERELN_NONE
while l > 0:
l -= 1
l1 -= 1
l2 -= 1
label1 = self.labels[l1].lower()
label2 = other.labels[l2].lower()
if label1 < label2:
order = -1
if nlabels > 0:
namereln = NAMERELN_COMMONANCESTOR
return (namereln, order, nlabels)
elif label1 > label2:
order = 1
if nlabels > 0:
namereln = NAMERELN_COMMONANCESTOR
return (namereln, order, nlabels)
nlabels += 1
order = ldiff
if ldiff < 0:
namereln = NAMERELN_SUPERDOMAIN
elif ldiff > 0:
namereln = NAMERELN_SUBDOMAIN
else:
namereln = NAMERELN_EQUAL
return (namereln, order, nlabels)
def is_subdomain(self, other):
"""Is self a subdomain of other?
The notion of subdomain includes equality.
@rtype: bool
"""
(nr, o, nl) = self.fullcompare(other)
if nr == NAMERELN_SUBDOMAIN or nr == NAMERELN_EQUAL:
return True
return False
def is_superdomain(self, other):
"""Is self a superdomain of other?
        The notion of superdomain includes equality.
@rtype: bool
"""
(nr, o, nl) = self.fullcompare(other)
if nr == NAMERELN_SUPERDOMAIN or nr == NAMERELN_EQUAL:
return True
return False
def canonicalize(self):
"""Return a name which is equal to the current name, but is in
DNSSEC canonical form.
@rtype: dns.name.Name object
"""
return Name([x.lower() for x in self.labels])
def __eq__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] == 0
else:
return False
def __ne__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] != 0
else:
return True
def __lt__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] < 0
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] <= 0
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] >= 0
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, Name):
return self.fullcompare(other)[1] > 0
else:
return NotImplemented
def __repr__(self):
return '<DNS name ' + self.__str__() + '>'
def __str__(self):
return self.to_text(False)
def to_text(self, omit_final_dot = False):
"""Convert name to text format.
@param omit_final_dot: If True, don't emit the final dot (denoting the
root label) for absolute names. The default is False.
@rtype: string
"""
if len(self.labels) == 0:
return '@'
if len(self.labels) == 1 and self.labels[0] == '':
return '.'
if omit_final_dot and self.is_absolute():
l = self.labels[:-1]
else:
l = self.labels
s = '.'.join(map(_escapify, l))
return s
def to_unicode(self, omit_final_dot = False):
"""Convert name to Unicode text format.
        IDN ACE labels are converted to Unicode.
@param omit_final_dot: If True, don't emit the final dot (denoting the
root label) for absolute names. The default is False.
@rtype: string
"""
if len(self.labels) == 0:
return u'@'
if len(self.labels) == 1 and self.labels[0] == '':
return u'.'
if omit_final_dot and self.is_absolute():
l = self.labels[:-1]
else:
l = self.labels
s = u'.'.join([encodings.idna.ToUnicode(_escapify(x)) for x in l])
return s
def to_digestable(self, origin=None):
"""Convert name to a format suitable for digesting in hashes.
The name is canonicalized and converted to uncompressed wire format.
@param origin: If the name is relative and origin is not None, then
origin will be appended to it.
@type origin: dns.name.Name object
@raises NeedAbsoluteNameOrOrigin: All names in wire format are
absolute. If self is a relative name, then an origin must be supplied;
if it is missing, then this exception is raised
@rtype: string
"""
if not self.is_absolute():
if origin is None or not origin.is_absolute():
raise NeedAbsoluteNameOrOrigin
labels = list(self.labels)
labels.extend(list(origin.labels))
else:
labels = self.labels
dlabels = ["%s%s" % (chr(len(x)), x.lower()) for x in labels]
return ''.join(dlabels)
def to_wire(self, file = None, compress = None, origin = None):
"""Convert name to wire format, possibly compressing it.
@param file: the file where the name is emitted (typically
a cStringIO file). If None, a string containing the wire name
will be returned.
@type file: file or None
@param compress: The compression table. If None (the default) names
will not be compressed.
@type compress: dict
@param origin: If the name is relative and origin is not None, then
origin will be appended to it.
@type origin: dns.name.Name object
@raises NeedAbsoluteNameOrOrigin: All names in wire format are
absolute. If self is a relative name, then an origin must be supplied;
if it is missing, then this exception is raised
"""
if file is None:
file = cStringIO.StringIO()
want_return = True
else:
want_return = False
if not self.is_absolute():
if origin is None or not origin.is_absolute():
raise NeedAbsoluteNameOrOrigin
labels = list(self.labels)
labels.extend(list(origin.labels))
else:
labels = self.labels
i = 0
for label in labels:
n = Name(labels[i:])
i += 1
if not compress is None:
pos = compress.get(n)
else:
pos = None
if not pos is None:
value = 0xc000 + pos
s = struct.pack('!H', value)
file.write(s)
break
else:
if not compress is None and len(n) > 1:
pos = file.tell()
if pos < 0xc000:
compress[n] = pos
l = len(label)
file.write(chr(l))
if l > 0:
file.write(label)
if want_return:
return file.getvalue()
def __len__(self):
"""The length of the name (in labels).
@rtype: int
"""
return len(self.labels)
def __getitem__(self, index):
return self.labels[index]
def __getslice__(self, start, stop):
return self.labels[start:stop]
def __add__(self, other):
return self.concatenate(other)
def __sub__(self, other):
return self.relativize(other)
def split(self, depth):
"""Split a name into a prefix and suffix at depth.
@param depth: the number of labels in the suffix
@type depth: int
@raises ValueError: the depth was not >= 0 and <= the length of the
name.
@returns: the tuple (prefix, suffix)
@rtype: tuple
"""
l = len(self.labels)
if depth == 0:
return (self, dns.name.empty)
elif depth == l:
return (dns.name.empty, self)
elif depth < 0 or depth > l:
raise ValueError('depth must be >= 0 and <= the length of the name')
return (Name(self[: -depth]), Name(self[-depth :]))
def concatenate(self, other):
"""Return a new name which is the concatenation of self and other.
@rtype: dns.name.Name object
@raises AbsoluteConcatenation: self is absolute and other is
not the empty name
"""
if self.is_absolute() and len(other) > 0:
raise AbsoluteConcatenation
labels = list(self.labels)
labels.extend(list(other.labels))
return Name(labels)
def relativize(self, origin):
"""If self is a subdomain of origin, return a new name which is self
relative to origin. Otherwise return self.
@rtype: dns.name.Name object
"""
if not origin is None and self.is_subdomain(origin):
return Name(self[: -len(origin)])
else:
return self
def derelativize(self, origin):
"""If self is a relative name, return a new name which is the
concatenation of self and origin. Otherwise return self.
@rtype: dns.name.Name object
"""
if not self.is_absolute():
return self.concatenate(origin)
else:
return self
def choose_relativity(self, origin=None, relativize=True):
"""Return a name with the relativity desired by the caller. If
origin is None, then self is returned. Otherwise, if
relativize is true the name is relativized, and if relativize is
false the name is derelativized.
@rtype: dns.name.Name object
"""
if origin:
if relativize:
return self.relativize(origin)
else:
return self.derelativize(origin)
else:
return self
def parent(self):
"""Return the parent of the name.
@rtype: dns.name.Name object
@raises NoParent: the name is either the root name or the empty name,
and thus has no parent.
"""
if self == root or self == empty:
raise NoParent
return Name(self.labels[1:])
root = Name([''])
empty = Name([])
def from_unicode(text, origin = root):
"""Convert unicode text into a Name object.
    Labels are encoded in IDN ACE form.
@rtype: dns.name.Name object
"""
if not isinstance(text, unicode):
raise ValueError("input to from_unicode() must be a unicode string")
if not (origin is None or isinstance(origin, Name)):
raise ValueError("origin must be a Name or None")
labels = []
label = u''
escaping = False
edigits = 0
total = 0
if text == u'@':
text = u''
if text:
if text == u'.':
return Name(['']) # no Unicode "u" on this constant!
for c in text:
if escaping:
if edigits == 0:
if c.isdigit():
total = int(c)
edigits += 1
else:
label += c
escaping = False
else:
if not c.isdigit():
raise BadEscape
total *= 10
total += int(c)
edigits += 1
if edigits == 3:
escaping = False
label += chr(total)
elif c == u'.' or c == u'\u3002' or \
c == u'\uff0e' or c == u'\uff61':
if len(label) == 0:
raise EmptyLabel
labels.append(encodings.idna.ToASCII(label))
label = u''
elif c == u'\\':
escaping = True
edigits = 0
total = 0
else:
label += c
if escaping:
raise BadEscape
if len(label) > 0:
labels.append(encodings.idna.ToASCII(label))
else:
labels.append('')
if (len(labels) == 0 or labels[-1] != '') and not origin is None:
labels.extend(list(origin.labels))
return Name(labels)
def from_text(text, origin = root):
"""Convert text into a Name object.
@rtype: dns.name.Name object
"""
if not isinstance(text, str):
if isinstance(text, unicode) and sys.hexversion >= 0x02030000:
return from_unicode(text, origin)
else:
raise ValueError("input to from_text() must be a string")
if not (origin is None or isinstance(origin, Name)):
raise ValueError("origin must be a Name or None")
labels = []
label = ''
escaping = False
edigits = 0
total = 0
if text == '@':
text = ''
if text:
if text == '.':
return Name([''])
for c in text:
if escaping:
if edigits == 0:
if c.isdigit():
total = int(c)
edigits += 1
else:
label += c
escaping = False
else:
if not c.isdigit():
raise BadEscape
total *= 10
total += int(c)
edigits += 1
if edigits == 3:
escaping = False
label += chr(total)
elif c == '.':
if len(label) == 0:
raise EmptyLabel
labels.append(label)
label = ''
elif c == '\\':
escaping = True
edigits = 0
total = 0
else:
label += c
if escaping:
raise BadEscape
if len(label) > 0:
labels.append(label)
else:
labels.append('')
if (len(labels) == 0 or labels[-1] != '') and not origin is None:
labels.extend(list(origin.labels))
return Name(labels)
def from_wire(message, current):
"""Convert possibly compressed wire format into a Name.
@param message: the entire DNS message
@type message: string
@param current: the offset of the beginning of the name from the start
of the message
@type current: int
@raises dns.name.BadPointer: a compression pointer did not point backwards
in the message
@raises dns.name.BadLabelType: an invalid label type was encountered.
@returns: a tuple consisting of the name that was read and the number
of bytes of the wire format message which were consumed reading it
@rtype: (dns.name.Name object, int) tuple
"""
if not isinstance(message, str):
raise ValueError("input to from_wire() must be a byte string")
labels = []
biggest_pointer = current
hops = 0
count = ord(message[current])
current += 1
cused = 1
while count != 0:
if count < 64:
labels.append(message[current : current + count])
current += count
if hops == 0:
cused += count
elif count >= 192:
current = (count & 0x3f) * 256 + ord(message[current])
if hops == 0:
cused += 1
if current >= biggest_pointer:
raise BadPointer
biggest_pointer = current
hops += 1
else:
raise BadLabelType
count = ord(message[current])
current += 1
if hops == 0:
cused += 1
labels.append('')
return (Name(labels), cused)
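# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): exercises the
# helpers defined above; it only runs when the file is executed directly, so
# normal imports are unaffected. The example names are assumptions.
if __name__ == '__main__':
    _www = from_text('www.example.com.')
    _zone = from_text('example.com.')
    assert _www.is_subdomain(_zone)
    assert _zone.is_superdomain(_www)
    _prefix, _suffix = _www.split(3)            # suffix keeps 3 labels
    assert _prefix.to_text() == 'www'
    assert _suffix.to_text() == 'example.com.'
    assert _www.relativize(_zone).to_text() == 'www'
    assert _www.relativize(_zone).derelativize(_zone) == _www
    # fullcompare() reports (relation, order, number of common labels)
    assert _www.fullcompare(_zone) == (NAMERELN_SUBDOMAIN, 1, 3)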
|
dinhkhanh/trac | refs/heads/master | sample-plugins/workflow/MilestoneOperation.py | 2 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Franz Mayer <[email protected]>
#
# "THE BEER-WARE LICENSE" (Revision 42):
# <[email protected]> wrote this file. As long as you retain this
# notice you can do whatever you want with this stuff. If we meet some day,
# and you think this stuff is worth it, you can buy me a beer in return.
# Franz Mayer
#
# Author: Franz Mayer <[email protected]>
from genshi.builder import tag
from trac.core import Component, implements
from trac.resource import ResourceNotFound
from trac.ticket.api import ITicketActionController
from trac.ticket.default_workflow import ConfigurableTicketWorkflow
from trac.ticket.model import Milestone
from trac.util.translation import _
from trac.web.chrome import add_warning
revision = "$Rev$"
url = "$URL$"
class MilestoneOperation(Component):
"""Sets milestone for specific status.
=== Example ===
{{{
[ticket-workflow]
resolve.operations = set_resolution,set_milestone
resolve.milestone = invalid,wontfix,duplicate,worksforme->rejected
}}}
When setting status to `duplicate` the milestone will automatically change
to `rejected`.
'''Note:''' if user has changed milestone manually, this workflow operation
has ''no effect''!
=== Configuration ===
Don't forget to add `MilestoneOperation` to the workflow option
in `[ticket]` section. If there is no workflow option, the line will look
like this:
{{{
[ticket]
workflow = ConfigurableTicketWorkflow,MilestoneOperation
}}}
"""
implements(ITicketActionController)
def get_ticket_actions(self, req, ticket):
actions_we_handle = []
if req.authname != 'anonymous' and \
'TICKET_MODIFY' in req.perm(ticket.resource):
controller = ConfigurableTicketWorkflow(self.env)
actions_we_handle = controller.get_actions_by_operation_for_req(
req, ticket, 'set_milestone')
self.log.debug('set_milestone handles actions: %r' % actions_we_handle)
return actions_we_handle
def get_all_status(self):
return []
def render_ticket_action_control(self, req, ticket, action):
actions = ConfigurableTicketWorkflow(self.env).actions
label = actions[action]['name']
res_ms = self.__get_resolution_milestone_dict(ticket, action)
resolutions = ''
milestone = None
for i, resolution in enumerate(res_ms):
if i > 0:
resolutions = "%s, '%s'" % (resolutions, resolution)
else:
resolutions = "'%s'" % resolution
milestone = res_ms[resolution]
hint = _("For resolution %(resolutions)s the milestone will be "
"set to '%(milestone)s'.",
resolutions=resolutions, milestone=milestone)
return (label, None, hint)
def get_ticket_changes(self, req, ticket, action):
if action == 'resolve' and \
req.args and 'action_resolve_resolve_resolution' in req.args:
old_milestone = ticket._old.get('milestone') or None
user_milestone = ticket['milestone'] or None
# If there's no user defined milestone, we try to set it
# using the defined resolution -> milestone mapping.
if old_milestone is None:
new_status = req.args['action_resolve_resolve_resolution']
new_milestone = self.__get_new_milestone(ticket, action,
new_status)
# ... but we don't reset it to None unless it was None
if new_milestone is not None or user_milestone is None:
try:
milestone = Milestone(self.env, new_milestone)
self.log.info('changed milestone from %s to %s' %
(old_milestone, new_milestone) )
return {'milestone': new_milestone}
except ResourceNotFound:
add_warning(req, _("Milestone %(name)s does not exist.",
name=new_milestone))
return {}
def apply_action_side_effects(self, req, ticket, action):
pass
def __get_new_milestone(self, ticket, action, new_status):
"""Determines the new status"""
if new_status:
res_ms = self.__get_resolution_milestone_dict(ticket, action)
return res_ms.get(new_status)
def __get_resolution_milestone_dict(self, ticket, action):
transitions = self.config.get('ticket-workflow',
action + '.milestone').strip()
transition = [x.strip() for x in transitions.split('->')]
resolutions = [y.strip() for y in transition[0].split(',')]
res_milestone = {}
for res in resolutions:
res_milestone[res] = transition[1]
return res_milestone
|
endlessm/chromium-browser | refs/heads/master | third_party/catapult/dashboard/dashboard/pinpoint/models/change/change.py | 1 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from dashboard.pinpoint.models.change import commit as commit_module
from dashboard.pinpoint.models.change import patch as patch_module
class Change(collections.namedtuple('Change', ('commits', 'patch'))):
"""A particular set of Commits with or without an additional patch applied.
For example, a Change might sync to src@9064a40 and catapult@8f26966,
then apply patch 2423293002.
"""
__slots__ = ()
def __new__(cls, commits, patch=None):
"""Creates a Change.
Args:
commits: An iterable of Commits representing this Change's dependencies.
patch: An optional Patch to apply to the Change.
"""
if not (commits or patch):
raise TypeError('At least one commit or patch required.')
return super(Change, cls).__new__(cls, tuple(commits), patch)
def __str__(self):
"""Returns an informal short string representation of this Change."""
string = ' '.join(str(commit) for commit in self.commits)
if self.patch:
string += ' + ' + str(self.patch)
return string
@property
def id_string(self):
"""Returns a string that is unique to this set of commits and patch.
This method treats the commits as unordered. chromium@a v8@b is the same as
v8@b chromium@a. This is useful for looking up a build with this Change.
"""
string = ' '.join(commit.id_string for commit in sorted(self.commits))
if self.patch:
string += ' + ' + self.patch.id_string
return string
@property
def base_commit(self):
return self.commits[0]
@property
def last_commit(self):
return self.commits[-1]
@property
def deps(self):
return tuple(self.commits[1:])
def Update(self, other):
"""Updates this Change with another Change and returns it as a new Change.
Similar to OrderedDict.update(), for each Commit in the other Change:
* If the Commit's repository already exists in this Change,
override the git hash with the other Commit's git hash.
* Otherwise, add the Commit to this Change.
Also apply the other Change's patches to this Change.
Since Changes are immutable, this method returns a new Change instead of
modifying the existing Change.
Args:
other: The overriding Change.
Returns:
A new Change object.
"""
commits = collections.OrderedDict(self.commits)
commits.update(other.commits)
commits = tuple(commit_module.Commit(repository, git_hash)
for repository, git_hash in commits.items())
if self.patch and other.patch:
raise NotImplementedError(
"Pinpoint builders don't yet support multiple patches.")
patch = self.patch or other.patch
return Change(commits, patch)
def AsDict(self):
result = {
'commits': [commit.AsDict() for commit in self.commits],
}
if self.patch:
result['patch'] = self.patch.AsDict()
return result
@classmethod
def FromData(cls, data):
if isinstance(data, basestring):
return cls.FromUrl(data)
else:
return cls.FromDict(data)
@classmethod
def FromUrl(cls, url):
try:
return cls((commit_module.Commit.FromUrl(url),))
except (KeyError, ValueError):
return cls((), patch=patch_module.GerritPatch.FromUrl(url))
@classmethod
def FromDict(cls, data):
commits = tuple(commit_module.Commit.FromDict(commit)
for commit in data['commits'])
if data.get('patch') is not None:
patch = patch_module.GerritPatch.FromDict(data['patch'])
else:
patch = None
return cls(commits, patch=patch)
@classmethod
def Midpoint(cls, change_a, change_b):
"""Returns a Change halfway between the two given Changes.
This function does two passes over the Changes' Commits:
* The first pass attempts to match the lengths of the Commit lists by
expanding DEPS to fill in any repositories that are missing from one,
but included in the other.
* The second pass takes the midpoint of every matched pair of Commits,
expanding DEPS rolls as it comes across them.
A NonLinearError is raised if there is no valid midpoint. The Changes are
not linear if any of the following is true:
* They have different patches.
* Their repositories don't match even after expanding DEPS rolls.
* The left Change comes after the right Change.
* They are the same or adjacent.
See change_test.py for examples of linear and nonlinear Changes.
Args:
change_a: The first Change in the range.
change_b: The last Change in the range.
Returns:
A new Change representing the midpoint.
The Change before the midpoint if the range has an even number of commits.
Raises:
NonLinearError: The Changes are not linear.
"""
if change_a.patch != change_b.patch:
raise commit_module.NonLinearError(
'Change A has patch "%s" and Change B has patch "%s".' %
(change_a.patch, change_b.patch))
commits_a = list(change_a.commits)
commits_b = list(change_b.commits)
_ExpandDepsToMatchRepositories(commits_a, commits_b)
commits_midpoint = _FindMidpoints(commits_a, commits_b)
if commits_a == commits_midpoint:
raise commit_module.NonLinearError('Changes are the same or adjacent.')
return cls(commits_midpoint, change_a.patch)
def _ExpandDepsToMatchRepositories(commits_a, commits_b):
"""Expands DEPS in a Commit list to match the repositories in another.
Given two lists of Commits, with one bigger than the other, this function
  looks through the DEPS files of the smaller commit list to fill out any missing
Commits that are already in the bigger commit list.
Mutates the lists in-place, and doesn't return anything. The lists will not
have the same size if one Commit list contains a repository that is not found
in the DEPS of the other Commit list.
Example:
commits_a == [chromium@a, v8@c]
commits_b == [chromium@b]
This function looks through the DEPS file at chromium@b to find v8, then
appends that v8 Commit to commits_b, making the lists match.
Args:
commits_a: A list of Commits.
commits_b: A list of Commits.
"""
# First, scrub the list of commits of things we shouldn't be looking into.
commits_a[:] = [
c for c in commits_a if commit_module.RepositoryInclusionFilter(c)]
commits_b[:] = [
c for c in commits_b if commit_module.RepositoryInclusionFilter(c)]
# The lists may be given in any order. Let's make commits_b the bigger list.
if len(commits_a) > len(commits_b):
commits_a, commits_b = commits_b, commits_a
# Loop through every DEPS file in commits_a.
for commit_a in commits_a:
if len(commits_a) == len(commits_b):
break
deps_a = commit_a.Deps()
# Look through commits_b for any extra slots to fill with the DEPS.
for commit_b in commits_b[len(commits_a):]:
dep_a = _FindRepositoryUrlInDeps(deps_a, commit_b.repository_url)
if dep_a:
dep_commit = commit_module.Commit.FromDep(dep_a)
if dep_commit is not None:
commits_a.append(dep_commit)
else:
break
def _FindMidpoints(commits_a, commits_b):
"""Returns the midpoint of two Commit lists.
Loops through each pair of Commits and takes the midpoint. If the repositories
don't match, a NonLinearError is raised. If the Commits are adjacent and
represent a DEPS roll, the differing DEPs are added to the end of the lists.
Args:
commits_a: A list of Commits.
commits_b: A list of Commits.
Returns:
A list of Commits, each of which is the midpoint of the respective Commit in
commits_a and commits_b.
Raises:
NonLinearError: The lists have a different number of commits even after
expanding DEPS rolls, a Commit pair contains differing repositories, or a
Commit pair is in the wrong order.
"""
commits_midpoint = []
for commit_a, commit_b in zip_longest(commits_a, commits_b):
if not (commit_a and commit_b):
# If the commit lists are not the same length, bail out. That could happen
# if commits_b has a repository that was not found in the DEPS of
# commits_a (or vice versa); or a DEPS roll added or removed a DEP.
raise commit_module.NonLinearError(
'Changes have a different number of commits.')
commit_midpoint = commit_module.Commit.Midpoint(commit_a, commit_b)
commits_midpoint.append(commit_midpoint)
if commit_a == commit_midpoint and commit_midpoint != commit_b:
# Commits are adjacent.
# Add any DEPS changes to the commit lists.
deps_a = commit_a.Deps()
deps_b = commit_b.Deps()
dep_commits_a = sorted(
commit_module.Commit.FromDep(dep) for dep in deps_a.difference(deps_b)
if not _FindRepositoryUrlInCommits(commits_a, dep.repository_url))
dep_commits_b = sorted(
commit_module.Commit.FromDep(dep) for dep in deps_b.difference(deps_a)
if not _FindRepositoryUrlInCommits(commits_b, dep.repository_url))
commits_a += [c for c in dep_commits_a if c is not None]
commits_b += [c for c in dep_commits_b if c is not None]
return commits_midpoint
def _FindRepositoryUrlInDeps(deps, repository_url):
for dep in deps:
if dep[0] == repository_url:
return dep
return None
def _FindRepositoryUrlInCommits(commits, repository_url):
for commit in commits:
if commit.repository_url == repository_url:
return commit
return None
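# ----------------------------------------------------------------------
# Illustrative sketch (not part of Pinpoint): how Update() composes two
# Changes, assuming Commit is the (repository, git_hash) pair used above and
# some_gerrit_patch is a placeholder GerritPatch.
#
#   base = Change((Commit('chromium', 'aaa'), Commit('v8', 'bbb')))
#   exp = Change((Commit('v8', 'ccc'),), patch=some_gerrit_patch)
#   merged = base.Update(exp)
#   # merged keeps chromium@aaa, overrides v8 to ccc, and carries the patch.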
|
imply/chuu | refs/heads/master | chrome/common/extensions/docs/server2/fake_url_fetcher.py | 31 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from future import Future
class _Response(object):
def __init__(self):
self.content = ''
self.headers = { 'content-type': 'none' }
self.status_code = 200
class FakeUrlFetcher(object):
def __init__(self, base_path):
self._base_path = base_path
def _ReadFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def _ListDir(self, directory):
# In some tests, we need to test listing a directory from the HTML returned
    # from SVN. This reads an HTML file that has the directory's HTML.
if not os.path.isdir(os.path.join(self._base_path, directory)):
return self._ReadFile(directory[:-1])
files = os.listdir(os.path.join(self._base_path, directory))
html = '<html><title>Revision: 00000</title>\n'
for filename in files:
if filename.startswith('.'):
continue
if os.path.isdir(os.path.join(self._base_path, directory, filename)):
html += '<a>' + filename + '/</a>\n'
else:
html += '<a>' + filename + '</a>\n'
html += '</html>'
return html
def FetchAsync(self, url):
url = url.rsplit('?', 1)[0]
return Future(value=self.Fetch(url))
def Fetch(self, url):
url = url.rsplit('?', 1)[0]
result = _Response()
if url.endswith('/'):
result.content = self._ListDir(url)
else:
result.content = self._ReadFile(url)
return result
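# Illustrative usage sketch (not part of the original file): the base path and
# file names below are assumptions about the test data layout, and Future is
# assumed to expose Get() as used elsewhere in this server.
#
#   fetcher = FakeUrlFetcher('test_data/server2')
#   response = fetcher.Fetch('docs/manifest.json')     # raw file contents
#   listing = fetcher.Fetch('docs/')                   # fake SVN-style listing
#   future = fetcher.FetchAsync('docs/manifest.json')
#   assert future.Get().content == response.content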
|
ice3/track_a_parcel | refs/heads/reboot | db_create.py | 1 | #!/usr/bin/env python
"""Script to create the database."""
import os.path
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db, app
with app.app_context():
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
api.version(SQLALCHEMY_MIGRATE_REPO))
|
noemis-fr/old-custom | refs/heads/7.0 | l10n_fr_siret/company.py | 11 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Numérigraphe SARL.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class res_company(orm.Model):
"""Replace the company's fields for SIRET/RC with the partner's"""
_inherit = 'res.company'
def _get_partner_change(self, cr, uid, ids, context=None):
return self.pool['res.company'].search(
cr, uid, [('partner_id', 'in', ids)], context=context)
_columns = {
'siret': fields.related(
'partner_id', 'siret', type='char', string='SIRET', store={
'res.partner': (_get_partner_change, ['siren', 'nic'], 20),
'res.company': (lambda self, cr, uid, ids, c={}:
ids, ['partner_id'], 20), }),
'company_registry': fields.related(
'partner_id', 'company_registry', type='char',
string='Company Registry', store={
'res.partner': (_get_partner_change, ['company_registry'], 20),
'res.company': (lambda self, cr, uid, ids, c={}:
ids, ['partner_id'], 20), })
}
|
vaygr/ansible | refs/heads/devel | lib/ansible/utils/module_docs_fragments/docker.py | 6 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# Docker doc fragment
DOCUMENTATION = '''
options:
docker_host:
description:
- "The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
TCP connection string. For example, 'tcp://192.0.2.23:2376'. If TLS is used to encrypt the connection,
the module will automatically replace 'tcp' in the connection URL with 'https'."
default: "unix://var/run/docker.sock"
aliases:
- docker_url
tls_hostname:
description:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
default: localhost
api_version:
description:
- The version of the Docker API running on the Docker Host. Defaults to the latest version of the API
supported by docker-py.
default: default provided by docker-py
aliases:
- docker_api_version
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
default: 60
cacert_path:
description:
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
aliases:
- tls_ca_cert
cert_path:
description:
- Path to the client's TLS certificate file.
aliases:
- tls_client_cert
key_path:
description:
- Path to the client's TLS key file.
aliases:
- tls_client_key
ssl_version:
description:
- Provide a valid SSL version number. Default value determined by docker-py, currently 1.0.
default: "1.0"
tls:
description:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
server.
tls_verify:
description:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
You can define DOCKER_HOST, DOCKER_TLS_HOSTNAME, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_SSL_VERSION,
DOCKER_TLS, DOCKER_TLS_VERIFY and DOCKER_TIMEOUT. If you are using docker machine, run the script shipped
with the product that sets up the environment. It will set these variables for you. See
https://docker-py.readthedocs.org/en/stable/machine/ for more details.
'''
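# Illustrative playbook snippet (not part of this fragment): a sketch of how
# the shared options above are typically supplied to a docker_* module; the
# module name and the values are assumptions.
#
#   - name: Start a container over TLS
#     docker_container:
#       name: web
#       image: nginx:alpine
#       docker_host: tcp://192.0.2.23:2376
#       tls_verify: yes
#       cacert_path: /etc/docker/ca.pem
#       cert_path: /etc/docker/cert.pem
#       key_path: /etc/docker/key.pem
#       timeout: 60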
|
Tagar/incubator-airflow | refs/heads/master | airflow/contrib/operators/bigquery_table_delete_operator.py | 3 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class BigQueryTableDeleteOperator(BaseOperator):
"""
Deletes BigQuery tables
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: string
:param bigquery_conn_id: reference to a specific BigQuery hook.
:type bigquery_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
"""
template_fields = ('deletion_dataset_table',)
ui_color = '#ffd1dc'
@apply_defaults
def __init__(self,
deletion_dataset_table,
bigquery_conn_id='bigquery_default',
delegate_to=None,
ignore_if_missing=False,
*args,
**kwargs):
super(BigQueryTableDeleteOperator, self).__init__(*args, **kwargs)
self.deletion_dataset_table = deletion_dataset_table
self.bigquery_conn_id = bigquery_conn_id
self.delegate_to = delegate_to
self.ignore_if_missing = ignore_if_missing
def execute(self, context):
self.log.info('Deleting: %s', self.deletion_dataset_table)
hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
delegate_to=self.delegate_to)
conn = hook.get_conn()
cursor = conn.cursor()
cursor.run_table_delete(self.deletion_dataset_table, self.ignore_if_missing)
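# Illustrative DAG snippet (not part of this operator): the DAG id, schedule
# and table name are assumptions.
#
#   from datetime import datetime
#   from airflow import DAG
#
#   with DAG('example_bq_cleanup', start_date=datetime(2018, 1, 1),
#            schedule_interval='@daily') as dag:
#       delete_tmp = BigQueryTableDeleteOperator(
#           task_id='delete_tmp_table',
#           deletion_dataset_table='my-project.analytics.tmp_results',
#           ignore_if_missing=True)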
|
Pluto-tv/chromium-crosswalk | refs/heads/master | tools/telemetry/third_party/webpagereplay/third_party/dns/tsigkeyring.py | 248 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""A place to store TSIG keys."""
import base64
import dns.name
def from_text(textring):
"""Convert a dictionary containing (textual DNS name, base64 secret) pairs
into a binary keyring which has (dns.name.Name, binary secret) pairs.
@rtype: dict"""
keyring = {}
for keytext in textring:
keyname = dns.name.from_text(keytext)
secret = base64.decodestring(textring[keytext])
keyring[keyname] = secret
return keyring
def to_text(keyring):
"""Convert a dictionary containing (dns.name.Name, binary secret) pairs
into a text keyring which has (textual DNS name, base64 secret) pairs.
@rtype: dict"""
textring = {}
for keyname in keyring:
        keytext = keyname.to_text()
secret = base64.encodestring(keyring[keyname])
textring[keytext] = secret
return textring
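# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): round-trips a
# keyring with a made-up key name and secret; it only runs when the file is
# executed directly.
if __name__ == '__main__':
    text_keyring = {'keyname.example.': 'MTIzNDU2Nzg5MA=='}
    keyring = from_text(text_keyring)
    # keys become dns.name.Name objects, secrets become raw bytes
    assert keyring.values() == ['1234567890']
    # base64.encodestring appends a trailing newline to each secret
    assert to_text(keyring) == {'keyname.example.': 'MTIzNDU2Nzg5MA==\n'}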
|
alkyl1978/gnuradio | refs/heads/master | docs/sphinx/hieroglyph/__init__.py | 72 | # We only need to expose the setup function to Sphinx
from .hieroglyph import setup
from .version import __version__
__author__ = 'Robert Smallshire' |
CeltonMcGrath/TACTIC | refs/heads/master | src/pyasm/widget/statistic_wdg.py | 6 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['TaskCompletionWdg', 'CompletionBarWdg', 'CalendarBarWdg', 'CalendarSetCmd', 'TaskGroupCompletionWdg']
from pyasm.common import Date, Calendar, Common, jsonloads
from pyasm.search import *
from pyasm.web import *
from pyasm.biz import *
from pyasm.widget import FilterSelectWdg, FilterCheckboxWdg, IconButtonWdg, IconWdg
from input_wdg import HiddenWdg, CalendarInputWdg
from layout_wdg import TableWdg
import datetime, re
from pyasm.widget import BaseTableElementWdg
#from tactic.ui.common import BaseTableElementWdg
'''DEPRECATED: use the one in tactic.ui.table.TaskCompletionWdg'''
class TaskCompletionWdg(BaseTableElementWdg):
def init(my):
my.is_preprocessed = False
my.data = {}
my.cal_sub_task = None
my.cal_sub_task_value = False
def preprocess(my):
my.total_completion = 0.0
my.num_sobjects = 0
if my.sobjects:
tasks = Task.get_by_sobjects(my.sobjects)
# create a data structure
for task in tasks:
search_type = task.get_value("search_type")
search_id = task.get_value("search_id")
search_key = SearchKey.build_search_key(search_type, search_id, column='id')
sobject_tasks = my.data.get(search_key)
if not sobject_tasks:
sobject_tasks = []
my.data[search_key] = sobject_tasks
sobject_tasks.append(task)
my.is_preprocessed = True
def get_prefs(my):
my.cal_sub_task = FilterCheckboxWdg('calculate_sub_task', \
label='include sub tasks', css='small')
my.cal_sub_task_value = my.cal_sub_task.is_checked()
return my.cal_sub_task
def get_width(my):
'''not used I think'''
width = my.kwargs.get("width")
if not width:
width = 400
return int(width)
def get_text_value(my):
if not my.is_preprocessed:
my.preprocess()
sobject = my.get_current_sobject()
completion = my.get_completion(sobject)
if not completion:
completion = 0
return "%0.1d%%" % completion
def get_display(my):
sobject = my.get_current_sobject()
completion = my.get_completion(sobject)
# completion is compared to None, because a 0% completion is valid
if completion == None:
div = DivWdg("<i>No tasks</i>")
div.add_style("color: #aaa")
return div
widget = DivWdg()
width = my.get_width()
bar_wdg = CompletionBarWdg(completion, width )
widget.add(bar_wdg)
# keep a running tab of the total
my.total_completion += completion
my.num_sobjects += 1
return widget
def get_bottom_wdg(my):
width = my.get_width()
if my.num_sobjects:
completion = my.total_completion / my.num_sobjects
bar_wdg = CompletionBarWdg(completion, width)
else:
bar_wdg = "n/a"
div = DivWdg()
div.add("Total")
div.add("<hr>")
div.add(bar_wdg)
return div
def get_tasks(my, sobject):
''' if the sobject is a task, then just return the sobject, since tasks
do not have tasks. Account for subtask based on preferences. Also
filters out tasks belonging to obsolete processes'''
if isinstance(sobject, Task):
return [sobject]
tasks = my.data.get( SearchKey.get_by_sobject(sobject, use_id=True) )
if tasks == None:
tasks = Task.get_by_sobjects([sobject])
# make sure we only take tasks in the pipeline into account
pipeline = Pipeline.get_by_sobject(sobject)
recurse = False
if my.cal_sub_task_value:
recurse = True
if pipeline:
processes = pipeline.get_process_names(recurse=recurse)
filtered_tasks = []
for task in tasks:
if task.get_value("process") not in processes:
continue
filtered_tasks.append(task)
return filtered_tasks
else:
return tasks
def get_completion(my, sobject):
my.tasks = my.get_tasks(sobject)
percent = 0
# count the tasks with invalid or obsolete status
#invalid_count = 0
for task in my.tasks:
status_attr = task.get_attr("status")
task_percent = status_attr.get_percent_completion()
if task_percent < 0:
task_percent = 0
#invalid_count += 1
percent += task_percent
if my.tasks:
# NOT sure if I should subtract total # of tasks by invalid
# task, leave it for now
percent = int(percent / len(my.tasks))
else:
return None
return percent
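    # Illustrative note (not part of the original widget): get_completion()
    # averages the percent-complete of the sobject's tasks, e.g. tasks at
    # 100%, 50% and 0% yield int(150 / 3) == 50; a status with a negative
    # (invalid) completion is counted as 0%.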
def handle_td(my, td):
td.add_style('vertical-align','middle')
sobject = my.get_current_sobject()
completion = my.get_completion(sobject)
td.add_attr("spt_input_value", completion)
class TaskGroupCompletionWdg(TaskCompletionWdg):
ARGS_KEYS = {
'options': {
'type': 'TextAreaWdg',
'description': 'A list of options for the various completion bars. e.g. [{"label":"MODEL", "context": ["model","rig"]}] '
},
}
def preprocess(my):
my.options = my.get_option('options')
if my.options:
try:
my.group_list = jsonloads(my.options)
except:
my.group_list = [{'label': 'Syntax Error', 'context':[]}]
else:
my.group_list = [{'label':'default', 'context': []}]
super(TaskGroupCompletionWdg, my).preprocess()
def init(my):
# these 2 are used for bottom summary
my.total_completion_dict = {}
my.num_sobjects = 0
super(TaskGroupCompletionWdg, my).init()
def get_bottom_wdg(my):
if my.total_completion_dict:
table = Table()
col = table.add_col()
col = table.add_col()
col.add_style('width','80%')
for group in my.group_list:
group_label = group.get('label')
completion = my.total_completion_dict.get(group_label)/ my.num_sobjects
group_contexts = group.get('context')
if group_contexts:
group_contexts = ', '.join(group_contexts)
width = my.get_width()
bar_wdg = CompletionBarWdg(completion, width)
label_div = FloatDivWdg('%s: ' %group_label)
label_div.add_style('margin-right: 4px')
label_div.add_tip(group_contexts, group_contexts)
table.add_row()
table.add_cell(label_div)
table.add_cell(bar_wdg)
return table
width = my.get_width()
completion = 0
if my.num_sobjects:
completion = my.total_completion / my.num_sobjects
div = DivWdg()
div.add("Total")
div.add("<hr>")
bar_wdg = CompletionBarWdg(completion, width)
div.add(bar_wdg)
return div
def get_group_completion(my, items):
'''get the avg completion'''
sum = 0
if not items:
return 0
for item in items:
sum += item
avg = sum / len(items)
return avg
def get_text_value(my):
sobject = my.get_current_sobject()
if sobject.get_id() == -1:
return ''
my.calculate(sobject)
output = []
for group in my.group_list:
group_label = group.get('label')
group_contexts = group.get('context')
if group_contexts:
group_contexts = ', '.join(group_contexts)
group_completion = my.completion_dict.get(group_label)
completion = my.get_group_completion(group_completion)
output.append('%s: %s%%'%(group_label, completion))
return '\n'.join(output)
def calculate(my, sobject):
'''do the calculation'''
tasks = my.get_tasks(sobject)
completion = ''
my.completion_dict = {}
for group in my.group_list:
group_label = group.get('label')
group_contexts = group.get('context')
if not group_label:
continue
for task in tasks:
context = task.get_value('context')
if context in group_contexts:
completion = my.get_completion(task)
group_completion = my.completion_dict.get(group_label)
if group_completion == None:
my.completion_dict[group_label] = [completion]
else:
group_completion.append(completion)
def get_display(my):
sobject = my.get_current_sobject()
if sobject.get_id() == -1:
return ''
my.calculate(sobject)
# completion is compared to None, because a 0% completion is valid
if not my.completion_dict:
if my.group_list and my.group_list[0].get('label')=='Syntax Error':
div = DivWdg("<i>Syntax Error in Column Definition</i>")
elif my.group_list and my.group_list[0].get('label')=='default':
div = DivWdg("<i>Fill in the options e.g. [{'label':'MODEL', 'context': ['model','rig']}] </i>")
else:
div = DivWdg("<i>No tasks</i>")
div.add_style("color: #aaa")
return div
table = Table()
col = table.add_col()
col = table.add_col()
col.add_style('width','80%')
for group in my.group_list:
group_label = group.get('label')
group_contexts = group.get('context')
if group_contexts:
group_contexts = ', '.join(group_contexts)
group_completion = my.completion_dict.get(group_label)
completion = my.get_group_completion(group_completion)
width = my.get_width()
bar_wdg = CompletionBarWdg(completion, width)
label_div = FloatDivWdg('%s: ' %group_label)
label_div.add_style('margin-right: 4px')
label_div.add_tip(group_contexts, group_contexts)
table.add_row()
table.add_cell(label_div)
table.add_cell(bar_wdg)
#widget.add(HtmlElement.br())
completed_summary = my.total_completion_dict.get(group_label)
if not completed_summary:
completed_summary = 0
my.total_completion_dict[group_label] = completion + completed_summary
my.num_sobjects += 1
return table
class CompletionBarWdg(DivWdg):
def __init__(my, percent, length):
if not percent:
percent = 0
my.percent = percent
#my.percent = 100
my.length = length
super(CompletionBarWdg, my).__init__()
def init(my):
#my.add_style("width", my.length + 50)
my.add_style("font-size", "0.8em")
width = int(my.length*(float(my.percent)/100))
if width == 0:
width = 1
percent_str = HtmlElement.span("%s%% " % my.percent )
percent_str.add_style("float: right")
percent_str.add_style("color: white")
bar = FloatDivWdg()
bar.add(" ")
#bar.add_style("width", width)
bar.add_style("width", "%s%%" % (70*my.percent/100))
bar.add_style("border: 1px solid #aaa")
color_code = my._get_color_code(my.percent)
bar.add_class("completion %s" % my._get_bar_color_code(my.percent) )
bar.add_style("background-color", color_code)
bar.add_style("float", "left")
my.add(bar)
percent = FloatDivWdg("%0.1f%%" % my.percent, css='larger')
percent.add_style('padding', '2px 0 0 4px')
my.add( percent )
def _get_bar_color_code(my, percent):
''' get a color code based on percentage of task completion '''
color = "grey"
if percent == 100:
color = "green"
elif percent >= 80:
color = "blue"
elif percent >= 40:
color = "yellow"
elif percent >= 20:
color = "red"
return color
def _get_color_code(my, percent):
''' get a color code based on percentage of task completion '''
color = "#ddd"
if percent == 100:
color = "#b5e868"
elif percent > 80:
color = "#b5e868"
elif percent > 50:
color = "#e8e268"
elif percent > 30:
color = "#e8c268"
elif percent > 10:
color = "#e86868"
return color
class CalendarBarWdg(BaseTableElementWdg):
MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
LEFT_MARGIN = 60
CAL_INPUT = "calendar"
def __init__(my):
my.calendar = None
my.always_recal = False
my.user_defined_bound = True
my.show_days = False
my.valid_date = True
my.statuses = {}
super(CalendarBarWdg, my).__init__()
def get_text_value(my):
sobject = my.get_current_sobject()
"""
start_value = sobject.get_value("bid_start_date")
end_value = sobject.get_value("bid_end_date")
if start_value:
start = Date(start_value)
start_display = start.get_display_date()
else:
start_display = ""
if end_value:
end = Date(end_value)
end_display = end.get_display_date()
else:
end_display = ""
return "%s - %s" % (start_display, end_display)
"""
start_date = sobject.get_value("bid_start_date")
start = ''
end = ''
if start_date:
start = Date(start_date).get_display_date()
end_date = sobject.get_value("bid_end_date")
if end_date:
end = Date(end_date).get_display_date()
# if bid end date does not exist, try bid duration
if start_date != "" and end_date == "":
bid_duration = sobject.get_value("bid_duration")
if bid_duration != "":
date = Date(start_date)
date.add_days(bid_duration)
end_date = date.get_db_time()
end = Date(end_date).get_display_date()
return "%s - %s" % (start, end)
def get_info(my):
# when used strictly as a BaseTableElement, there is no need to recalculate
if not my.always_recal and hasattr(my, "start_year") and hasattr(my, "end_year"):
return
# create the calendar only if it is needed
# create only if necessary since the on script is linked to the
# cal_name of the CalendarInputWdg
if not my.calendar:
my.calendar = CalendarInputWdg(my.CAL_INPUT)
my.calendar.set_show_on_wdg(False)
my.calendar.set_show_value(False)
# if this is in ajax, then try to recreate the widget
web = WebContainer.get_web()
# TODO: this code should be put into an ajax class
ajax_class = web.get_form_value("widget")
my.is_ajax = False
is_tbody_swap = False
if ajax_class and web.get_form_value("ajax") == "true":
from pyasm.common import Common
module, class_name = Common.breakup_class_path(ajax_class)
if class_name == my.__class__.__name__:
my.is_ajax = True
elif class_name == 'TbodyWdg':
is_tbody_swap = True
if my.is_ajax:
search_key = web.get_form_value("search_key")
sobject = Search.get_by_search_key(search_key)
my.set_sobject(sobject)
my.actual_edit = web.get_form_value("actual_edit")
my.bid_edit = web.get_form_value("bid_edit")
my.start_year = web.get_int_form_value("start_year")
my.end_year = web.get_int_form_value("end_year")
my.start_month = web.get_int_form_value("start_month")
my.end_month = web.get_int_form_value("end_month")
my.width = web.get_int_form_value("calendar_width")
my.cal_margin = web.get_int_form_value("calendar_margin")
else:
my.bid_edit = my.get_option("bid_edit")
my.actual_edit = my.get_option("actual_edit")
my.width = my.get_option("width")
if my.width == "":
my.width = 400
else:
my.width = int(my.width)
my.cal_margin = my.get_option("cal_margin")
if not my.cal_margin:
my.cal_margin = 1
else:
my.cal_margin = int(my.cal_margin)
# determine date ranges
start_date = None
end_date = None
for sobject in my.sobjects:
bid_start_date = str(sobject.get_value("bid_start_date"))
if bid_start_date != "":
if not start_date or bid_start_date < start_date:
start_date = bid_start_date
actual_start_date = str(sobject.get_value("actual_start_date",no_exception=True))
if actual_start_date != "":
if actual_start_date < start_date:
start_date = actual_start_date
bid_end_date = sobject.get_value("bid_end_date")
# if bid end date does not exist, try bid duration
if bid_start_date != "" and bid_end_date == "":
bid_duration = sobject.get_value("bid_duration")
if bid_duration != "":
date = Date(bid_start_date)
date.add_days(bid_duration)
bid_end_date = date.get_db_time()
if bid_end_date:
# necessary to check for None end_date
if not end_date or str(bid_end_date) > str(end_date):
end_date = bid_end_date
actual_end_date = sobject.get_value("actual_end_date",no_exception=True)
if actual_end_date:
if str(actual_end_date) > str(end_date):
end_date = actual_end_date
if start_date and end_date and my.sobjects:
start_date, time = str(start_date).split(" ")
my.start_year, my.start_month, tmp = [int(x) for x in start_date.split("-")]
end_date, time = str(end_date).split(" ")
my.end_year, my.end_month, tmp = [int(x) for x in end_date.split("-")]
else:
my.start_year = datetime.date.today().year
my.start_month = 1
my.end_year = datetime.date.today().year
my.end_month = 12
# the calendar boundaries can be overriden thru the left/right arrows control
# this is not needed when it is part of the SObjectTaskTableElement
if my.user_defined_bound:
my.left_bound_hid = HiddenWdg('cal_left_control_hid')
my.left_bound_hid.set_persistence()
my.right_bound_hid = HiddenWdg('cal_right_control_hid')
my.right_bound_hid.set_persistence()
my.week_hid_wdg = HiddenWdg('cal_week_hid')
left_bound = my.left_bound_hid.get_value()
right_bound = my.right_bound_hid.get_value()
if left_bound and re.match(r'\d{4}:\w{3}', left_bound):
left_bound = left_bound.split(':')
my.start_year = int(left_bound[0])
my.start_month = my.MONTHS.index(left_bound[1]) + 1
if right_bound and re.match(r'\d{4}:\w{3}', right_bound):
right_bound = right_bound.split(':')
my.end_year = int(right_bound[0])
my.end_month = my.MONTHS.index(right_bound[1]) + 1
# determine the month range for tbody swap
if is_tbody_swap:
month_info = web.get_form_value('months_info')
my.num_months, my.first_month, my.left_year_bound = month_info.split(':')
my.num_months = int(my.num_months)
my.left_year_bound = int(my.left_year_bound)
return
my.week_hid = web.get_int_form_value("cal_week_hid")
        # my.months stores a list of (month, year) pairs to be drawn in the title area
my.months = []
left_month_bound = my.start_month - 1 - my.cal_margin
right_month_bound = my.end_month -1 + my.cal_margin
# my.start_year is preserved for ajax while my.left_year_bound
# is recalulated every time
my.left_year_bound = my.start_year
my.right_year_bound = my.end_year + 1
while left_month_bound < 0:
left_month_bound += 12
my.left_year_bound -= 1
while right_month_bound > 11:
right_month_bound -= 12
my.right_year_bound += 1
for year in range(my.left_year_bound, my.right_year_bound):
for i in range(left_month_bound, len(CalendarBarWdg.MONTHS)):
month = CalendarBarWdg.MONTHS[i]
my.months.append((month, year))
if year == my.right_year_bound - 1 and i >= right_month_bound:
break
# reset month index
left_month_bound = 0
# prepare values used for calculating the bar width and start position
# my.left_year_bound above is one of them
if not my.months:
for i in range(0,11):
my.months.append((CalendarBarWdg.MONTHS[i],2007))
my.num_months = len(my.months)
my.first_month = my.months[0][0]
def get_prefs(my):
span = SpanWdg("width: ", css="med")
my.width_select = FilterSelectWdg("calendar_width")
my.width_select.set_option("values", "200|400|620|800|1000")
my.width_select.add_empty_option("default")
value = my.width_select.get_value()
if value != "":
my.set_option("width", value)
span.add(my.width_select)
span.add("px")
span2 = SpanWdg("margin: ", css='med')
my.margin_select = FilterSelectWdg("calendar_margin")
my.margin_select.set_option("values", "0|1|2|3|4")
value = my.margin_select.get_value()
if value != "":
my.set_option("cal_margin", value)
span2.add(my.margin_select)
span2.add('months')
span.add(span2)
pref_show_day = FilterCheckboxWdg('show_days', label='Show days')
if pref_show_day.get_value():
my.show_days = True
span.add(pref_show_day)
return span
def get_calendar(my):
'''this can be called to return the crucial components this widget
needs to function when not used directly as a BaseTableElement'''
my.get_info()
widget = Widget()
hidden = HiddenWdg("calendar_column", "")
widget.add(hidden)
widget.add(my.calendar)
return widget
def set_always_recal(my, recal):
my.always_recal = recal
def set_user_defined_bound(my, bound):
my.user_defined_bound = bound
def _get_control_div(my, control_id, other_control_id, control_hidden, other_control_hidden,\
bound):
''' get a control div to set the range of calendar to display '''
left_div = FloatDivWdg(width =my.LEFT_MARGIN, css='center_content')
left_div.add(control_hidden)
control_name = control_hidden.get_input_name()
other_control_name = other_control_hidden.get_input_name()
left_bound_val = control_hidden.get_value()
if not left_bound_val:
default_value = ''
if bound == 'left':
default_value = '%s:%s' %(my.months[0][1], my.months[0][0])
else:
default_value = '%s:%s' %(my.months[-1][1], my.months[-1][0])
left_bound_val = default_value
left_info = DivWdg(left_bound_val, id=control_id, css='hand')
left_info.add_class("calendar_nav")
left_info.add_event('onclick', 'document.form.submit()')
left_div.add(left_info)
icon = IconButtonWdg(name='prev month', icon=IconWdg.ARROW_LEFT)
icon.add_event('onclick', "TacticCalendarLabel.update_range('%s', '%s', '%s', '%s', 'backward', '%s')" \
%(control_id, other_control_id, control_name, other_control_name, bound))
left_div.add(icon)
icon = IconButtonWdg(name='next month', icon=IconWdg.ARROW_RIGHT)
icon.add_event('onclick', "TacticCalendarLabel.update_range('%s', '%s', '%s', '%s', 'forward', '%s')" \
%(control_id, other_control_id, control_name, other_control_name, bound))
left_div.add(icon)
return left_div
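# get_title builds the calendar header: the left/right range controls, the year row,
# the month labels (with optional week/day rows) and the vertical dividers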
def get_title(my):
# initialize
my.get_info()
# division rounding error, 3px
margin_error = 3.0
main_div = FloatDivWdg()
main_width = my.width + (my.LEFT_MARGIN * 2) + margin_error
main_div.add_style("width", main_width)
# add the left control
left_div = my._get_control_div('cal_left_control_id', 'cal_right_control_id', \
my.left_bound_hid, my.right_bound_hid, 'left')
main_div.add(my.week_hid_wdg)
main_div.add(left_div)
# create the calendar label area
div = FloatDivWdg(id='cal_label_area')
# this width seems irrelevant
div.add_style("width", my.width + margin_error)
div.add_style("font-size: 0.8em")
main_div.add(div)
# write some hidden calendar info
div.add(my.get_calendar())
# add the right control
right_div = my._get_control_div('cal_right_control_id', 'cal_left_control_id',\
my.right_bound_hid, my.left_bound_hid, 'right')
main_div.add(right_div)
# months_info is used to remember the current calendar range
# for tbody replacement
div.add(HiddenWdg('months_info', '%s:%s:%s' \
%(my.num_months, my.first_month, my.left_year_bound)))
# draw year divs container
year_main_div = Widget()
#year_main_div.add_style('width', my.width + margin_error)
div.add(year_main_div)
div.add(HtmlElement.br())
# this is less stringent and different from my.is_ajax which is
# used when the CalendarBarWdg is updated
is_ajax = CalendarBarWdg.is_ajax(check_name=False)
# draw months
year_widths = []
year_width = 0
# NOTE: my.months is a list of tuple (month, year)
last_year = my.months[0][1]
for idx, month in enumerate(my.months):
year = month[1]
# accurate decimals are necessary
month_width = '%.2f' %(float(my.width)/my.num_months)
month_width = float(month_width)
# collect the required year_div's width for later
if year != last_year:
last_year = year
year_widths.append(year_width)
year_width = 0
if idx == len(my.months) - 1:
year_width += float(month_width)
year_widths.append(year_width)
month_id = '%s_%s' %(year, month[0])
label = SpanWdg(month[0], id = month_id, css='label_out')
label.add_event('onmouseover',"Effects.color('%s','label_over')" %month_id)
label.add_event('onmouseout',"Effects.color('%s','label_out')" %month_id)
month_span = FloatDivWdg(label)
year_width += month_width
month_span.add_event('onclick', "var x=get_elements('week_filter'); if(x) x.set_value('')")
month_span.add_event("onclick", "get_elements('cal_left_control_hid').set_value('%s:%s');get_elements('cal_right_control_hid').set_value('%s:%s');document.form.submit()" % (year,month[0], year, month[0]) )
month_span.add_class("hand")
# add a little bit more space using part of the margin_error
month_span.add_style("width: %.1fpx" % (month_width + margin_error/len(my.months)/4))
month_span.add_style("float: left")
month_span.add_style("text-align: center")
if idx % 2 == 0:
month_span.add_class("calendar_month_even")
else:
month_span.add_class("calendar_month_odd")
# draw weeks, days only if the user has chosen a very narrow boundary
if my.num_months <= 2 or my.show_days:
my._draw_weeks(month_span, month_width, month, idx)
if my.num_months == 1 or (my.num_months == 2 and my.width >= 800)\
or my.show_days:
my._draw_days(month_span, month_width, month, idx)
div.add(month_span)
# add divider
if not is_ajax:
divider = my._get_divider(idx * float(my.width)/my.num_months)
div.add(divider)
# add individual year div back into year_main_div
year_index = 0
for year in xrange(my.left_year_bound, my.right_year_bound):
year_span = SpanWdg(year)
year_div = FloatDivWdg(year_span, css='center_content',\
width=year_widths[year_index])
year_div.add_event('onclick', "var x=get_elements('week_filter'); if(x) x.set_value('')")
year_div.add_event("onclick", "get_elements('cal_left_control_hid').set_value('%s:Jan');get_elements('cal_right_control_hid').set_value('%s:Dec');document.form.submit()" % (year,year) )
year_div.add_class("hand")
if year % 2 == 0:
year_div.add_class("calendar_year_even")
else:
year_div.add_class("calendar_year_odd")
year_main_div.add(year_div)
year_index += 1
# add the last divider
if not is_ajax:
divider = my._get_divider(my.width)
div.add(divider)
# readjust the lines on load both vertically and horizontally
ref_table_id = my.parent_wdg.table.get_id()
y_offset = 30
AppServer.add_onload_script("TacticCalendarLabel.realign('calendar_divider','cal_label_area',"\
"'%s', %s)" %(ref_table_id, y_offset))
script = my.get_show_cal_script()
main_div.add(script)
return main_div
def get_show_cal_script(my):
script = HtmlElement.script('''
function show_task_cal(input_name, element, date_string, column, script ) {
get_elements('calendar_column').set_value(column);
calendar_tactic.show_calendar(input_name, element, date_string)
calendar_tactic.cal.onClose = function() { if (!calendar_tactic.check(input_name)) return; eval(script) }
}
''')
return script
def _get_divider(my, left_pos, css='calendar_divider'):
'''get divider for each week'''
inside = DivWdg(css=css)
#inside.set_attr('name', 'cal_divider')
inside.add_style("position: absolute")
inside.add_style("float: left")
inside.add_style("border-style: dashed")
inside.add_style("border-width: 0px 1px 0px 0px")
inside.add_style("height: 100%")
inside.add_style("width: 1px" )
inside.add_style("left: %spx" %left_pos )
return inside
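# _get_month_days returns the (month, day) integer pairs for each day of the given week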
def _get_month_days(my, year, week):
num_days_list = Calendar.get_monthday_time(year, week, month_digit=True)
month_days = [ (int(i[0]), int(i[1])) for i in num_days_list ]
return month_days
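# _draw_weeks adds one clickable sub-div per week under a month label; the widths are
# proportional to how many of the month's days fall in each week (or the full month
# width when a single week is already selected)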
def _draw_weeks(my, div, width, monthyear, month_idx):
month, year = monthyear[0], monthyear[1]
month_digit = CalendarBarWdg.MONTHS.index(month) + 1
week_width_list = []
week_width = 0.0
num_days = Calendar.get_num_days(year, month_digit)
week = my.week_hid
db_date = '%s-%s-01' %(year, month_digit)
date = Date(db_date=db_date)
current_week = date.get_week()
day_width = float(width) / num_days
if week:
week_width_list.append((week, width))
else:
last_date = db_date
append_extra = False
for day in xrange(1, num_days+1):
if current_week != date.get_week():
week_width_list.append((current_week, week_width))
current_week = date.get_week()
week_width = 0.0
date.add_days(1)
# BUG workaround: Date.add_days can fail to advance past certain dates (e.g. 2007-11-04), so skip here and append the extra day width after the loop
if date.get_db_date() == last_date:
last_date = date.get_db_date()
append_extra = True
continue
week_width += day_width
last_date = date.get_db_date()
# last week
if append_extra:
week_width += day_width
week_width_list.append((current_week, week_width))
for week, week_width in week_width_list:
week_div = FloatDivWdg(week, width = week_width, css='calendar_week_out')
week_id = 'cal_week_%s'%week
week_div.set_id(week_id)
week_div.add_event('onclick', "get_elements('cal_week_hid').set_value('%s')" % week)
week_div.add_event('onclick', "var x =get_elements('week_filter'); if(x) x.set_value('')")
week_div.add_event('onmouseover',"Effects.css_morph('%s','calendar_week_over')" %week_id)
week_div.add_event('onmouseout',"Effects.css_morph('%s','calendar_week_out')" %week_id)
div.add(week_div)
div.add(HtmlElement.br())
def _draw_days(my, div, width, monthyear, month_idx):
'''draw the days for each month'''
div.add(HtmlElement.br())
month, year = monthyear[0], monthyear[1]
month_digit = CalendarBarWdg.MONTHS.index(month) + 1
num_days = Calendar.get_num_days(year, month_digit)
day_range = xrange(1, num_days + 1)
week = my.week_hid
if week:
# handle the cross-year scenario
if int(week) == 1 and month_digit == 12:
year += 1
num_days_list = Calendar.get_monthday_time(year, week)
month_days = my._get_month_days(year, week)
day_range = month_days
for day in day_range:
if isinstance(day, tuple):
month_digit, day = day[0], day[1]
weekday = Calendar.get_weekday(year, month_digit, day)
# show every day
# add divider for days
#divider = my._get_divider(day, float(width) / num_days)
#div.add(divider)
# show divider every week
if weekday == 6 and not week:
left_pos = float(width) / len(day_range) * day + width * month_idx
divider = my._get_divider( left_pos )
div.add(divider)
day_div = FloatDivWdg(day, css='smaller')
# grey out weekends
if weekday > 4:
day_div.add_style('background', '#bbb')
# keep up to 2 decimal places for the width
day_div.add_style('width', '%.2fpx' %(width / len(day_range)))
font_size = int(width/num_days*0.75)
if font_size > 10:
font_size = 10
day_div.add_style('font-size: %spx' % font_size )
div.add(day_div)
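# get_display renders one row: the bid date-range bar, an 'invalid' warning when the
# dates are reversed, and the optional status/check-in history rows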
def get_display(my):
# TODO: configure a different color for a different login
color = "orange"
my.get_info()
sobject = my.get_current_sobject()
# this changes depending on whether it is in ajax mode
div = None
if not my.is_ajax:
div = DivWdg()
if my.user_defined_bound:
div.add_style('margin-left', my.LEFT_MARGIN)
div.set_id("calendar_range_%s" % sobject.get_id() )
div.add_style("display: block")
else:
div = Widget()
div.add(HiddenWdg('cal_week_hid', my.week_hid))
# until "hsl(120, 50%, 50%)" is supprted by all browsers, use literal color names
div1 = my.get_date_range_wdg("bid_start_date", "bid_end_date", color)
if not div1:
span = SpanWdg("<i>No Dates Set</i>")
span.add_class("cal_in_bound")
div.add(span)
return div
div.add(div1)
if not my.valid_date:
msg = HtmlElement.blink('invalid')
msg.add_style('color', 'red')
msg.add_class('small')
div.add(SpanWdg(msg, css='small'))
# reset it
my.valid_date = True
if my.get_option("actual_display") == "true":
div.add(HtmlElement.br())
div.add(my.get_status_history_wdg(sobject))
if my.get_option("checkin_display") == "true":
div.add(HtmlElement.br())
div.add(my.get_checkin_history_wdg(sobject))
return div
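# get_date_range_wdg draws the start/end labels and the duration bar for either the bid
# or actual dates; when editing is allowed, clicking a label opens the calendar popup
# via show_task_cal and an AjaxLoader round trip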
def get_date_range_wdg(my,start_date_col,end_date_col,color):
if start_date_col == "bid_start_date":
type = "bid"
else:
type = "actual"
edit = True
if eval("my.%s_edit" % type)== "false":
edit = False
sobject = my.get_current_sobject()
start_date = sobject.get_value(start_date_col)
end_date = sobject.get_value(end_date_col)
if end_date and str(start_date) > str(end_date):
my.valid_date = False
# determine dependency: not very efficient!!!
if my.get_option("dependent_display") == "true":
is_dependent = sobject.get_value("depend_id")
has_dependents = False
for tmp_sobj in my.sobjects:
if tmp_sobj == sobject:
continue
if tmp_sobj.get_value("depend_id") == sobject.get_id():
has_dependents = True
break
else:
is_dependent = False
has_dependents = False
# special case for the value of bid_end_date, we can use duration
if type == "bid" and start_date == "":
bid_duration = sobject.get_value("bid_duration")
if bid_duration != "":
div = DivWdg("%s days" % bid_duration)
return div
if type == "bid" and end_date == "":
bid_duration = sobject.get_value("bid_duration")
if bid_duration != "":
bid_start_date = sobject.get_value("bid_start_date")
if bid_start_date != "":
date = Date(db=bid_start_date)
date.add_days(bid_duration)
end_date = date.get_db_time()
# handle cases where there are no dates or dates missing
no_label_flag = False
if start_date == "" and end_date == "":
if not edit:
return None
# get today's date
date = Date()
start_date = date.get_db_time()
end_date = date.get_db_time()
no_label_flag = True
elif start_date == "":
start_date = end_date
elif end_date == "":
end_date = start_date
# this conversion is needed for the js calendar
start_date, time = str(start_date).split(" ")
end_date, time = str(end_date).split(" ")
info = my.calculate_widths(start_date, end_date)
start_width, end_width = info.get('width')
s_month_label, s_day = info.get('s_label')
e_month_label, e_day = info.get('e_label')
# create the labels
if no_label_flag:
start_width = int(my.width / 2)
end_width = int(my.width / 2)
start_label = SpanWdg("--- ")
start_label.set_class("cal_in_bound")
end_label = SpanWdg(" ---")
end_label.set_class("cal_in_bound")
else:
start_label = SpanWdg("%s-%s " %(s_month_label, s_day))
start_label.set_class("cal_in_bound")
end_label = SpanWdg(" %s-%s" %(e_month_label, e_day))
end_label.set_class("cal_in_bound")
# check for boundary
if start_width > my.width:
start_width = my.width
start_label.set_class('cal_out_bound')
elif start_width < 0:
start_width = 0
start_label.set_class('cal_out_bound')
if end_width > my.width:
end_width = my.width
end_label.set_class('cal_out_bound')
elif end_width < 0:
end_width = 0
end_label.set_class('cal_out_bound')
# calculate the length of the duration width
# offset for the border thickness
offset = 5
duration_width = end_width - start_width - offset
if duration_width < 0:
duration_width = 0
# Create the actual interface using a top level Widget.
# NOTE: This should not be a div, otherwise it will be duplicated
# on ajax load
widget = Widget()
spacer = DivWdg()
spacer.add_style("height: 5px")
spacer.add_style("float: left")
spacer.add_style("text-align: right")
# width can be zero if it is out of bound
spacer.add_style("width", start_width)
start_div = DivWdg(start_label)
start_div.set_id("%s_%s" % ( start_date_col, sobject.get_id()) )
start_div.set_style("float: right; white-space: nowrap")
spacer.add(start_div)
widget.add(spacer)
if duration_width:
if is_dependent and not no_label_flag:
depend_div = DivWdg()
depend_div.add_style("float: left")
depend_div.add(" ")
depend_div.add_style("width: 0px")
depend_div.add_style("height: 10px")
depend_div.add_style("border-style: solid")
depend_div.add_style("border-width: 0 1px 0 0")
depend_div.add_class('cal_depend')
widget.add(depend_div)
duration_info = []
duration_info.append( [int(duration_width), color] )
for info in duration_info:
duration_width, color = info
# draw the duration
duration = DivWdg(" ")
if type == 'bid':
duration.add_class('cal_duration')
duration.add_style("height: 4px")
else:
duration.add_style("background-color: %s" % color)
duration.add_style("height: 5px")
# for IE
duration.add_style("font-size: 5px" )
duration.add_style("margin-top: 4px")
duration.add_style("float: left")
duration.add_style("width", duration_width)
widget.add(duration)
# draw the end dependency
if has_dependents and not no_label_flag:
depend_div = DivWdg()
depend_div.add_style("float: left")
depend_div.add(" <br/> ")
depend_div.add_style("width: 0px")
depend_div.add_style("height: 30px")
depend_div.add_style("border-style: solid")
depend_div.add_style("border-width: 0 1px 0 0")
depend_div.add_class('cal_depend')
widget.add(depend_div)
else:
spacer = SpanWdg("|")
spacer.add_style("color: #aaa")
spacer.add_style("float: left")
widget.add(spacer)
end_div = DivWdg(end_label, css='small')
end_div.set_id("%s_%s" % (end_date_col,sobject.get_id() ) )
end_div.add_style("float: left")
end_div.add_style("white-space: nowrap")
widget.add(end_div)
if not edit:
return widget
# change the cursor icon
start_div.add_class("hand")
end_div.add_class("hand")
# add the ajax object
wdg_name = "calendar_range_%s" % sobject.get_id()
ajax = AjaxLoader(wdg_name)
ajax.register_cmd("pyasm.widget.CalendarSetCmd")
ajax.set_load_class("pyasm.widget.CalendarBarWdg")
ajax.set_option("search_key", sobject.get_search_key() )
ajax.set_option("start_year", my.start_year)
ajax.set_option("end_year", my.end_year)
ajax.set_option("start_month", my.start_month)
ajax.set_option("end_month", my.end_month)
ajax.set_option("calendar_width", my.width)
ajax.set_option("calendar_margin", my.cal_margin)
ajax.set_option("bid_edit" , my.bid_edit)
ajax.set_option("actual_edit" , my.actual_edit)
ajax.set_option("cal_week_hid" , my.week_hid)
ajax.add_element_name("calendar_column")
ajax.add_element_name(my.calendar.get_input_name())
on_script = Common.escape_quote(ajax.get_on_script())
start_div.add_event("onclick", "show_task_cal('%s',this, '%s','%s','%s')" \
% ( my.CAL_INPUT, start_date, start_date_col, on_script) )
start_div.add_event("onmouseover", "this.style.fontWeight = 'bold'" )
start_div.add_event("onmouseout", "this.style.fontWeight = 'normal'" )
end_div.add_event("onclick", "show_task_cal('%s', this, '%s','%s','%s')" \
% ( my.CAL_INPUT, end_date, end_date_col, on_script) )
end_div.add_event("onmouseover", "this.style.fontWeight = 'bold'" )
end_div.add_event("onmouseout", "this.style.fontWeight = 'normal'" )
return widget
def calculate_widths(my, start_date, end_date):
'''calculate the pixel width for the dates, returns a dict of info
['width'], ['s_label'], ['e_label']'''
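# example return value (hypothetical numbers):
# {'width': (120.0, 245.5), 's_label': ('Feb', 3), 'e_label': ('Mar', 17)}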
if str(start_date).count(" "):
start_date, time = str(start_date).split(" ")
if str(end_date).count(" "):
end_date, time = str(end_date).split(" ")
month_unit = float(my.width)/ my.num_months
leftmost_day = 1
# in case a particular week is selected
week = my.week_hid
num_days = 30.5
# calculate pixels
s_year,s_month,s_day = [int(x) for x in str(start_date).split("-")]
e_year,e_month,e_day = [int(x) for x in str(end_date).split("-")]
s_month_label = my.MONTHS[s_month-1]
leftmost_month = CalendarBarWdg.MONTHS.index(my.first_month) + 1
# check if only 1 week is shown
s_diff_month = (s_year - my.left_year_bound) * 12 - leftmost_month
e_diff_month = (e_year - my.left_year_bound) * 12 - leftmost_month
if week:
num_days = 7.0
year = my.months[0][1]
left_year_bound = my.left_year_bound
first_adjust = False
# handle the cross year scenario, when user clicks on the portion of
# week 1 of next year that spans a few days of the previous year
if int(week) == 1 and leftmost_month == 12:
year += 1
first_adjust = True
leftmost_month, leftmost_day = my._get_month_days(year, week)[0]
# must do it again with the updated leftmost_month
# the user clicks on the first week of a new year that started in the
# previous year
if int(week) == 1 and leftmost_month == 12 and not first_adjust:
left_year_bound = my.left_year_bound - 1
rightmost_month, rightmost_day = my._get_month_days(year, week)[6]
s_date = Date(db_date=start_date)
e_date = Date(db_date=end_date)
# boolean to show if a date is within the boundary of the defined calendar
in_bound_date = True
rightmost_date = Date(db_date='%s-%s-%s' %(year,rightmost_month, rightmost_day) )
if s_date.get_db_date() > rightmost_date.get_db_date():
# this +12 is just arbitrary to make it out of bound
# TODO: calculate the real s_month and e_month
in_bound_date = False
s_diff_month +=12
else:
s_diff_month = (s_year - left_year_bound) * 12 - leftmost_month
if e_date.get_db_date() > rightmost_date.get_db_date():
# this +12 is just arbitrary to make it out of bound
# TODO: calculate the real s_month and e_month
in_bound_date = False
e_diff_month +=12
else:
e_diff_month = (e_year - left_year_bound) * 12 - leftmost_month
date = Date(db_date=start_date)
date.add_days(31)
current_date = Date(db_date='%s-01-01' %my.left_year_bound)
recent = False
if date.get_db_date() > current_date.get_db_date():
recent = True
# s_month is re-enumerated according to the length of the displayed months
s_month += s_diff_month
day_unit = month_unit / num_days
start_width = -1
diff_day = s_day - leftmost_day
if diff_day < 0 and s_month > 0 and in_bound_date:
diff_day += Calendar.get_num_days(left_year_bound, leftmost_month)
month_unit = 0
if s_month >= 0:
start_width = float('%.1f' %(s_month*month_unit + (diff_day)*day_unit))
e_month_label = my.MONTHS[e_month-1]
end_width = -1
e_month += e_diff_month
# -leftmost_day since we are using it as a ref. point
diff_day = e_day-leftmost_day
if diff_day < 0 and e_month > 0 and in_bound_date:
diff_day += Calendar.get_num_days(left_year_bound, leftmost_month)
# month_unit is not needed in cross-month situation
month_unit = 0
if e_month >= 0:
end_width = float('%.1f' %(e_month*month_unit + (diff_day + 1)*day_unit))
info = {}
info['width'] = (start_width, end_width)
info['s_label'] = (s_month_label, s_day)
info['e_label'] = (e_month_label, e_day)
return info
# handle status history display
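# preprocess_status queries sthpw/status_log once for all rows and caches the changes
# in my.statuses keyed by "search_type|search_id"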
def preprocess_status(my):
if not my.sobjects:
return
if my.statuses:
return
search = Search("sthpw/status_log")
search_type = my.sobjects[0].get_search_type()
search.add_filter("search_type", search_type)
search.add_filters("search_id", SObject.get_values(my.sobjects, "id") )
search.add_order_by("timestamp")
status_changes = search.get_sobjects()
for status_change in status_changes:
key = "%s|%s" % (status_change.get_value("search_type"), status_change.get_value("search_id") )
changes = my.statuses.get(key)
if not changes:
changes = []
my.statuses[key] = changes
changes.append(status_change)
def get_status_history_wdg(my,sobject):
my.preprocess_status()
mode = my.get_option("actual_mode")
if not mode:
#mode = "single"
mode = "detail"
widget = Widget()
status_changes = my.statuses.get( sobject.get_search_key() )
if not status_changes:
widget.add("...")
return widget
pipeline = Pipeline.get_by_sobject(sobject)
if not pipeline:
pipeline = Pipeline.get_by_code("task")
# calculate the range
changes = []
last_timestamp = None
status_changes.reverse()
for status_change in status_changes:
change = []
timestamp = str(status_change.get_value("timestamp"))
to_status = str(status_change.get_value("to_status"))
user = str(status_change.get_value("login"))
process = pipeline.get_process(to_status)
if not last_timestamp:
last_timestamp = timestamp
tmp = Date(db=last_timestamp)
else:
# remove a day from the last timestamp
tmp = Date(db=last_timestamp)
tmp.add_days(-1)
change = [timestamp, tmp.get_db_date(), process, to_status, user]
changes.append(change)
last_timestamp = timestamp
# draw all of the bars
count = 0
changes.reverse()
last_change = False
for change in changes:
if count == len(changes)-1:
last_change = True
start_date, end_date, process, to_status, user = change
color = 'grey'
if process:
process_color = process.get_color()
if process_color:
color = process_color
#to_status = process.get_name()
start_width, end_width = my.calculate_widths(start_date,end_date).get('width')
if start_width < 0:
start_width = 0
elif start_width > my.width:
start_width = my.width
if end_width > my.width:
end_width = my.width
# set the spacer: used for either the first or all in detail mode
if mode == "detail" or not count:
spacer = DivWdg()
spacer.add_style("height: 5px")
spacer.add_style("float: left")
spacer.add_style("text-align: right")
spacer.add_style("width", start_width)
widget.add(spacer)
duration = DivWdg()
#duration.add_style("border: 1px dotted %s" % color)
#duration.add_style("height: 4px")
duration.add_style("background-color: %s" % color)
duration.add_style("height: 5px")
height = 4
duration_width = end_width - start_width + 1
if last_change:
duration_width = 8
height = 5
duration.add_style("height: %spx" % height)
# for IE
duration.add_style("font-size: 5px" )
duration.add_style("margin-top: 4px")
duration.add_style("float: left")
duration.add_style("width", duration_width)
widget.add(duration)
start_display_date = Date(db=start_date).get_display("%b %d")
display_date = Date(db=end_date).get_display("%b %d")
# add a tip
duration.add_tip("Date: %s to %s<br/>Status: %s<br/>User: %s<br/>" % (start_display_date, display_date, to_status, user) )
if mode == "detail" or last_change:
widget.add( "<i style='font-size: 0.8em; '> %s (%s)</i>" % (display_date, to_status) )
if mode == "detail":
widget.add( HtmlElement.br() )
count += 1
return widget
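# get_checkin_history_wdg places a small grey marker (with a tooltip) at the check-in
# date of each snapshot of the parent sobject for this process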
def get_checkin_history_wdg(my,sobject):
widget = Widget()
# calculate all of the ranges and percentages
parent = sobject.get_parent()
# FIXME: big assumption that context == process
process = sobject.get_value("process")
snapshots = Snapshot.get_by_sobject(parent,process)
snapshots.reverse()
for snapshot in snapshots:
start_date = snapshot.get_value("timestamp")
end_date = start_date
# draw up all of the ranges
#start_width, end_width = my.calculate_widths(start_date,end_date).get('width')
start_width, end_width = my.calculate_widths(start_date,end_date).get('width')
if start_width < 0:
start_width = 0
elif start_width > my.width:
start_width = my.width
if end_width > my.width:
end_width = my.width
# set the spacer: used for either the first or all in detail mode
spacer = DivWdg()
spacer.add_style("height: 5px")
spacer.add_style("float: left")
spacer.add_style("text-align: right")
spacer.add_style("width", start_width)
widget.add(spacer)
duration = DivWdg()
duration.add_style("background-color: grey")
duration.add_style("height: 5px")
duration.add_style("float: left")
duration.add_style("text-align: right")
duration.add_style("vertical-align: middle")
duration.add_style("width", "5px")
widget.add(duration)
start_display_date = Date(db=start_date).get_display("%b %d")
display_date = Date(db=end_date).get_display("%b %d")
version = snapshot.get_value("version")
context = snapshot.get_value("context")
user = snapshot.get_value("login")
duration.add_tip("Date: %s<br/>Context: %s<br/>Version: %s<br/>User: %s<br/>" % (display_date, context, version, user) )
widget.add(" <i style='font-size: 0.8em;'>%s (v%0.2d %s)</i>" % (display_date, int(version), context))
widget.add( HtmlElement.br() )
return widget
def get_resource_usage_wdg(my,sobject):
month_unit = float(my.width)/ my.num_months
day_unit = month_unit / 30.5
# calculate pixels
# NOTE: unfinished stub - start_date is not defined in this scope
#s_year,s_month,s_day = [int(x) for x in str(start_date).split("-")]
# get all of the tasks
tasks = my.sobjects
widget = Widget()
# calculate all of the ranges and percentages
for task in tasks:
bid_start_date = task.get_value("bid_start_date")
bid_end_date = task.get_value("bid_end_date")
# draw up all of the ranges
#start_width, end_width = my.calculate_widths(start_date,end_date).get('width')
from pyasm.command import Command, CommandExitException
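# CalendarSetCmd is the command triggered by the calendar popup: it writes the picked
# date into the clicked column and nudges the matching start/end column so that the
# start date never falls after the end date, then updates dependent tasks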
class CalendarSetCmd(Command):
def get_title(my):
return "Set Task Date"
def check(my):
return True
def execute(my):
web = WebContainer.get_web()
search_key = web.get_form_value("search_key")
sobject = Search.get_by_search_key(search_key)
date = web.get_form_value(CalendarBarWdg.CAL_INPUT)
# date can be empty to clear the value
if date == "__NONE__":
raise CommandExitException()
column = web.get_form_value("calendar_column")
sobject.set_value(column, date)
# make sure end date is not before start date
if column.endswith("end_date"):
start_date = sobject.get_value( column.replace("end", "start") )
if start_date and str(start_date) > date and date:
sobject.set_value( column.replace("end","start"), date)
# make sure end date is not before start date
elif column.endswith("start_date"):
end_date = sobject.get_value( column.replace("start", "end") )
# dates can be empty
if end_date and str(end_date) < date and date:
sobject.set_value( column.replace("start","end"), date)
sobject.commit()
sobject.update_dependent_tasks()
my.description = "Set %s to '%s'" % (column, date)
|
yselkowitz/pluma-plugins | refs/heads/master | plugins/commander/modules/format.py | 2 | import commander.commands as commands
__commander_module__ = True
def remove_trailing_spaces(view, all=False):
"""Remove trailing spaces: format.remove-trailing-spaces [<all>]
Remove trailing spaces in the selection. If there is no selection, trailing
spaces are removed from the whole document. When the optional argument
<all> is specified, trailing spaces will be removed from all
the open documents."""
if all:
buffers = view.get_toplevel().get_documents()
else:
buffers = [view.get_buffer()]
for buf in buffers:
bounds = buf.get_selection_bounds()
if not bounds:
bounds = buf.get_bounds()
buf.begin_user_action()
try:
# For each line, remove trailing spaces
if not bounds[1].ends_line():
bounds[1].forward_to_line_end()
until = buf.create_mark(None, bounds[1], False)
start = bounds[0]
start.set_line_offset(0)
while start.compare(buf.get_iter_at_mark(until)) < 0:
end = start.copy()
end.forward_to_line_end()
last = end.copy()
if end.equal(buf.get_end_iter()):
end.backward_char()
while end.get_char().isspace() and end.compare(start) > 0:
end.backward_char()
if not end.ends_line():
if not end.get_char().isspace():
end.forward_char()
buf.delete(end, last)
start = end.copy()
start.forward_line()
except Exception, e:
print e
buf.delete_mark(until)
buf.end_user_action()
return commands.result.HIDE
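# _transform applies the given string transform to the selection (or to the current
# line when nothing is selected) in the active document, or in all open documents
# when all is set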
def _transform(view, how, all):
if all:
buffers = view.get_toplevel().get_documents()
else:
buffers = [view.get_buffer()]
for buf in buffers:
bounds = buf.get_selection_bounds()
if not bounds:
start = buf.get_iter_at_mark(buf.get_insert())
end = start.copy()
if not end.ends_line():
end.forward_to_line_end()
bounds = [start, end]
if not bounds[0].equal(bounds[1]):
text = how(bounds[0].get_text(bounds[1]))
buf.begin_user_action()
buf.delete(bounds[0], bounds[1])
buf.insert(bounds[0], text)
buf.end_user_action()
return commands.result.HIDE
def upper(view, all=False):
"""Make upper case: format.upper [<all>]
Transform text in selection to upper case. If the optional argument <all>
is specified, text in all the open documents will be transformed."""
return _transform(view, lambda x: x.upper(), all)
def lower(view, all=False):
"""Make lower case: format.lower [<all>]
Transform text in selection to lower case. If the optional argument <all>
is specified, text in all the open documents will be transformed."""
return _transform(view, lambda x: x.lower(), all)
def title(view, all=False):
"""Make title case: format.title [<all>]
Transform text in selection to title case. If the optional argument <all>
is specified, text in all the open documents will be transformed."""
return _transform(view, lambda x: x.title().replace('_', ''), all)
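# Commander usage (as described in the docstrings above):
# format.remove-trailing-spaces [<all>]
# format.upper [<all>]
# format.lower [<all>]
# format.title [<all>]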
|
GunoH/intellij-community | refs/heads/master | python/testData/refactoring/pushdown/simple.after.py | 79 | class Foo:
pass
class Boo(Foo):
def boo(self):
print "rrrrr"
def foo(self):
print("a") |
angr/angr | refs/heads/master | angr/procedures/libc/memcmp.py | 2 | import angr
import logging
l = logging.getLogger(name=__name__)
class memcmp(angr.SimProcedure):
#pylint:disable=arguments-differ
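# run() splits the compare into a prefix whose length is definitely compared (the
# minimum feasible n) and, when n is symbolic, a conditional tail of up to
# max_buffer_size bytes whose per-length results are selected with ite_dict on n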
def run(self, s1_addr, s2_addr, n):
max_memcmp_size = self.state.libc.max_buffer_size
definite_size = self.state.solver.min_int(n)
conditional_s1_start = s1_addr + definite_size
conditional_s2_start = s2_addr + definite_size
if self.state.solver.symbolic(n):
conditional_size = int(max(max_memcmp_size - definite_size, 0))
else:
conditional_size = 0
l.debug("Definite size %s and conditional size: %s", definite_size, conditional_size)
if definite_size > 0:
s1_part = self.state.memory.load(s1_addr, definite_size, endness='Iend_BE')
s2_part = self.state.memory.load(s2_addr, definite_size, endness='Iend_BE')
cases = [ [s1_part == s2_part, self.state.solver.BVV(0, self.state.arch.bits)], [self.state.solver.ULT(s1_part, s2_part), self.state.solver.BVV(-1, self.state.arch.bits)], [self.state.solver.UGT(s1_part, s2_part), self.state.solver.BVV(1, self.state.arch.bits) ] ]
definite_answer = self.state.solver.ite_cases(cases, 2)
constraint = self.state.solver.Or(*[c for c,_ in cases])
self.state.add_constraints(constraint)
l.debug("Created definite answer: %s", definite_answer)
l.debug("Created constraint: %s", constraint)
l.debug("... crom cases: %s", cases)
else:
definite_answer = self.state.solver.BVV(0, self.state.arch.bits)
if not self.state.solver.symbolic(definite_answer) and self.state.solver.eval(definite_answer) != 0:
return definite_answer
if conditional_size > 0:
s1_all = self.state.memory.load(conditional_s1_start, conditional_size, endness='Iend_BE')
s2_all = self.state.memory.load(conditional_s2_start, conditional_size, endness='Iend_BE')
conditional_rets = { 0: definite_answer }
for byte, bit in zip(range(conditional_size), range(conditional_size*8, 0, -8)):
s1_part = s1_all[conditional_size*8-1 : bit-8]
s2_part = s2_all[conditional_size*8-1 : bit-8]
cases = [ [s1_part == s2_part, self.state.solver.BVV(0, self.state.arch.bits)], [self.state.solver.ULT(s1_part, s2_part), self.state.solver.BVV(-1, self.state.arch.bits)], [self.state.solver.UGT(s1_part, s2_part), self.state.solver.BVV(1, self.state.arch.bits) ] ]
conditional_rets[byte+1] = self.state.solver.ite_cases(cases, 0)
self.state.add_constraints(self.state.solver.Or(*[c for c,_ in cases]))
ret_expr = self.state.solver.If(definite_answer == 0, self.state.solver.ite_dict(n - definite_size, conditional_rets, 2), definite_answer)
self.state.add_constraints(self.state.solver.Or(*[n-definite_size == c for c in conditional_rets]))
return ret_expr
else:
return definite_answer
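# Minimal usage sketch (not part of this module; the binary path is only an example):
#
#   import angr
#   proj = angr.Project('/bin/true', auto_load_libs=False)
#   proj.hook_symbol('memcmp', angr.SIM_PROCEDURES['libc']['memcmp']())
#
# angr normally wires this SimProcedure in automatically via SIM_PROCEDURES['libc'].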
|
oscar6echo/ezhc | refs/heads/master | ezhc/samples/build_samples.py | 2 |
import numpy as np
import pandas as pd
import json
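# Each df_* helper below returns a pandas DataFrame shaped for one chart sample:
# the index levels carry the categories/axes and the columns carry the series values.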
def df_one_idx_several_col():
dic = {'John': [5, 3, 4, 7, 2],
'Jane': [2, 2, 3, 2, 1],
'Joe': [3, 4, 4, 2, 5]}
df = pd.DataFrame.from_dict(dic)
df.index = ['Apples', 'Oranges', 'Pears', 'Grapes', 'Bananas']
df.index.name = 'Fruit'
return df
def df_one_idx_one_col():
arr = np.array([['Firefox', 'IE', 'Chrome', 'Safari', 'Opera', 'Others'],
['45.0', '26.8', '12.8', '8.5', '6.2', '0.7']]).T
df = pd.DataFrame(index=arr[:, 0], data=arr[:, 1], columns=['MktShare'], dtype=np.float)
df.index.name = 'Brand'
return df
def df_one_idx_two_col():
data = [
[-9.7, 9.4],
[-8.7, 6.5],
[-3.5, 9.4],
[-1.4, 19.9],
[0.0, 22.6],
[2.9, 29.5],
[9.2, 30.7],
[7.3, 26.5],
[4.4, 18.0],
[-3.1, 11.4],
[-5.2, 10.4],
[-13.5, 9.8]
]
idx = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
df = pd.DataFrame(index=idx, data=data, columns=['Min', 'Max'])
df.index.name = 'Month'
return df
def df_two_idx_one_col():
data = [
['Internet Explorer',8.0,26.61],
['Internet Explorer',9.0,16.96],
['Chrome',18.0,8.01],
['Chrome',19.0,7.73],
['Firefox',12,6.72],
['Internet Explorer',6.0,6.40],
['Firefox',11,4.72],
['Internet Explorer',7.0,3.55],
['Safari',5.1,3.53],
['Firefox',13,2.16],
['Firefox',3.6,1.87],
['Opera','11.x',1.30],
['Chrome',17.0,1.13],
['Firefox',10,0.90],
['Safari',5.0,0.85],
['Firefox',9.0,0.65],
['Firefox',8.0,0.55],
['Firefox',4.0,0.50],
['Chrome',16.0,0.45],
['Firefox',3.0,0.36],
['Firefox',3.5,0.36],
['Firefox',6.0,0.32],
['Firefox',5.0,0.31],
['Firefox',7.0,0.29],
['Unknown','Unknown',0.29],
['Chrome',18.0,0.26],
['Chrome',14.0,0.25],
['Chrome',20.0,0.24],
['Chrome',15.0,0.18],
['Chrome',12.0,0.16],
['Opera','12.x',0.15],
['Safari',4.0,0.14],
['Chrome',13.0,0.13],
['Safari',4.1,0.12],
['Chrome',11.0,0.10],
['Firefox',14,0.10],
['Firefox',2.0,0.09],
['Chrome',10.0,0.09],
['Opera','10.x',0.09],
['Internet Explorer',8.0,0.09]
]
df = pd.DataFrame(data=data, columns=['Brand', 'Version', 'Market Share'])
df = df.groupby(['Brand', 'Version']).sum().reset_index()
df['Agg'] = df[['Brand', 'Market Share']].groupby(['Brand']).transform(sum)
df = df.sort(['Agg', 'Version'], ascending=[False, True])
df = df.set_index(['Brand', 'Version'])
df = df.drop('Agg', axis=1)
return df
def df_scatter_old(N=10):
np.random.seed(123456)
mi = pd.MultiIndex.from_arrays([np.random.uniform(0, 10, size=N), np.random.uniform(0, 10, size=N)])
df = pd.DataFrame(index=mi, data=np.random.choice(['A', 'B'], size=N))
df.index.names = ['x', 'y']
df.columns = ['value']
return df
def df_scatter():
data = """
[{
"name": "Female",
"color": "rgba(223, 83, 83, .5)",
"data": [[161.2, 51.6], [167.5, 59.0], [159.5, 49.2], [157.0, 63.0], [155.8, 53.6],
[170.0, 59.0], [159.1, 47.6], [166.0, 69.8], [176.2, 66.8], [160.2, 75.2],
[172.5, 55.2], [170.9, 54.2], [172.9, 62.5], [153.4, 42.0], [160.0, 50.0],
[147.2, 49.8], [168.2, 49.2], [175.0, 73.2], [157.0, 47.8], [167.6, 68.8],
[159.5, 50.6], [175.0, 82.5], [166.8, 57.2], [176.5, 87.8], [170.2, 72.8],
[174.0, 54.5], [173.0, 59.8], [179.9, 67.3], [170.5, 67.8], [160.0, 47.0],
[154.4, 46.2], [162.0, 55.0], [176.5, 83.0], [160.0, 54.4], [152.0, 45.8],
[162.1, 53.6], [170.0, 73.2], [160.2, 52.1], [161.3, 67.9], [166.4, 56.6],
[168.9, 62.3], [163.8, 58.5], [167.6, 54.5], [160.0, 50.2], [161.3, 60.3],
[167.6, 58.3], [165.1, 56.2], [160.0, 50.2], [170.0, 72.9], [157.5, 59.8],
[167.6, 61.0], [160.7, 69.1], [163.2, 55.9], [152.4, 46.5], [157.5, 54.3],
[168.3, 54.8], [180.3, 60.7], [165.5, 60.0], [165.0, 62.0], [164.5, 60.3],
[156.0, 52.7], [160.0, 74.3], [163.0, 62.0], [165.7, 73.1], [161.0, 80.0],
[162.0, 54.7], [166.0, 53.2], [174.0, 75.7], [172.7, 61.1], [167.6, 55.7],
[151.1, 48.7], [164.5, 52.3], [163.5, 50.0], [152.0, 59.3], [169.0, 62.5],
[164.0, 55.7], [161.2, 54.8], [155.0, 45.9], [170.0, 70.6], [176.2, 67.2],
[170.0, 69.4], [162.5, 58.2], [170.3, 64.8], [164.1, 71.6], [169.5, 52.8],
[163.2, 59.8], [154.5, 49.0], [159.8, 50.0], [173.2, 69.2], [170.0, 55.9],
[161.4, 63.4], [169.0, 58.2], [166.2, 58.6], [159.4, 45.7], [162.5, 52.2],
[159.0, 48.6], [162.8, 57.8], [159.0, 55.6], [179.8, 66.8], [162.9, 59.4],
[161.0, 53.6], [151.1, 73.2], [168.2, 53.4], [168.9, 69.0], [173.2, 58.4],
[171.8, 56.2], [178.0, 70.6], [164.3, 59.8], [163.0, 72.0], [168.5, 65.2],
[166.8, 56.6], [172.7, 105.2], [163.5, 51.8], [169.4, 63.4], [167.8, 59.0],
[159.5, 47.6], [167.6, 63.0], [161.2, 55.2], [160.0, 45.0], [163.2, 54.0],
[162.2, 50.2], [161.3, 60.2], [149.5, 44.8], [157.5, 58.8], [163.2, 56.4],
[172.7, 62.0], [155.0, 49.2], [156.5, 67.2], [164.0, 53.8], [160.9, 54.4],
[162.8, 58.0], [167.0, 59.8], [160.0, 54.8], [160.0, 43.2], [168.9, 60.5],
[158.2, 46.4], [156.0, 64.4], [160.0, 48.8], [167.1, 62.2], [158.0, 55.5],
[167.6, 57.8], [156.0, 54.6], [162.1, 59.2], [173.4, 52.7], [159.8, 53.2],
[170.5, 64.5], [159.2, 51.8], [157.5, 56.0], [161.3, 63.6], [162.6, 63.2],
[160.0, 59.5], [168.9, 56.8], [165.1, 64.1], [162.6, 50.0], [165.1, 72.3],
[166.4, 55.0], [160.0, 55.9], [152.4, 60.4], [170.2, 69.1], [162.6, 84.5],
[170.2, 55.9], [158.8, 55.5], [172.7, 69.5], [167.6, 76.4], [162.6, 61.4],
[167.6, 65.9], [156.2, 58.6], [175.2, 66.8], [172.1, 56.6], [162.6, 58.6],
[160.0, 55.9], [165.1, 59.1], [182.9, 81.8], [166.4, 70.7], [165.1, 56.8],
[177.8, 60.0], [165.1, 58.2], [175.3, 72.7], [154.9, 54.1], [158.8, 49.1],
[172.7, 75.9], [168.9, 55.0], [161.3, 57.3], [167.6, 55.0], [165.1, 65.5],
[175.3, 65.5], [157.5, 48.6], [163.8, 58.6], [167.6, 63.6], [165.1, 55.2],
[165.1, 62.7], [168.9, 56.6], [162.6, 53.9], [164.5, 63.2], [176.5, 73.6],
[168.9, 62.0], [175.3, 63.6], [159.4, 53.2], [160.0, 53.4], [170.2, 55.0],
[162.6, 70.5], [167.6, 54.5], [162.6, 54.5], [160.7, 55.9], [160.0, 59.0],
[157.5, 63.6], [162.6, 54.5], [152.4, 47.3], [170.2, 67.7], [165.1, 80.9],
[172.7, 70.5], [165.1, 60.9], [170.2, 63.6], [170.2, 54.5], [170.2, 59.1],
[161.3, 70.5], [167.6, 52.7], [167.6, 62.7], [165.1, 86.3], [162.6, 66.4],
[152.4, 67.3], [168.9, 63.0], [170.2, 73.6], [175.2, 62.3], [175.2, 57.7],
[160.0, 55.4], [165.1, 104.1], [174.0, 55.5], [170.2, 77.3], [160.0, 80.5],
[167.6, 64.5], [167.6, 72.3], [167.6, 61.4], [154.9, 58.2], [162.6, 81.8],
[175.3, 63.6], [171.4, 53.4], [157.5, 54.5], [165.1, 53.6], [160.0, 60.0],
[174.0, 73.6], [162.6, 61.4], [174.0, 55.5], [162.6, 63.6], [161.3, 60.9],
[156.2, 60.0], [149.9, 46.8], [169.5, 57.3], [160.0, 64.1], [175.3, 63.6],
[169.5, 67.3], [160.0, 75.5], [172.7, 68.2], [162.6, 61.4], [157.5, 76.8],
[176.5, 71.8], [164.4, 55.5], [160.7, 48.6], [174.0, 66.4], [163.8, 67.3]]
}, {
"name": "Male",
"color": "rgba(119, 152, 191, .5)",
"data": [[174.0, 65.6], [175.3, 71.8], [193.5, 80.7], [186.5, 72.6], [187.2, 78.8],
[181.5, 74.8], [184.0, 86.4], [184.5, 78.4], [175.0, 62.0], [184.0, 81.6],
[180.0, 76.6], [177.8, 83.6], [192.0, 90.0], [176.0, 74.6], [174.0, 71.0],
[184.0, 79.6], [192.7, 93.8], [171.5, 70.0], [173.0, 72.4], [176.0, 85.9],
[176.0, 78.8], [180.5, 77.8], [172.7, 66.2], [176.0, 86.4], [173.5, 81.8],
[178.0, 89.6], [180.3, 82.8], [180.3, 76.4], [164.5, 63.2], [173.0, 60.9],
[183.5, 74.8], [175.5, 70.0], [188.0, 72.4], [189.2, 84.1], [172.8, 69.1],
[170.0, 59.5], [182.0, 67.2], [170.0, 61.3], [177.8, 68.6], [184.2, 80.1],
[186.7, 87.8], [171.4, 84.7], [172.7, 73.4], [175.3, 72.1], [180.3, 82.6],
[182.9, 88.7], [188.0, 84.1], [177.2, 94.1], [172.1, 74.9], [167.0, 59.1],
[169.5, 75.6], [174.0, 86.2], [172.7, 75.3], [182.2, 87.1], [164.1, 55.2],
[163.0, 57.0], [171.5, 61.4], [184.2, 76.8], [174.0, 86.8], [174.0, 72.2],
[177.0, 71.6], [186.0, 84.8], [167.0, 68.2], [171.8, 66.1], [182.0, 72.0],
[167.0, 64.6], [177.8, 74.8], [164.5, 70.0], [192.0, 101.6], [175.5, 63.2],
[171.2, 79.1], [181.6, 78.9], [167.4, 67.7], [181.1, 66.0], [177.0, 68.2],
[174.5, 63.9], [177.5, 72.0], [170.5, 56.8], [182.4, 74.5], [197.1, 90.9],
[180.1, 93.0], [175.5, 80.9], [180.6, 72.7], [184.4, 68.0], [175.5, 70.9],
[180.6, 72.5], [177.0, 72.5], [177.1, 83.4], [181.6, 75.5], [176.5, 73.0],
[175.0, 70.2], [174.0, 73.4], [165.1, 70.5], [177.0, 68.9], [192.0, 102.3],
[176.5, 68.4], [169.4, 65.9], [182.1, 75.7], [179.8, 84.5], [175.3, 87.7],
[184.9, 86.4], [177.3, 73.2], [167.4, 53.9], [178.1, 72.0], [168.9, 55.5],
[157.2, 58.4], [180.3, 83.2], [170.2, 72.7], [177.8, 64.1], [172.7, 72.3],
[165.1, 65.0], [186.7, 86.4], [165.1, 65.0], [174.0, 88.6], [175.3, 84.1],
[185.4, 66.8], [177.8, 75.5], [180.3, 93.2], [180.3, 82.7], [177.8, 58.0],
[177.8, 79.5], [177.8, 78.6], [177.8, 71.8], [177.8, 116.4], [163.8, 72.2],
[188.0, 83.6], [198.1, 85.5], [175.3, 90.9], [166.4, 85.9], [190.5, 89.1],
[166.4, 75.0], [177.8, 77.7], [179.7, 86.4], [172.7, 90.9], [190.5, 73.6],
[185.4, 76.4], [168.9, 69.1], [167.6, 84.5], [175.3, 64.5], [170.2, 69.1],
[190.5, 108.6], [177.8, 86.4], [190.5, 80.9], [177.8, 87.7], [184.2, 94.5],
[176.5, 80.2], [177.8, 72.0], [180.3, 71.4], [171.4, 72.7], [172.7, 84.1],
[172.7, 76.8], [177.8, 63.6], [177.8, 80.9], [182.9, 80.9], [170.2, 85.5],
[167.6, 68.6], [175.3, 67.7], [165.1, 66.4], [185.4, 102.3], [181.6, 70.5],
[172.7, 95.9], [190.5, 84.1], [179.1, 87.3], [175.3, 71.8], [170.2, 65.9],
[193.0, 95.9], [171.4, 91.4], [177.8, 81.8], [177.8, 96.8], [167.6, 69.1],
[167.6, 82.7], [180.3, 75.5], [182.9, 79.5], [176.5, 73.6], [186.7, 91.8],
[188.0, 84.1], [188.0, 85.9], [177.8, 81.8], [174.0, 82.5], [177.8, 80.5],
[171.4, 70.0], [185.4, 81.8], [185.4, 84.1], [188.0, 90.5], [188.0, 91.4],
[182.9, 89.1], [176.5, 85.0], [175.3, 69.1], [175.3, 73.6], [188.0, 80.5],
[188.0, 82.7], [175.3, 86.4], [170.5, 67.7], [179.1, 92.7], [177.8, 93.6],
[175.3, 70.9], [182.9, 75.0], [170.8, 93.2], [188.0, 93.2], [180.3, 77.7],
[177.8, 61.4], [185.4, 94.1], [168.9, 75.0], [185.4, 83.6], [180.3, 85.5],
[174.0, 73.9], [167.6, 66.8], [182.9, 87.3], [160.0, 72.3], [180.3, 88.6],
[167.6, 75.5], [186.7, 101.4], [175.3, 91.1], [175.3, 67.3], [175.9, 77.7],
[175.3, 81.8], [179.1, 75.5], [181.6, 84.5], [177.8, 76.6], [182.9, 85.0],
[177.8, 102.5], [184.2, 77.3], [179.1, 71.8], [176.5, 87.9], [188.0, 94.3],
[174.0, 70.9], [167.6, 64.5], [170.2, 77.3], [167.6, 72.3], [188.0, 87.3],
[174.0, 80.0], [176.5, 82.3], [180.3, 73.6], [167.6, 74.1], [188.0, 85.9],
[180.3, 73.2], [167.6, 76.3], [183.0, 65.9], [183.0, 90.9], [179.1, 89.1],
[170.2, 62.3], [177.8, 82.7], [179.1, 79.1], [190.5, 98.2], [177.8, 84.1],
[180.3, 83.2], [180.3, 83.2]]
}]
"""
import json
data = json.loads(data)
li_df = []
for k, d in enumerate(data):
df = pd.DataFrame(d['data'], columns=['Height', 'Weight'])
df['Sex'] = 'Female' if k==0 else 'Male'
li_df.append(df)
df = pd.concat(li_df)
df = df.set_index(['Height', 'Weight'])
return df
def df_bubble_old(N=10, P=3):
np.random.seed(123456)
mi = pd.MultiIndex.from_arrays([np.random.choice(['Cat'+str(1+k) for k in range(P)], size=N),
np.random.uniform(0, 10, size=N),
np.random.uniform(0, 10, size=N),
])
df = pd.DataFrame(index=mi, data=np.random.choice(range(1, 5), size=(N)), columns=['value'])
df.index.names = ['series', 'x', 'y']
df = df.sortlevel('series')
return df
def df_bubble():
data = """
[{
"data": [[97, 36, 79], [94, 74, 60], [68, 76, 58], [64, 87, 56], [68, 27, 73], [74, 99, 42], [7, 93, 87], [51, 69, 40], [38, 23, 33], [57, 86, 31]]
}, {
"data": [[25, 10, 87], [2, 75, 59], [11, 54, 8], [86, 55, 93], [5, 3, 58], [90, 63, 44], [91, 33, 17], [97, 3, 56], [15, 67, 48], [54, 25, 81]]
}, {
"data": [[47, 47, 21], [20, 12, 4], [6, 76, 91], [38, 30, 60], [57, 98, 64], [61, 17, 80], [83, 60, 13], [67, 78, 75], [64, 12, 10], [30, 77, 82]]
}]
"""
data = json.loads(data)
li_df = []
for k, d in enumerate(data):
df = pd.DataFrame(d['data'], columns=['x', 'y', 'Size'])
df['Cat'] = 'Cat'+str(1+k)
li_df.append(df)
# pd.read_json(tt[0])
df = pd.concat(li_df)
df = df.set_index(['Cat', 'x', 'y'])
return df
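# heatmap sample: two-level index (Name, Day) with a single 'Sales' value column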
def df_heatmap():
def mapping(s):
return [x_cat[s[0]], y_cat[s[1]], s[2]]
data = [[0, 0, 10], [0, 1, 19], [0, 2, 8], [0, 3, 24], [0, 4, 67], [1, 0, 92], [1, 1, 58], [1, 2, 78], [1, 3, 117], [1, 4, 48], [2, 0, 35], [2, 1, 15], [2, 2, 123], [2, 3, 64], [2, 4, 52], [3, 0, 72], [3, 1, 132], [3, 2, 114], [3, 3, 19], [3, 4, 16], [4, 0, 38], [4, 1, 5], [4, 2, 8], [4, 3, 117], [4, 4, 115], [5, 0, 88], [5, 1, 32], [5, 2, 12], [5, 3, 6], [5, 4, 120], [6, 0, 13], [6, 1, 44], [6, 2, 88], [6, 3, 98], [6, 4, 96], [7, 0, 31], [7, 1, 1], [7, 2, 82], [7, 3, 32], [7, 4, 30], [8, 0, 85], [8, 1, 97], [8, 2, 123], [8, 3, 64], [8, 4, 84], [9, 0, 47], [9, 1, 114], [9, 2, 31], [9, 3, 48], [9, 4, 91]]
x_cat = ['Alexander', 'Marie', 'Maximilian', 'Sophia', 'Lukas', 'Maria', 'Leon', 'Anna', 'Tim', 'Laura']
y_cat = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
df = pd.DataFrame(data=data)
df = df.apply(mapping, axis=1)
df.columns = ['Name', 'Day', 'Sales']
df = df.set_index(['Name', 'Day'])
return df
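# two-level index sample built from WHO-style mortality rates nested as
# region -> country -> cause group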
def df_two_idx_several_col():
data = {
"South-East Asia": {
"Sri Lanka": {
"Communicable & other Group I": "75.5",
"Injuries": "89.0",
"Noncommunicable diseases": "501.2"
},
"Bangladesh": {
"Noncommunicable diseases": "548.9",
"Injuries": "64.0",
"Communicable & other Group I": "234.6"
},
"Myanmar": {
"Communicable & other Group I": "316.4",
"Injuries": "102.0",
"Noncommunicable diseases": "708.7"
},
"Maldives": {
"Injuries": "35.0",
"Noncommunicable diseases": "487.2",
"Communicable & other Group I": "59.2"
},
"Democratic People's Republic of Korea": {
"Injuries": "91.9",
"Noncommunicable diseases": "751.4",
"Communicable & other Group I": "117.3"
},
"Bhutan": {
"Injuries": "142.2",
"Noncommunicable diseases": "572.8",
"Communicable & other Group I": "186.9"
},
"Thailand": {
"Injuries": "72.8",
"Communicable & other Group I": "123.3",
"Noncommunicable diseases": "449.1"
},
"Nepal": {
"Noncommunicable diseases": "678.1",
"Injuries": "88.7",
"Communicable & other Group I": "251.8"
},
"Timor-Leste": {
"Injuries": "69.2",
"Noncommunicable diseases": "670.5",
"Communicable & other Group I": "343.5"
},
"India": {
"Communicable & other Group I": "253.0",
"Injuries": "115.9",
"Noncommunicable diseases": "682.3"
},
"Indonesia": {
"Injuries": "49.3",
"Noncommunicable diseases": "680.1",
"Communicable & other Group I": "162.4"
}
},
"Europe": {
"Hungary": {
"Communicable & other Group I": "16.8",
"Noncommunicable diseases": "602.8",
"Injuries": "44.3"
},
"Poland": {
"Communicable & other Group I": "22.6",
"Noncommunicable diseases": "494.5",
"Injuries": "48.9"
},
"Israel": {
"Communicable & other Group I": "31.2",
"Noncommunicable diseases": "311.2",
"Injuries": "20.8"
},
"France": {
"Communicable & other Group I": "21.4",
"Noncommunicable diseases": "313.2",
"Injuries": "34.6"
},
"Turkey": {
"Injuries": "39.1",
"Communicable & other Group I": "43.9",
"Noncommunicable diseases": "555.2"
},
"Kyrgyzstan": {
"Communicable & other Group I": "65.8",
"Injuries": "65.1",
"Noncommunicable diseases": "835.4"
},
"Croatia": {
"Communicable & other Group I": "12.2",
"Noncommunicable diseases": "495.8",
"Injuries": "40.1"
},
"Portugal": {
"Injuries": "25.2",
"Communicable & other Group I": "39.9",
"Noncommunicable diseases": "343.3"
},
"Greece": {
"Injuries": "26.5",
"Noncommunicable diseases": "365.0",
"Communicable & other Group I": "24.1"
},
"Italy": {
"Injuries": "20.1",
"Communicable & other Group I": "15.5",
"Noncommunicable diseases": "303.6"
},
"Belgium": {
"Communicable & other Group I": "27.8",
"Injuries": "38.9",
"Noncommunicable diseases": "356.8"
},
"Lithuania": {
"Noncommunicable diseases": "580.6",
"Communicable & other Group I": "25.5",
"Injuries": "76.4"
},
"Uzbekistan": {
"Communicable & other Group I": "85.8",
"Injuries": "47.4",
"Noncommunicable diseases": "810.9"
},
"Serbia": {
"Communicable & other Group I": "19.4",
"Injuries": "32.0",
"Noncommunicable diseases": "657.7"
},
"Austria": {
"Noncommunicable diseases": "359.5",
"Injuries": "30.6",
"Communicable & other Group I": "12.6"
},
"Bosnia and Herzegovina": {
"Injuries": "42.4",
"Noncommunicable diseases": "512.5",
"Communicable & other Group I": "20.0"
},
"Slovakia": {
"Injuries": "39.1",
"Communicable & other Group I": "35.3",
"Noncommunicable diseases": "532.5"
},
"The former Yugoslav republic of Macedonia": {
"Injuries": "24.0",
"Communicable & other Group I": "16.9",
"Noncommunicable diseases": "636.5"
},
"Sweden": {
"Communicable & other Group I": "19.3",
"Noncommunicable diseases": "333.5",
"Injuries": "26.1"
},
"Russian Federation": {
"Noncommunicable diseases": "790.3",
"Communicable & other Group I": "73.8",
"Injuries": "102.8"
},
"Republic of Moldova": {
"Noncommunicable diseases": "787.6",
"Injuries": "75.7",
"Communicable & other Group I": "44.5"
},
"Ireland": {
"Injuries": "31.8",
"Communicable & other Group I": "21.5",
"Noncommunicable diseases": "343.9"
},
"Estonia": {
"Injuries": "47.0",
"Communicable & other Group I": "18.5",
"Noncommunicable diseases": "510.7"
},
"Cyprus": {
"Noncommunicable diseases": "333.0",
"Injuries": "26.6",
"Communicable & other Group I": "16.2"
},
"Kazakhstan": {
"Noncommunicable diseases": "949.7",
"Injuries": "101.6",
"Communicable & other Group I": "55.3"
},
"Netherlands": {
"Noncommunicable diseases": "355.2",
"Injuries": "22.3",
"Communicable & other Group I": "25.5"
},
"Finland": {
"Noncommunicable diseases": "366.6",
"Injuries": "38.7",
"Communicable & other Group I": "9.0"
},
"Romania": {
"Noncommunicable diseases": "612.2",
"Injuries": "40.7",
"Communicable & other Group I": "38.5"
},
"Albania": {
"Noncommunicable diseases": "671.6",
"Injuries": "48.0",
"Communicable & other Group I": "46.5"
},
"Iceland": {
"Injuries": "29.0",
"Noncommunicable diseases": "311.7",
"Communicable & other Group I": "14.0"
},
"Azerbaijan": {
"Noncommunicable diseases": "664.3",
"Injuries": "33.6",
"Communicable & other Group I": "70.8"
},
"Tajikistan": {
"Injuries": "51.6",
"Communicable & other Group I": "147.7",
"Noncommunicable diseases": "752.6"
},
"Bulgaria": {
"Communicable & other Group I": "33.4",
"Injuries": "36.4",
"Noncommunicable diseases": "638.2"
},
"United Kingdom of Great Britain and Northern Ireland": {
"Communicable & other Group I": "28.5",
"Injuries": "21.5",
"Noncommunicable diseases": "358.8"
},
"Spain": {
"Communicable & other Group I": "19.1",
"Injuries": "17.8",
"Noncommunicable diseases": "323.1"
},
"Ukraine": {
"Communicable & other Group I": "69.3",
"Injuries": "67.3",
"Noncommunicable diseases": "749.0"
},
"Norway": {
"Noncommunicable diseases": "336.6",
"Communicable & other Group I": "25.2",
"Injuries": "25.6"
},
"Denmark": {
"Injuries": "22.5",
"Communicable & other Group I": "29.5",
"Noncommunicable diseases": "406.1"
},
"Belarus": {
"Noncommunicable diseases": "682.5",
"Communicable & other Group I": "28.3",
"Injuries": "91.3"
},
"Malta": {
"Noncommunicable diseases": "364.5",
"Injuries": "19.0",
"Communicable & other Group I": "23.6"
},
"Latvia": {
"Noncommunicable diseases": "623.7",
"Injuries": "54.5",
"Communicable & other Group I": "26.0"
},
"Turkmenistan": {
"Injuries": "93.0",
"Communicable & other Group I": "115.8",
"Noncommunicable diseases": "1025.1"
},
"Switzerland": {
"Communicable & other Group I": "14.5",
"Noncommunicable diseases": "291.6",
"Injuries": "25.4"
},
"Luxembourg": {
"Injuries": "31.1",
"Noncommunicable diseases": "317.8",
"Communicable & other Group I": "20.5"
},
"Georgia": {
"Injuries": "32.2",
"Communicable & other Group I": "39.3",
"Noncommunicable diseases": "615.2"
},
"Slovenia": {
"Noncommunicable diseases": "369.2",
"Communicable & other Group I": "15.4",
"Injuries": "44.2"
},
"Montenegro": {
"Communicable & other Group I": "18.7",
"Noncommunicable diseases": "571.5",
"Injuries": "41.2"
},
"Armenia": {
"Noncommunicable diseases": "847.5",
"Communicable & other Group I": "45.0",
"Injuries": "49.2"
},
"Germany": {
"Injuries": "23.0",
"Communicable & other Group I": "21.6",
"Noncommunicable diseases": "365.1"
},
"Czech Republic": {
"Injuries": "39.1",
"Noncommunicable diseases": "460.7",
"Communicable & other Group I": "27.0"
}
},
"Africa": {
"Equatorial Guinea": {
"Communicable & other Group I": "756.8",
"Injuries": "133.6",
"Noncommunicable diseases": "729.0"
},
"Madagascar": {
"Noncommunicable diseases": "648.6",
"Communicable & other Group I": "429.9",
"Injuries": "89.0"
},
"Swaziland": {
"Communicable & other Group I": "884.3",
"Injuries": "119.5",
"Noncommunicable diseases": "702.4"
},
"Congo": {
"Noncommunicable diseases": "632.3",
"Communicable & other Group I": "666.9",
"Injuries": "89.0"
},
"Burkina Faso": {
"Communicable & other Group I": "648.2",
"Noncommunicable diseases": "784.0",
"Injuries": "119.3"
},
"Guinea-Bissau": {
"Communicable & other Group I": "869.8",
"Noncommunicable diseases": "764.7",
"Injuries": "111.6"
},
"Democratic Republic of the Congo": {
"Noncommunicable diseases": "724.4",
"Injuries": "137.1",
"Communicable & other Group I": "920.7"
},
"Mozambique": {
"Injuries": "175.3",
"Noncommunicable diseases": "593.7",
"Communicable & other Group I": "998.1"
},
"Central African Republic": {
"Communicable & other Group I": "1212.1",
"Injuries": "107.9",
"Noncommunicable diseases": "550.8"
},
"United Republic of Tanzania": {
"Noncommunicable diseases": "569.8",
"Communicable & other Group I": "584.2",
"Injuries": "129.2"
},
"Cameroon": {
"Communicable & other Group I": "768.8",
"Injuries": "106.0",
"Noncommunicable diseases": "675.2"
},
"Togo": {
"Noncommunicable diseases": "679.0",
"Communicable & other Group I": "681.8",
"Injuries": "93.0"
},
"Eritrea": {
"Injuries": "118.7",
"Communicable & other Group I": "506.0",
"Noncommunicable diseases": "671.9"
},
"Namibia": {
"Injuries": "76.4",
"Noncommunicable diseases": "580.2",
"Communicable & other Group I": "356.6"
},
"Senegal": {
"Noncommunicable diseases": "558.1",
"Injuries": "89.3",
"Communicable & other Group I": "587.7"
},
"Chad": {
"Communicable & other Group I": "1070.9",
"Injuries": "114.5",
"Noncommunicable diseases": "712.6"
},
"Benin": {
"Injuries": "98.0",
"Noncommunicable diseases": "761.5",
"Communicable & other Group I": "577.3"
},
"Zimbabwe": {
"Communicable & other Group I": "711.3",
"Injuries": "82.5",
"Noncommunicable diseases": "598.9"
},
"Rwanda": {
"Noncommunicable diseases": "585.3",
"Injuries": "106.3",
"Communicable & other Group I": "401.7"
},
"Zambia": {
"Noncommunicable diseases": "587.4",
"Injuries": "156.4",
"Communicable & other Group I": "764.3"
},
"Mali": {
"Injuries": "119.5",
"Communicable & other Group I": "588.3",
"Noncommunicable diseases": "866.1"
},
"Ethiopia": {
"Injuries": "94.5",
"Communicable & other Group I": "558.9",
"Noncommunicable diseases": "476.3"
},
"South Africa": {
"Communicable & other Group I": "611.6",
"Injuries": "103.5",
"Noncommunicable diseases": "710.9"
},
"Burundi": {
"Injuries": "146.6",
"Communicable & other Group I": "704.8",
"Noncommunicable diseases": "729.5"
},
"Cabo Verde": {
"Injuries": "54.4",
"Noncommunicable diseases": "482.1",
"Communicable & other Group I": "141.9"
},
"Liberia": {
"Noncommunicable diseases": "656.9",
"Injuries": "83.3",
"Communicable & other Group I": "609.1"
},
"Uganda": {
"Noncommunicable diseases": "664.4",
"Communicable & other Group I": "696.7",
"Injuries": "166.8"
},
"Mauritius": {
"Noncommunicable diseases": "576.5",
"Injuries": "44.1",
"Communicable & other Group I": "61.8"
},
"Algeria": {
"Noncommunicable diseases": "710.4",
"Injuries": "53.8",
"Communicable & other Group I": "97.8"
},
"C\u00f4te d'Ivoire": {
"Noncommunicable diseases": "794.0",
"Injuries": "124.0",
"Communicable & other Group I": "861.3"
},
"Malawi": {
"Injuries": "97.7",
"Communicable & other Group I": "777.6",
"Noncommunicable diseases": "655.0"
},
"Botswana": {
"Injuries": "87.9",
"Noncommunicable diseases": "612.2",
"Communicable & other Group I": "555.3"
},
"Guinea": {
"Injuries": "96.0",
"Noncommunicable diseases": "681.1",
"Communicable & other Group I": "679.6"
},
"Ghana": {
"Injuries": "76.1",
"Noncommunicable diseases": "669.9",
"Communicable & other Group I": "476.0"
},
"Kenya": {
"Noncommunicable diseases": "514.7",
"Injuries": "101.1",
"Communicable & other Group I": "657.5"
},
"Gambia": {
"Noncommunicable diseases": "629.6",
"Injuries": "96.0",
"Communicable & other Group I": "590.5"
},
"Angola": {
"Injuries": "137.8",
"Noncommunicable diseases": "768.4",
"Communicable & other Group I": "873.3"
},
"Sierra Leone": {
"Communicable & other Group I": "1327.4",
"Noncommunicable diseases": "963.5",
"Injuries": "149.5"
},
"Mauritania": {
"Communicable & other Group I": "619.1",
"Injuries": "83.4",
"Noncommunicable diseases": "555.1"
},
"Comoros": {
"Communicable & other Group I": "494.6",
"Injuries": "132.4",
"Noncommunicable diseases": "695.5"
},
"Gabon": {
"Noncommunicable diseases": "504.6",
"Injuries": "77.4",
"Communicable & other Group I": "589.4"
},
"Niger": {
"Injuries": "97.6",
"Communicable & other Group I": "740.0",
"Noncommunicable diseases": "649.1"
},
"Lesotho": {
"Communicable & other Group I": "1110.5",
"Injuries": "142.5",
"Noncommunicable diseases": "671.8"
},
"Nigeria": {
"Noncommunicable diseases": "673.7",
"Communicable & other Group I": "866.2",
"Injuries": "145.6"
}
},
"Americas": {
"Canada": {
"Noncommunicable diseases": "318.0",
"Injuries": "31.3",
"Communicable & other Group I": "22.6"
},
"Bolivia (Plurinational State of)": {
"Communicable & other Group I": "226.2",
"Noncommunicable diseases": "635.3",
"Injuries": "100.0"
},
"Haiti": {
"Communicable & other Group I": "405.4",
"Noncommunicable diseases": "724.6",
"Injuries": "89.3"
},
"Belize": {
"Noncommunicable diseases": "470.7",
"Injuries": "82.0",
"Communicable & other Group I": "104.6"
},
"Suriname": {
"Injuries": "70.5",
"Communicable & other Group I": "83.7",
"Noncommunicable diseases": "374.8"
},
"Argentina": {
"Communicable & other Group I": "68.7",
"Injuries": "50.7",
"Noncommunicable diseases": "467.3"
},
"Mexico": {
"Injuries": "63.2",
"Communicable & other Group I": "57.0",
"Noncommunicable diseases": "468.3"
},
"Jamaica": {
"Injuries": "51.5",
"Communicable & other Group I": "97.0",
"Noncommunicable diseases": "519.1"
},
"Peru": {
"Noncommunicable diseases": "363.5",
"Injuries": "47.9",
"Communicable & other Group I": "121.3"
},
"Brazil": {
"Injuries": "80.2",
"Communicable & other Group I": "92.8",
"Noncommunicable diseases": "513.8"
},
"Venezuela (Bolivarian Republic of)": {
"Communicable & other Group I": "58.2",
"Injuries": "103.2",
"Noncommunicable diseases": "410.6"
},
"Paraguay": {
"Noncommunicable diseases": "485.5",
"Communicable & other Group I": "77.3",
"Injuries": "67.6"
},
"Chile": {
"Noncommunicable diseases": "366.5",
"Communicable & other Group I": "36.3",
"Injuries": "41.2"
},
"Trinidad and Tobago": {
"Noncommunicable diseases": "705.3",
"Communicable & other Group I": "80.4",
"Injuries": "98.4"
},
"Colombia": {
"Noncommunicable diseases": "377.3",
"Communicable & other Group I": "55.0",
"Injuries": "72.6"
},
"Cuba": {
"Injuries": "45.3",
"Noncommunicable diseases": "421.8",
"Communicable & other Group I": "33.2"
},
"El Salvador": {
"Noncommunicable diseases": "474.9",
"Injuries": "157.7",
"Communicable & other Group I": "96.2"
},
"Honduras": {
"Injuries": "80.8",
"Communicable & other Group I": "117.5",
"Noncommunicable diseases": "441.5"
},
"Ecuador": {
"Noncommunicable diseases": "409.7",
"Injuries": "83.7",
"Communicable & other Group I": "97.3"
},
"Costa Rica": {
"Communicable & other Group I": "30.5",
"Noncommunicable diseases": "391.8",
"Injuries": "46.5"
},
"Dominican Republic": {
"Noncommunicable diseases": "396.0",
"Injuries": "66.4",
"Communicable & other Group I": "76.8"
},
"Nicaragua": {
"Communicable & other Group I": "75.2",
"Injuries": "64.4",
"Noncommunicable diseases": "546.6"
},
"Barbados": {
"Noncommunicable diseases": "404.5",
"Injuries": "28.0",
"Communicable & other Group I": "60.8"
},
"Uruguay": {
"Noncommunicable diseases": "446.0",
"Injuries": "53.8",
"Communicable & other Group I": "46.2"
},
"Panama": {
"Communicable & other Group I": "86.1",
"Injuries": "67.4",
"Noncommunicable diseases": "372.9"
},
"Bahamas": {
"Noncommunicable diseases": "465.2",
"Injuries": "45.7",
"Communicable & other Group I": "122.0"
},
"Guyana": {
"Communicable & other Group I": "177.2",
"Noncommunicable diseases": "1024.2",
"Injuries": "150.0"
},
"United States of America": {
"Noncommunicable diseases": "412.8",
"Injuries": "44.2",
"Communicable & other Group I": "31.3"
},
"Guatemala": {
"Communicable & other Group I": "212.7",
"Noncommunicable diseases": "409.4",
"Injuries": "111.0"
}
},
"Eastern Mediterranean": {
"Egypt": {
"Communicable & other Group I": "74.3",
"Noncommunicable diseases": "781.7",
"Injuries": "33.5"
},
"South Sudan": {
"Injuries": "143.4",
"Communicable & other Group I": "831.3",
"Noncommunicable diseases": "623.4"
},
"Sudan": {
"Injuries": "133.6",
"Noncommunicable diseases": "551.0",
"Communicable & other Group I": "495.0"
},
"Libya": {
"Injuries": "62.8",
"Noncommunicable diseases": "550.0",
"Communicable & other Group I": "52.6"
},
"Jordan": {
"Noncommunicable diseases": "640.3",
"Injuries": "53.5",
"Communicable & other Group I": "52.5"
},
"Pakistan": {
"Communicable & other Group I": "296.0",
"Noncommunicable diseases": "669.3",
"Injuries": "98.7"
},
"Djibouti": {
"Noncommunicable diseases": "631.1",
"Communicable & other Group I": "626.0",
"Injuries": "106.0"
},
"Syrian Arab Republic": {
"Communicable & other Group I": "41.0",
"Injuries": "308.0",
"Noncommunicable diseases": "572.7"
},
"Morocco": {
"Noncommunicable diseases": "707.7",
"Communicable & other Group I": "131.5",
"Injuries": "47.0"
},
"Yemen": {
"Communicable & other Group I": "515.0",
"Noncommunicable diseases": "626.9",
"Injuries": "84.3"
},
"Bahrain": {
"Injuries": "33.5",
"Noncommunicable diseases": "505.7",
"Communicable & other Group I": "48.5"
},
"United Arab Emirates": {
"Noncommunicable diseases": "546.8",
"Injuries": "31.5",
"Communicable & other Group I": "35.6"
},
"Lebanon": {
"Noncommunicable diseases": "384.6",
"Injuries": "40.6",
"Communicable & other Group I": "30.5"
},
"Saudi Arabia": {
"Noncommunicable diseases": "549.4",
"Injuries": "41.1",
"Communicable & other Group I": "71.3"
},
"Iran (Islamic Republic of)": {
"Injuries": "74.9",
"Communicable & other Group I": "56.2",
"Noncommunicable diseases": "569.3"
},
"Iraq": {
"Communicable & other Group I": "87.0",
"Noncommunicable diseases": "715.5",
"Injuries": "128.5"
},
"Qatar": {
"Communicable & other Group I": "28.3",
"Injuries": "41.0",
"Noncommunicable diseases": "407.0"
},
"Afghanistan": {
"Communicable & other Group I": "362.7",
"Injuries": "169.2",
"Noncommunicable diseases": "846.3"
},
"Somalia": {
"Noncommunicable diseases": "550.7",
"Communicable & other Group I": "927.2",
"Injuries": "188.5"
},
"Kuwait": {
"Communicable & other Group I": "82.5",
"Injuries": "25.4",
"Noncommunicable diseases": "406.3"
},
"Oman": {
"Injuries": "52.8",
"Noncommunicable diseases": "478.2",
"Communicable & other Group I": "84.2"
},
"Tunisia": {
"Noncommunicable diseases": "509.3",
"Communicable & other Group I": "65.0",
"Injuries": "39.1"
}
},
"Western Pacific": {
"Mongolia": {
"Injuries": "69.4",
"Noncommunicable diseases": "966.5",
"Communicable & other Group I": "82.8"
},
"Cambodia": {
"Injuries": "62.2",
"Communicable & other Group I": "227.5",
"Noncommunicable diseases": "394.0"
},
"Japan": {
"Injuries": "40.5",
"Noncommunicable diseases": "244.2",
"Communicable & other Group I": "33.9"
},
"Brunei Darussalam": {
"Injuries": "44.6",
"Noncommunicable diseases": "475.3",
"Communicable & other Group I": "56.1"
},
"Solomon Islands": {
"Communicable & other Group I": "230.6",
"Injuries": "75.1",
"Noncommunicable diseases": "709.7"
},
"Viet Nam": {
"Communicable & other Group I": "96.0",
"Injuries": "59.0",
"Noncommunicable diseases": "435.4"
},
"Lao People's Democratic Republic": {
"Communicable & other Group I": "328.7",
"Injuries": "75.2",
"Noncommunicable diseases": "680.0"
},
"China": {
"Communicable & other Group I": "41.4",
"Noncommunicable diseases": "576.3",
"Injuries": "50.4"
},
"New Zealand": {
"Injuries": "32.9",
"Noncommunicable diseases": "313.6",
"Communicable & other Group I": "18.0"
},
"Papua New Guinea": {
"Injuries": "100.1",
"Communicable & other Group I": "554.3",
"Noncommunicable diseases": "693.2"
},
"Philippines": {
"Communicable & other Group I": "226.4",
"Noncommunicable diseases": "720.0",
"Injuries": "53.8"
},
"Malaysia": {
"Injuries": "62.8",
"Noncommunicable diseases": "563.2",
"Communicable & other Group I": "117.4"
},
"Australia": {
"Communicable & other Group I": "13.7",
"Noncommunicable diseases": "302.9",
"Injuries": "28.2"
},
"Fiji": {
"Noncommunicable diseases": "804.0",
"Injuries": "64.0",
"Communicable & other Group I": "105.2"
},
"Singapore": {
"Communicable & other Group I": "66.2",
"Noncommunicable diseases": "264.8",
"Injuries": "17.5"
},
"Republic of Korea": {
"Injuries": "53.1",
"Communicable & other Group I": "33.8",
"Noncommunicable diseases": "302.1"
}
}
}
    # Flatten the nested region -> country -> cause-of-death mapping into a
    # single DataFrame indexed by (Region, Country).
li_df = []
for r in data.keys():
df = pd.read_json(json.dumps(data[r])).T
df.index.name = 'Country'
df['Region'] = r
df = df.set_index('Region', append=True)
df = df.reorder_levels(['Region', 'Country'])
li_df.append(df)
df = pd.concat(li_df)
return df
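    # Illustrative use of the returned frame (an editor's sketch, assuming the
    # caller keeps the (Region, Country) index built above), e.g.:
    #   df.loc[('Africa', 'Kenya'), 'Injuries']   # Kenya's injury mortality rate
    #   df.xs('Americas', level='Region')         # all countries in one region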
|
Tangcuyu/perfectinfo | refs/heads/master | lib/socksd/node_modules/node-gyp/gyp/pylib/gyp/input.py | 713 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = [
'executable',
'shared_library',
'loadable_module',
'mac_kernel_extension',
]
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section and section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
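  # For example, by the suffix rule above IsPathSection('include_dirs'),
  # IsPathSection('foo_path') and IsPathSection('foo_files') return True,
  # while IsPathSection('defines') returns False (assuming no generator has
  # registered 'defines' in path_sections).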
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
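  # For example, CheckedEval("{'targets': [{'target_name': 'a'}]}") returns the
  # corresponding dict, while CheckedEval("{'a': 1, 'a': 2}") raises GypError
  # because the key 'a' is repeated.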
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
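# For example, with multiple_toolsets enabled, a target declaring
#   'toolsets': ['host', 'target']
# is replaced by two copies of itself, one with 'toolset': 'host' and one with
# 'toolset': 'target'; with multiple_toolsets disabled it is simply given
# 'toolset': 'target'.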
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
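  # For example, IsStrCanonicalInt('10') and IsStrCanonicalInt('-5') are True,
  # while IsStrCanonicalInt('010'), IsStrCanonicalInt('1.0') and
  # IsStrCanonicalInt(10) (not a str) are not.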
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller
    # must be expecting a list in return (not all callers do, because
    # not all are working in list context). Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
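      # For example, '<|(files_list.txt a.cc b.cc)' writes "a.cc" and "b.cc"
      # (one per line) into files_list.txt and expands to the (possibly
      # relocated) path of that file rather than to the list itself.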
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
try:
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
except Exception, e:
raise GypError("%s while executing command '%s' in %s" %
(e, contents, build_file))
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d while in %s." %
(contents, p.returncode, build_file))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
                        contents, build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
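  # For example (illustrative), with variables = {'foo': 'bar', 'count': 3} and
  # phase PHASE_EARLY:
  #   ExpandVariables('<(foo)/baz', ...)  -> 'bar/baz'
  #   ExpandVariables('<(count)', ...)    -> 3 (canonical ints are converted)
  #   ExpandVariables('<!(echo hi)', ...) -> 'hi' on a typical shell (command
  #                                          output, cached per directory)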
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
  # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
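  # For example (illustrative, assuming a variable OS is defined):
  #   'conditions': [
  #     ['OS=="mac"', {'defines': ['MAC']}, {'defines': ['NOT_MAC']}],
  #   ],
  # merges the first dict into the_dict when OS evaluates to "mac" and the
  # second dict otherwise.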
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
if merge_dict != None:
      # Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  # Any key in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
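  # For example, a block such as (illustrative):
  #   'variables': {'use_foo%': 0}
  # defines use_foo as 0 only if use_foo was not already set (e.g. by the
  # command line or an enclosing scope); without the trailing '%' it would
  # overwrite any existing value.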
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError(
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
              str(index))
elif type(item) is not int:
raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
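  # For example, a base section 'dependencies' contributes the keys
  # 'dependencies', 'dependencies!' and 'dependencies/'.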
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
    if dependencies is None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
    This method is not terribly useful on its own; it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
    if dependencies is None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
    dependencies whose settings a direct dependency has asked (via
    'export_dependent_settings') to export through it.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables, mac kernel extensions and loadable modules are already fully
    # and finally linked.  Nothing else can be a link dependency of them; there
# can only be dependencies in the sense that a dependent target might run
# an executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module',
'mac_kernel_extension'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
      # this linkable target.  Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
# target's computed list of link dependencies (including static libraries
# if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
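  # As an illustrative sketch: with to_file='dir_a/a.gyp',
  # fro_file='dir_b/b.gyp' and item='foo/bar.cc', the result would be
  # '../dir_b/foo/bar.cc'.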
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # Python documentation recommends that objects which do not support hashing
  # set this value to None.  Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
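        # For example, a plain filename such as 'foo.cc' is treated as a
        # singleton, while a flag like '-lpthread' may legitimately repeat.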
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
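      # For example, a 'sources=' key in |fro| replaces to['sources'] outright,
      # 'sources+' prepends to it, 'sources?' only applies when 'sources' is
      # absent from |to|, and a plain 'sources' key appends.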
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will only be
      # to make copies of dicts (with paths fixed); there will be no
      # subsequent dict "merging" once entering a list, because lists are
      # always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|.  This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
' must not be present prior '
                     'to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'mac_kernel_extension', 'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check):
if not duplicate_basename_check:
return
if target_dict.get('type', None) != 'static_library':
return
sources = target_dict.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
target + error + 'libtool on Mac cannot handle that. Use '
'--no-duplicate-basename-check to disable this validation.')
raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
    # Separate out 'path/to/file.gyp', 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, duplicate_basename_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
|
willingc/oh-mainline | refs/heads/master | vendor/packages/scrapy/scrapy/contrib/downloadermiddleware/httpcache.py | 16 | from __future__ import with_statement
import os
from os.path import join, exists
from time import time
import cPickle as pickle
from w3lib.http import headers_dict_to_raw, headers_raw_to_dict
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from scrapy.http import Headers
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.responsetypes import responsetypes
from scrapy.utils.request import request_fingerprint
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.misc import load_object
from scrapy.utils.project import data_path
from scrapy import conf
class HttpCacheMiddleware(object):
def __init__(self, settings=conf.settings):
if not settings.getbool('HTTPCACHE_ENABLED'):
raise NotConfigured
self.storage = load_object(settings['HTTPCACHE_STORAGE'])(settings)
self.ignore_missing = settings.getbool('HTTPCACHE_IGNORE_MISSING')
self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES')
self.ignore_http_codes = map(int, settings.getlist('HTTPCACHE_IGNORE_HTTP_CODES'))
dispatcher.connect(self.spider_opened, signal=signals.spider_opened)
dispatcher.connect(self.spider_closed, signal=signals.spider_closed)
def spider_opened(self, spider):
self.storage.open_spider(spider)
def spider_closed(self, spider):
self.storage.close_spider(spider)
def process_request(self, request, spider):
if not self.is_cacheable(request):
return
response = self.storage.retrieve_response(spider, request)
if response and self.is_cacheable_response(response):
response.flags.append('cached')
return response
elif self.ignore_missing:
raise IgnoreRequest("Ignored request not in cache: %s" % request)
def process_response(self, request, response, spider):
if self.is_cacheable(request) and self.is_cacheable_response(response):
self.storage.store_response(spider, request, response)
return response
def is_cacheable_response(self, response):
return response.status not in self.ignore_http_codes
def is_cacheable(self, request):
return urlparse_cached(request).scheme not in self.ignore_schemes
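# Illustrative settings sketch for enabling this middleware (the setting names
# are the ones read by the classes in this module; the values are examples
# only):
#   HTTPCACHE_ENABLED = True
#   HTTPCACHE_STORAGE = 'scrapy.contrib.downloadermiddleware.httpcache.FilesystemCacheStorage'
#   HTTPCACHE_DIR = 'httpcache'
#   HTTPCACHE_EXPIRATION_SECS = 0
#   HTTPCACHE_IGNORE_MISSING = False
#   HTTPCACHE_IGNORE_SCHEMES = ['file']
#   HTTPCACHE_IGNORE_HTTP_CODES = []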
class FilesystemCacheStorage(object):
def __init__(self, settings=conf.settings):
self.cachedir = data_path(settings['HTTPCACHE_DIR'])
self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
def open_spider(self, spider):
pass
def close_spider(self, spider):
pass
def retrieve_response(self, spider, request):
"""Return response if present in cache, or None otherwise."""
metadata = self._read_meta(spider, request)
if metadata is None:
return # not cached
rpath = self._get_request_path(spider, request)
with open(join(rpath, 'response_body'), 'rb') as f:
body = f.read()
with open(join(rpath, 'response_headers'), 'rb') as f:
rawheaders = f.read()
url = metadata.get('response_url')
status = metadata['status']
headers = Headers(headers_raw_to_dict(rawheaders))
respcls = responsetypes.from_args(headers=headers, url=url)
response = respcls(url=url, headers=headers, status=status, body=body)
return response
def store_response(self, spider, request, response):
"""Store the given response in the cache."""
rpath = self._get_request_path(spider, request)
if not exists(rpath):
os.makedirs(rpath)
metadata = {
'url': request.url,
'method': request.method,
'status': response.status,
'response_url': response.url,
'timestamp': time(),
}
with open(join(rpath, 'meta'), 'wb') as f:
f.write(repr(metadata))
with open(join(rpath, 'pickled_meta'), 'wb') as f:
pickle.dump(metadata, f, protocol=2)
with open(join(rpath, 'response_headers'), 'wb') as f:
f.write(headers_dict_to_raw(response.headers))
with open(join(rpath, 'response_body'), 'wb') as f:
f.write(response.body)
with open(join(rpath, 'request_headers'), 'wb') as f:
f.write(headers_dict_to_raw(request.headers))
with open(join(rpath, 'request_body'), 'wb') as f:
f.write(request.body)
def _get_request_path(self, spider, request):
key = request_fingerprint(request)
return join(self.cachedir, spider.name, key[0:2], key)
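    # Illustrative note (not part of the original module): with the layout
    # built above, a cached entry for a spider named "example" whose request
    # fingerprint begins with "ab" would live under a directory roughly like
    #   <HTTPCACHE_DIR>/example/ab/ab12...<full fingerprint>/
    # containing the meta, pickled_meta, response_* and request_* files that
    # store_response() writes. The fingerprint value shown is hypothetical.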
def _read_meta(self, spider, request):
rpath = self._get_request_path(spider, request)
metapath = join(rpath, 'pickled_meta')
if not exists(metapath):
return # not found
mtime = os.stat(rpath).st_mtime
if 0 < self.expiration_secs < time() - mtime:
return # expired
with open(metapath, 'rb') as f:
return pickle.load(f)
|
nep0muck/cavita-bremen | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
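    # Sketch of the intent (hypothetical output, not generated here): easy_xml
    # presumably serializes the ['CustomBuildRule', {...}] list above into an
    # element along the lines of
    #   <CustomBuildRule Name="..." ExecutionDescription="..." CommandLine="..."
    #                    Outputs="a;b" FileExtensions=".x;.y"
    #                    AdditionalDependencies="dep1;dep2"/>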
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
|
litchfield/django | refs/heads/master | django/conf/locale/sk/formats.py | 504 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. F Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
|
PoundPay/wtforms | refs/heads/master | tests/ext_appengine/tests.py | 3 | #!/usr/bin/env python
"""
Unittests for wtforms.ext.appengine
To run the tests, use NoseGAE:
easy_install nose
easy_install nosegae
nosetests --with-gae --without-sandbox
"""
import sys, os
WTFORMS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, WTFORMS_DIR)
from unittest import TestCase
from google.appengine.ext import db
from wtforms import Form, fields as f, validators
from wtforms.ext.appengine.db import model_form
from wtforms.ext.appengine.fields import GeoPtPropertyField
class DummyPostData(dict):
def getlist(self, key):
v = self[key]
if not isinstance(v, (list, tuple)):
v = [v]
return v
class Author(db.Model):
name = db.StringProperty(required=True)
city = db.StringProperty()
age = db.IntegerProperty(required=True)
is_admin = db.BooleanProperty(default=False)
class Book(db.Model):
author = db.ReferenceProperty(Author)
class AllPropertiesModel(db.Model):
"""Property names are ugly, yes."""
prop_string = db.StringProperty()
prop_byte_string = db.ByteStringProperty()
prop_boolean = db.BooleanProperty()
prop_integer = db.IntegerProperty()
prop_float = db.FloatProperty()
prop_date_time = db.DateTimeProperty()
prop_date = db.DateProperty()
prop_time = db.TimeProperty()
prop_list = db.ListProperty(int)
prop_string_list = db.StringListProperty()
prop_reference = db.ReferenceProperty()
prop_self_refeference = db.SelfReferenceProperty()
prop_user = db.UserProperty()
prop_blob = db.BlobProperty()
prop_text = db.TextProperty()
prop_category = db.CategoryProperty()
prop_link = db.LinkProperty()
prop_email = db.EmailProperty()
prop_geo_pt = db.GeoPtProperty()
prop_im = db.IMProperty()
prop_phone_number = db.PhoneNumberProperty()
prop_postal_address = db.PostalAddressProperty()
prop_rating = db.RatingProperty()
class DateTimeModel(db.Model):
prop_date_time_1 = db.DateTimeProperty()
prop_date_time_2 = db.DateTimeProperty(auto_now=True)
prop_date_time_3 = db.DateTimeProperty(auto_now_add=True)
prop_date_1 = db.DateProperty()
prop_date_2 = db.DateProperty(auto_now=True)
prop_date_3 = db.DateProperty(auto_now_add=True)
prop_time_1 = db.TimeProperty()
prop_time_2 = db.TimeProperty(auto_now=True)
prop_time_3 = db.TimeProperty(auto_now_add=True)
class TestModelForm(TestCase):
def tearDown(self):
for entity in Author.all():
db.delete(entity)
for entity in Book.all():
db.delete(entity)
def test_model_form_basic(self):
form_class = model_form(Author)
self.assertEqual(hasattr(form_class, 'name'), True)
self.assertEqual(hasattr(form_class, 'age'), True)
self.assertEqual(hasattr(form_class, 'city'), True)
self.assertEqual(hasattr(form_class, 'is_admin'), True)
form = form_class()
self.assertEqual(isinstance(form.name, f.TextField), True)
self.assertEqual(isinstance(form.city, f.TextField), True)
self.assertEqual(isinstance(form.age, f.IntegerField), True)
self.assertEqual(isinstance(form.is_admin, f.BooleanField), True)
def test_required_field(self):
form_class = model_form(Author)
form = form_class()
self.assertEqual(form.name.flags.required, True)
self.assertEqual(form.city.flags.required, False)
self.assertEqual(form.age.flags.required, True)
self.assertEqual(form.is_admin.flags.required, False)
def test_default_value(self):
form_class = model_form(Author)
form = form_class()
self.assertEqual(form.name.default, None)
self.assertEqual(form.city.default, None)
self.assertEqual(form.age.default, None)
self.assertEqual(form.is_admin.default, False)
def test_model_form_only(self):
form_class = model_form(Author, only=['name', 'age'])
self.assertEqual(hasattr(form_class, 'name'), True)
self.assertEqual(hasattr(form_class, 'city'), False)
self.assertEqual(hasattr(form_class, 'age'), True)
self.assertEqual(hasattr(form_class, 'is_admin'), False)
form = form_class()
self.assertEqual(isinstance(form.name, f.TextField), True)
self.assertEqual(isinstance(form.age, f.IntegerField), True)
def test_model_form_exclude(self):
form_class = model_form(Author, exclude=['is_admin'])
self.assertEqual(hasattr(form_class, 'name'), True)
self.assertEqual(hasattr(form_class, 'city'), True)
self.assertEqual(hasattr(form_class, 'age'), True)
self.assertEqual(hasattr(form_class, 'is_admin'), False)
form = form_class()
self.assertEqual(isinstance(form.name, f.TextField), True)
self.assertEqual(isinstance(form.city, f.TextField), True)
self.assertEqual(isinstance(form.age, f.IntegerField), True)
def test_datetime_model(self):
"""Fields marked as auto_add / auto_add_now should not be included."""
form_class = model_form(DateTimeModel)
self.assertEqual(hasattr(form_class, 'prop_date_time_1'), True)
self.assertEqual(hasattr(form_class, 'prop_date_time_2'), False)
self.assertEqual(hasattr(form_class, 'prop_date_time_3'), False)
self.assertEqual(hasattr(form_class, 'prop_date_1'), True)
self.assertEqual(hasattr(form_class, 'prop_date_2'), False)
self.assertEqual(hasattr(form_class, 'prop_date_3'), False)
self.assertEqual(hasattr(form_class, 'prop_time_1'), True)
self.assertEqual(hasattr(form_class, 'prop_time_2'), False)
self.assertEqual(hasattr(form_class, 'prop_time_3'), False)
def test_not_implemented_properties(self):
# This should not raise NotImplementedError.
form_class = model_form(AllPropertiesModel)
# These should be set.
self.assertEqual(hasattr(form_class, 'prop_string'), True)
self.assertEqual(hasattr(form_class, 'prop_byte_string'), True)
self.assertEqual(hasattr(form_class, 'prop_boolean'), True)
self.assertEqual(hasattr(form_class, 'prop_integer'), True)
self.assertEqual(hasattr(form_class, 'prop_float'), True)
self.assertEqual(hasattr(form_class, 'prop_date_time'), True)
self.assertEqual(hasattr(form_class, 'prop_date'), True)
self.assertEqual(hasattr(form_class, 'prop_time'), True)
self.assertEqual(hasattr(form_class, 'prop_string_list'), True)
self.assertEqual(hasattr(form_class, 'prop_reference'), True)
self.assertEqual(hasattr(form_class, 'prop_self_refeference'), True)
self.assertEqual(hasattr(form_class, 'prop_blob'), True)
self.assertEqual(hasattr(form_class, 'prop_text'), True)
self.assertEqual(hasattr(form_class, 'prop_category'), True)
self.assertEqual(hasattr(form_class, 'prop_link'), True)
self.assertEqual(hasattr(form_class, 'prop_email'), True)
self.assertEqual(hasattr(form_class, 'prop_geo_pt'), True)
self.assertEqual(hasattr(form_class, 'prop_phone_number'), True)
self.assertEqual(hasattr(form_class, 'prop_postal_address'), True)
self.assertEqual(hasattr(form_class, 'prop_rating'), True)
# These should NOT be set.
self.assertEqual(hasattr(form_class, 'prop_list'), False)
self.assertEqual(hasattr(form_class, 'prop_user'), False)
self.assertEqual(hasattr(form_class, 'prop_im'), False)
def test_populate_form(self):
entity = Author(key_name='test', name='John', city='Yukon', age=25, is_admin=True)
entity.put()
obj = Author.get_by_key_name('test')
form_class = model_form(Author)
form = form_class(obj=obj)
self.assertEqual(form.name.data, 'John')
self.assertEqual(form.city.data, 'Yukon')
self.assertEqual(form.age.data, 25)
self.assertEqual(form.is_admin.data, True)
def test_field_attributes(self):
form_class = model_form(Author, field_args={
'name': {
'label': 'Full name',
'description': 'Your name',
},
'age': {
'label': 'Age',
'validators': [validators.NumberRange(min=14, max=99)],
},
'city': {
'label': 'City',
'description': 'The city in which you live, not the one in which you were born.',
},
'is_admin': {
'label': 'Administrative rights',
},
})
form = form_class()
self.assertEqual(form.name.label.text, 'Full name')
self.assertEqual(form.name.description, 'Your name')
self.assertEqual(form.age.label.text, 'Age')
self.assertEqual(form.city.label.text, 'City')
self.assertEqual(form.city.description, 'The city in which you live, not the one in which you were born.')
self.assertEqual(form.is_admin.label.text, 'Administrative rights')
def test_reference_property(self):
keys = ['__None']
for name in ['foo', 'bar', 'baz']:
author = Author(name=name, age=26)
author.put()
keys.append(str(author.key()))
form_class = model_form(Book)
form = form_class()
choices = []
i = 0
for key, name, value in form.author.iter_choices():
self.assertEqual(key, keys[i])
i += 1
class TestFields(TestCase):
class GeoTestForm(Form):
geo = GeoPtPropertyField()
def test_geopt_property(self):
form = self.GeoTestForm(DummyPostData(geo='5.0, -7.0'))
self.assert_(form.validate())
self.assertEquals(form.geo.data, u'5.0,-7.0')
form = self.GeoTestForm(DummyPostData(geo='5.0,-f'))
self.assert_(not form.validate())
|
d-grossman/magichour | refs/heads/master | magichour/validate/templategen.py | 2 | import collections
import distance
import itertools
import numpy as np
import random
from magichour.api.local.util.log import get_logger
from collections import defaultdict
logger = get_logger(__name__)
def sample(objs, ratio, seed=None):
if seed:
random.seed(seed)
return random.sample(objs, max(1, int(ratio * len(objs))))
def mean(l):
return sum(l) / float(len(l))
def logline_distance(logline1, logline2):
return distance.nlevenshtein(
logline1.text.strip().split(),
logline2.text.strip().split())
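# Rough illustration (loglines are hypothetical objects with a .text field):
# the distance is a token-level normalized Levenshtein, so
#   "user login failed"  vs  "user login ok"  ->  1 differing token / 3 ~ 0.33
#   two identical lines                       ->  0.0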
def mean_distance(point, points, distance_fn=logline_distance):
pts = list(points)
if not pts:
return 0
distances = [distance_fn(point, other_point) for other_point in pts]
return mean(distances)
def one_to_others_iter(values):
for idx in xrange(len(values)):
mask = [1] * len(values)
mask[idx] = 0
cur_value = values[idx]
# list(itertools.compress(values, mask))
others = itertools.compress(values, mask)
yield (cur_value, others)
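# Quick sketch of the generator above (the "others" value is a lazy iterator,
# expanded here for readability):
#   ['a', 'b', 'c'] yields ('a', ['b', 'c']), ('b', ['a', 'c']), ('c', ['a', 'b'])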
def intracluster_dists(cluster):
intra_scores = []
for val, others in one_to_others_iter(cluster):
intra = mean_distance(val, others) # mean intracluster distance
intra_scores.append(intra)
return mean(intra_scores)
def multiintracluster_dists(data_dict):
intra_scores = []
keys_to_use = data_dict.keys()
logger.info("Processing %s clusters..." % len(keys_to_use))
for key in keys_to_use:
cluster = data_dict[key]
mean_intra = intracluster_dists(cluster)
intra_scores.append(mean_intra)
return mean(intra_scores)
def validate_intracluster(data_dict, junk_drawer):
logger.info("Processing regular clusters...")
mean_intra = multiintracluster_dists(data_dict)
logger.info("Processing junk drawer...")
mean_jd_intra = intracluster_dists(junk_drawer)
return (mean_intra, mean_jd_intra)
def dist_stats(arr):
np_arr = np.array(arr)
return (np_arr.mean(), np_arr.std())
def template_distance(template1, template2):
return distance.nlevenshtein(
template1.raw_str.strip().split(),
template2.raw_str.strip().split()
)
def intercluster_dists(templates):
inter_dists = []
for cur_template, other_templates in one_to_others_iter(templates):
results = [(template_distance(cur_template, other_template),
other_template) for other_template in other_templates]
results = sorted(results, key=lambda r: r[0]) # sort by distance
# take best one (i.e. distance to the closest template)
best = results[0]
inter_dists.append(best[0])
return mean(inter_dists)
def validation_sample(
eval_loglines,
gen_templates,
iterations,
sampling_ratio=None,
sampling_seed=None):
sample_mean_intras = []
sample_mean_jd_intra = []
sample_mean_inters = []
orig_eval_loglines = eval_loglines
orig_gen_templates = gen_templates
#logger.info("Creating closest cluster map... (eval_loglines = %s, gen_templates = %s)" % (eval_loglines, gen_templates))
#closest_cluster_map = find_closest_templates(eval_loglines, gen_templates)
for itr in xrange(1, iterations + 1):
logger.info("Running iteration %s..." % str(itr))
if sampling_ratio:
eval_loglines = sample(
orig_eval_loglines,
sampling_ratio,
sampling_seed)
relevant_templates = set(
[eval_logline.templateId for eval_logline in eval_loglines])
gen_templates = [
template for template in orig_gen_templates if template.id in relevant_templates]
logger.info("Sampled %s of %s loglines." %
(len(eval_loglines), len(orig_eval_loglines)))
logger.info("Calling intercluster_dists()...")
mean_inter = intercluster_dists(gen_templates)
logger.info("Creating data dictionary and junk drawer...")
data_dict, junk_drawer = get_data_dict_and_jd(eval_loglines)
logger.info("Calling validate_intracluster()...")
mean_intra, mean_jd_intra = validate_intracluster(
data_dict, junk_drawer)
sample_mean_intras.append(mean_intra)
sample_mean_jd_intra.append(mean_jd_intra)
sample_mean_inters.append(mean_inter)
return (dist_stats(sample_mean_intras),
dist_stats(sample_mean_jd_intra),
dist_stats(sample_mean_inters))
###
def closest_template_dist(
logline,
template,
distance_fn=distance.nlevenshtein):
return distance_fn(
logline.processed.strip().split(),
template.raw_str.strip().split())
def find_closest_templates(eval_loglines, templates):
closest_template_map = {}
for eval_logline in eval_loglines:
if eval_logline.processed not in closest_template_map:
scores = []
for template in templates:
if eval_logline.templateId != template.id:
score = closest_template_dist(eval_logline, template)
scores.append((score, template))
scores = sorted(scores, key=lambda x: x[0])
closest_template_map[eval_logline.processed] = scores
return closest_template_map
def get_data_dict_and_jd(eval_loglines):
data_dict = defaultdict(list)
for eval_logline in eval_loglines:
data_dict[eval_logline.templateId].append(eval_logline)
if -1 in data_dict:
junk_drawer = data_dict[-1]
del data_dict[-1]
else:
junk_drawer = []
return data_dict, junk_drawer
|
wenxer/peewee | refs/heads/master | playhouse/tests/test_migrate.py | 12 | import datetime
import os
from peewee import *
from peewee import print_
from playhouse.migrate import *
from playhouse.test_utils import count_queries
from playhouse.tests.base import database_initializer
from playhouse.tests.base import PeeweeTestCase
from playhouse.tests.base import skip_if
try:
from psycopg2cffi import compat
compat.register()
except ImportError:
pass
try:
import psycopg2
except ImportError:
psycopg2 = None
try:
import MySQLdb as mysql
except ImportError:
try:
import pymysql as mysql
except ImportError:
mysql = None
if mysql:
mysql_db = database_initializer.get_database('mysql')
else:
mysql_db = None
if psycopg2:
pg_db = database_initializer.get_database('postgres')
else:
pg_db = None
sqlite_db = SqliteDatabase(':memory:')
class Tag(Model):
tag = CharField()
class Person(Model):
first_name = CharField()
last_name = CharField()
dob = DateField(null=True)
class User(Model):
id = CharField(primary_key=True, max_length=20)
password = CharField(default='secret')
class Meta:
db_table = 'users'
class Page(Model):
name = TextField(unique=True, null=True)
user = ForeignKeyField(User, null=True, related_name='pages')
class IndexModel(Model):
first_name = CharField()
last_name = CharField()
data = IntegerField(unique=True)
class Meta:
database = sqlite_db
indexes = (
(('first_name', 'last_name'), True),
)
MODELS = [
Person,
Tag,
User,
Page,
]
class BaseMigrationTestCase(object):
database = None
migrator_class = None
# Each database behaves slightly differently.
_exception_add_not_null = True
_person_data = [
('Charlie', 'Leifer', None),
('Huey', 'Kitty', datetime.date(2011, 5, 1)),
('Mickey', 'Dog', datetime.date(2008, 6, 1)),
]
def setUp(self):
super(BaseMigrationTestCase, self).setUp()
for model_class in MODELS:
model_class._meta.database = self.database
self.database.drop_tables(MODELS, True)
self.database.create_tables(MODELS)
self.migrator = self.migrator_class(self.database)
if 'newpages' in User._meta.reverse_rel:
del User._meta.reverse_rel['newpages']
delattr(User, 'newpages')
def tearDown(self):
super(BaseMigrationTestCase, self).tearDown()
for model_class in MODELS:
model_class._meta.database = self.database
self.database.drop_tables(MODELS, True)
def test_add_column(self):
# Create some fields with a variety of NULL / default values.
df = DateTimeField(null=True)
df_def = DateTimeField(default=datetime.datetime(2012, 1, 1))
cf = CharField(max_length=200, default='')
bf = BooleanField(default=True)
ff = FloatField(default=0)
# Create two rows in the Tag table to test the handling of adding
# non-null fields.
t1 = Tag.create(tag='t1')
t2 = Tag.create(tag='t2')
# Convenience function for generating `add_column` migrations.
def add_column(field_name, field_obj):
return self.migrator.add_column('tag', field_name, field_obj)
# Run the migration.
migrate(
add_column('pub_date', df),
add_column('modified_date', df_def),
add_column('comment', cf),
add_column('is_public', bf),
add_column('popularity', ff))
# Create a new tag model to represent the fields we added.
class NewTag(Model):
tag = CharField()
pub_date = df
modified_date = df_def
comment = cf
is_public = bf
popularity = ff
class Meta:
database = self.database
db_table = Tag._meta.db_table
query = (NewTag
.select(
NewTag.id,
NewTag.tag,
NewTag.pub_date,
NewTag.modified_date,
NewTag.comment,
NewTag.is_public,
NewTag.popularity)
.order_by(NewTag.tag.asc()))
# Verify the resulting rows are correct.
self.assertEqual(list(query.tuples()), [
(t1.id, 't1', None, datetime.datetime(2012, 1, 1), '', True, 0.0),
(t2.id, 't2', None, datetime.datetime(2012, 1, 1), '', True, 0.0),
])
def _create_people(self):
for first, last, dob in self._person_data:
Person.create(first_name=first, last_name=last, dob=dob)
def get_column_names(self, tbl):
cursor = self.database.execute_sql('select * from %s limit 1' % tbl)
return set([col[0] for col in cursor.description])
def test_drop_column(self):
self._create_people()
migrate(
self.migrator.drop_column('person', 'last_name'),
self.migrator.drop_column('person', 'dob'))
column_names = self.get_column_names('person')
self.assertEqual(column_names, set(['id', 'first_name']))
User.create(id='charlie', password='12345')
User.create(id='huey', password='meow')
migrate(self.migrator.drop_column('users', 'password'))
column_names = self.get_column_names('users')
self.assertEqual(column_names, set(['id']))
data = [row for row in User.select(User.id).order_by(User.id).tuples()]
self.assertEqual(data, [
('charlie',),
('huey',),])
def test_rename_column(self):
self._create_people()
migrate(
self.migrator.rename_column('person', 'first_name', 'first'),
self.migrator.rename_column('person', 'last_name', 'last'))
column_names = self.get_column_names('person')
self.assertEqual(column_names, set(['id', 'first', 'last', 'dob']))
class NewPerson(Model):
first = CharField()
last = CharField()
dob = DateField()
class Meta:
database = self.database
db_table = Person._meta.db_table
query = (NewPerson
.select(
NewPerson.first,
NewPerson.last,
NewPerson.dob)
.order_by(NewPerson.first))
self.assertEqual(list(query.tuples()), self._person_data)
def test_rename_gh380(self):
u1 = User.create(id='charlie')
u2 = User.create(id='huey')
p1 = Page.create(name='p1-1', user=u1)
p2 = Page.create(name='p2-1', user=u1)
p3 = Page.create(name='p3-2', user=u2)
migrate(self.migrator.rename_column('page', 'name', 'title'))
column_names = self.get_column_names('page')
self.assertEqual(column_names, set(['id', 'title', 'user_id']))
class NewPage(Model):
title = TextField(unique=True, null=True)
user = ForeignKeyField(User, null=True, related_name='newpages')
class Meta:
database = self.database
db_table = Page._meta.db_table
query = (NewPage
.select(
NewPage.title,
NewPage.user)
.order_by(NewPage.title))
self.assertEqual(
[(np.title, np.user.id) for np in query],
[('p1-1', 'charlie'), ('p2-1', 'charlie'), ('p3-2', 'huey')])
def test_add_not_null(self):
self._create_people()
def addNotNull():
with self.database.transaction():
migrate(self.migrator.add_not_null('person', 'dob'))
# We cannot make the `dob` field not null because there is currently
# a null value there.
if self._exception_add_not_null:
self.assertRaises(IntegrityError, addNotNull)
(Person
.update(dob=datetime.date(2000, 1, 2))
.where(Person.dob >> None)
.execute())
# Now we can make the column not null.
addNotNull()
# And attempting to insert a null value results in an integrity error.
with self.database.transaction():
self.assertRaises(
IntegrityError,
Person.create,
first_name='Kirby',
last_name='Snazebrauer',
dob=None)
def test_drop_not_null(self):
self._create_people()
migrate(
self.migrator.drop_not_null('person', 'first_name'),
self.migrator.drop_not_null('person', 'last_name'))
p = Person.create(first_name=None, last_name=None)
query = (Person
.select()
.where(
(Person.first_name >> None) &
(Person.last_name >> None)))
self.assertEqual(query.count(), 1)
def test_rename_table(self):
t1 = Tag.create(tag='t1')
t2 = Tag.create(tag='t2')
# Move the tag data into a new model/table.
class Tag_asdf(Tag):
pass
self.assertEqual(Tag_asdf._meta.db_table, 'tag_asdf')
# Drop the new table just to be safe.
Tag_asdf.drop_table(True)
# Rename the tag table.
migrate(self.migrator.rename_table('tag', 'tag_asdf'))
# Verify the data was moved.
query = (Tag_asdf
.select()
.order_by(Tag_asdf.tag))
self.assertEqual([t.tag for t in query], ['t1', 't2'])
# Verify the old table is gone.
with self.database.transaction():
self.assertRaises(
DatabaseError,
Tag.create,
tag='t3')
def test_add_index(self):
# Create a unique index on first and last names.
columns = ('first_name', 'last_name')
migrate(self.migrator.add_index('person', columns, True))
Person.create(first_name='first', last_name='last')
with self.database.transaction():
self.assertRaises(
IntegrityError,
Person.create,
first_name='first',
last_name='last')
def test_drop_index(self):
# Create a unique index.
self.test_add_index()
# Now drop the unique index.
migrate(
self.migrator.drop_index('person', 'person_first_name_last_name'))
Person.create(first_name='first', last_name='last')
query = (Person
.select()
.where(
(Person.first_name == 'first') &
(Person.last_name == 'last')))
self.assertEqual(query.count(), 2)
def test_add_and_remove(self):
operations = []
field = CharField(default='foo')
for i in range(10):
operations.append(self.migrator.add_column('tag', 'foo', field))
operations.append(self.migrator.drop_column('tag', 'foo'))
migrate(*operations)
col_names = self.get_column_names('tag')
self.assertEqual(col_names, set(['id', 'tag']))
def test_multiple_operations(self):
self.database.execute_sql('drop table if exists person_baze;')
self.database.execute_sql('drop table if exists person_nugg;')
self._create_people()
field_n = CharField(null=True)
field_d = CharField(default='test')
operations = [
self.migrator.add_column('person', 'field_null', field_n),
self.migrator.drop_column('person', 'first_name'),
self.migrator.add_column('person', 'field_default', field_d),
self.migrator.rename_table('person', 'person_baze'),
self.migrator.rename_table('person_baze', 'person_nugg'),
self.migrator.rename_column('person_nugg', 'last_name', 'last'),
self.migrator.add_index('person_nugg', ('last',), True),
]
migrate(*operations)
class PersonNugg(Model):
field_null = field_n
field_default = field_d
last = CharField()
dob = DateField(null=True)
class Meta:
database = self.database
db_table = 'person_nugg'
people = (PersonNugg
.select(
PersonNugg.field_null,
PersonNugg.field_default,
PersonNugg.last,
PersonNugg.dob)
.order_by(PersonNugg.last)
.tuples())
expected = [
(None, 'test', 'Dog', datetime.date(2008, 6, 1)),
(None, 'test', 'Kitty', datetime.date(2011, 5, 1)),
(None, 'test', 'Leifer', None),
]
self.assertEqual(list(people), expected)
with self.database.transaction():
self.assertRaises(
IntegrityError,
PersonNugg.create,
last='Leifer',
field_default='bazer')
def test_add_foreign_key(self):
if hasattr(Person, 'newtag_set'):
delattr(Person, 'newtag_set')
del Person._meta.reverse_rel['newtag_set']
# Ensure no foreign keys are present at the beginning of the test.
self.assertEqual(self.database.get_foreign_keys('tag'), [])
field = ForeignKeyField(Person, null=True, to_field=Person.id)
migrate(self.migrator.add_column('tag', 'person_id', field))
class NewTag(Tag):
person = field
class Meta:
db_table = 'tag'
p = Person.create(first_name='First', last_name='Last')
t1 = NewTag.create(tag='t1', person=p)
t2 = NewTag.create(tag='t2')
t1_db = NewTag.get(NewTag.tag == 't1')
self.assertEqual(t1_db.person, p)
t2_db = NewTag.get(NewTag.tag == 't2')
self.assertEqual(t2_db.person, None)
foreign_keys = self.database.get_foreign_keys('tag')
self.assertEqual(len(foreign_keys), 1)
foreign_key = foreign_keys[0]
self.assertEqual(foreign_key.column, 'person_id')
self.assertEqual(foreign_key.dest_column, 'id')
self.assertEqual(foreign_key.dest_table, 'person')
def test_drop_foreign_key(self):
migrate(self.migrator.drop_column('page', 'user_id'))
columns = self.database.get_columns('page')
self.assertEqual(
sorted(column.name for column in columns),
['id', 'name'])
self.assertEqual(self.database.get_foreign_keys('page'), [])
def test_rename_foreign_key(self):
migrate(self.migrator.rename_column('page', 'user_id', 'huey_id'))
columns = self.database.get_columns('page')
self.assertEqual(
sorted(column.name for column in columns),
['huey_id', 'id', 'name'])
foreign_keys = self.database.get_foreign_keys('page')
self.assertEqual(len(foreign_keys), 1)
foreign_key = foreign_keys[0]
self.assertEqual(foreign_key.column, 'huey_id')
self.assertEqual(foreign_key.dest_column, 'id')
self.assertEqual(foreign_key.dest_table, 'users')
class SqliteMigrationTestCase(BaseMigrationTestCase, PeeweeTestCase):
database = sqlite_db
migrator_class = SqliteMigrator
def setUp(self):
super(SqliteMigrationTestCase, self).setUp()
IndexModel.drop_table(True)
IndexModel.create_table()
def test_valid_column_required(self):
self.assertRaises(
ValueError,
migrate,
self.migrator.drop_column('page', 'column_does_not_exist'))
self.assertRaises(
ValueError,
migrate,
self.migrator.rename_column('page', 'xx', 'yy'))
def test_table_case_insensitive(self):
migrate(self.migrator.drop_column('PaGe', 'name'))
column_names = self.get_column_names('page')
self.assertEqual(column_names, set(['id', 'user_id']))
testing_field = CharField(default='xx')
migrate(self.migrator.add_column('pAGE', 'testing', testing_field))
column_names = self.get_column_names('page')
self.assertEqual(column_names, set(['id', 'user_id', 'testing']))
migrate(self.migrator.drop_column('indeXmOdel', 'first_name'))
indexes = self.migrator.database.get_indexes('indexmodel')
self.assertEqual(len(indexes), 1)
self.assertEqual(indexes[0].name, 'indexmodel_data')
def test_add_column_indexed_table(self):
# Ensure that columns can be added to tables that have indexes.
field = CharField(default='')
migrate(self.migrator.add_column('indexmodel', 'foo', field))
db = self.migrator.database
columns = db.get_columns('indexmodel')
self.assertEqual(sorted(column.name for column in columns),
['data', 'first_name', 'foo', 'id', 'last_name'])
indexes = db.get_indexes('indexmodel')
self.assertEqual(
sorted((index.name, index.columns) for index in indexes),
[('indexmodel_data', ['data']),
('indexmodel_first_name_last_name', ['first_name', 'last_name'])])
def test_index_preservation(self):
with count_queries() as qc:
migrate(self.migrator.rename_column(
'indexmodel',
'first_name',
'first'))
queries = [log.msg for log in qc.get_queries()]
self.assertEqual(queries, [
# Get all the columns.
('PRAGMA table_info("indexmodel")', None),
# Get the table definition.
('select name, sql from sqlite_master '
'where type=? and LOWER(name)=?',
['table', 'indexmodel']),
# Get the indexes and indexed columns for the table.
('SELECT name, sql FROM sqlite_master '
'WHERE tbl_name = ? AND type = ? ORDER BY name',
('indexmodel', 'index')),
('PRAGMA index_list("indexmodel")', None),
('PRAGMA index_info("indexmodel_data")', None),
('PRAGMA index_info("indexmodel_first_name_last_name")', None),
# Get foreign keys.
('PRAGMA foreign_key_list("indexmodel")', None),
# Drop any temporary table, if it exists.
('DROP TABLE IF EXISTS "indexmodel__tmp__"', []),
# Create a temporary table with the renamed column.
('CREATE TABLE "indexmodel__tmp__" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"first" VARCHAR(255) NOT NULL, '
'"last_name" VARCHAR(255) NOT NULL, '
'"data" INTEGER NOT NULL)', []),
# Copy data from original table into temporary table.
('INSERT INTO "indexmodel__tmp__" '
'("id", "first", "last_name", "data") '
'SELECT "id", "first_name", "last_name", "data" '
'FROM "indexmodel"', []),
# Drop the original table.
('DROP TABLE "indexmodel"', []),
# Rename the temporary table, replacing the original.
('ALTER TABLE "indexmodel__tmp__" RENAME TO "indexmodel"', []),
# Re-create the indexes.
('CREATE UNIQUE INDEX "indexmodel_data" '
'ON "indexmodel" ("data")', []),
('CREATE UNIQUE INDEX "indexmodel_first_last_name" '
'ON "indexmodel" ("first", "last_name")', [])
])
@skip_if(lambda: psycopg2 is None)
class PostgresqlMigrationTestCase(BaseMigrationTestCase, PeeweeTestCase):
database = pg_db
migrator_class = PostgresqlMigrator
@skip_if(lambda: mysql is None)
class MySQLMigrationTestCase(BaseMigrationTestCase, PeeweeTestCase):
database = mysql_db
migrator_class = MySQLMigrator
# MySQL does not raise an exception when adding a not null constraint
# to a column that contains NULL values.
_exception_add_not_null = False
|
klebercode/canaa | refs/heads/master | canaa/catalog/views.py | 1 | # coding: utf-8
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.template import RequestContext
from django.utils import translation
from slim.helpers import get_language_from_request
from canaa.context_processors import enterprise_proc, back_proc
from canaa.catalog.models import ProductGroup, Product, ProductInfo
def group(request):
context = {}
language = get_language_from_request(request)
results_kwargs = {}
if language is not None:
translation.activate(language)
results_kwargs.update({'language': language, 'visible': True})
# g_list = ProductGroup.objects.filter(visible=True)
g_list = ProductGroup.objects.filter(**results_kwargs)
search = request.GET.get('search', '')
if search:
g_list = g_list.filter(Q(name__icontains=search) |
Q(description__icontains=search))
paginator = Paginator(g_list, 6)
page = request.GET.get('page')
try:
groups = paginator.page(page)
except PageNotAnInteger:
groups = paginator.page(1)
except EmptyPage:
groups = paginator.page(paginator.num_pages)
context['groups'] = groups
context['search'] = search
return render(request, 'catalog/catalog_group.html', context,
context_instance=RequestContext(request,
processors=[enterprise_proc,
back_proc]
))
def item(request, group):
context = {}
p_list = Product.objects.filter(product_group__slug=group,
visible=True)
search = request.GET.get('search', '')
if search:
p_list = p_list.filter(Q(name__icontains=search) |
Q(description__icontains=search))
paginator = Paginator(p_list, 6)
page = request.GET.get('page')
try:
products = paginator.page(page)
except PageNotAnInteger:
products = paginator.page(1)
except EmptyPage:
products = paginator.page(paginator.num_pages)
context['products'] = products
context['search'] = search
    # left the slug filter in place to keep the filter smaller
context['infos'] = ProductInfo.objects.filter(product_group__slug=group,
visible=True)
return render(request, 'catalog/catalog_item.html', context,
context_instance=RequestContext(request,
processors=[enterprise_proc,
back_proc]
))
def detail(request, group, item):
context = {}
return render(request, 'catalog/catalog_detail.html', context,
context_instance=RequestContext(request,
processors=[enterprise_proc,
back_proc]
))
|
SatelliteQE/automation-tools | refs/heads/master | automation_tools/bz.py | 9 | # -*- encoding: utf-8 -*-
"""
Collection of functions to work with Bugzilla and Redmine.
Copied from robottelo's robottelo/decorators/__init__.py.
"""
import bugzilla
import logging
import requests
from six.moves.xmlrpc_client import Fault
from xml.parsers.expat import ExpatError, ErrorString
BZ_OPEN_STATUSES = [
'NEW',
'ASSIGNED',
'POST',
'MODIFIED'
]
BUGZILLA_URL = "https://bugzilla.redhat.com/xmlrpc.cgi"
LOGGER = logging.getLogger(__name__)
OBJECT_CACHE = {}
REDMINE_URL = 'http://projects.theforeman.org'
# A cache mapping bug IDs to python-bugzilla bug objects.
_bugzilla = {}
# A cache used by redmine-related functions.
# * _redmine['closed_statuses'] is used by `_redmine_closed_issue_statuses`
#
_redmine = {
'closed_statuses': None,
'issues': {},
}
class BugFetchError(Exception):
"""Indicates an error occurred while fetching information about a bug."""
def _get_bugzilla_bug(bug_id):
"""Fetch bug ``bug_id``.
:param int bug_id: The ID of a bug in the Bugzilla database.
:return: A FRIGGIN UNDOCUMENTED python-bugzilla THING.
:raises BugFetchError: If an error occurs while fetching the bug. For
example, a network timeout occurs or the bug does not exist.
"""
# Is bug ``bug_id`` in the cache?
if bug_id in _bugzilla:
LOGGER.debug('Bugzilla bug {0} found in cache.'.format(bug_id))
else:
LOGGER.info('Bugzilla bug {0} not in cache. Fetching.'.format(bug_id))
# Make a network connection to the Bugzilla server.
try:
bz_conn = bugzilla.RHBugzilla()
bz_conn.connect(BUGZILLA_URL)
except (TypeError, ValueError):
raise BugFetchError(
'Could not connect to {0}'.format(BUGZILLA_URL)
)
# Fetch the bug and place it in the cache.
try:
_bugzilla[bug_id] = bz_conn.getbugsimple(bug_id)
except Fault as err:
raise BugFetchError(
'Could not fetch bug. Error: {0}'.format(err.faultString)
)
except ExpatError as err:
raise BugFetchError(
'Could not interpret bug. Error: {0}'
.format(ErrorString(err.code))
)
return _bugzilla[bug_id]
def _redmine_closed_issue_statuses():
"""Return a list of issue status IDs which indicate an issue is closed.
This list of issue status IDs is not hard-coded. Instead, the Redmine
server is consulted when generating this list.
:return: Statuses which indicate an issue is closed.
:rtype: list
"""
# Is the list of closed statuses cached?
if _redmine['closed_statuses'] is None:
result = requests.get('%s/issue_statuses.json' % REDMINE_URL).json()
# We've got a list of *all* statuses. Let's throw only *closed*
# statuses in the cache.
_redmine['closed_statuses'] = []
for issue_status in result['issue_statuses']:
if issue_status.get('is_closed', False):
_redmine['closed_statuses'].append(issue_status['id'])
return _redmine['closed_statuses']
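# For reference, the endpoint above is expected to return JSON shaped roughly
# like the following (values are illustrative only):
#   {"issue_statuses": [{"id": 1, "name": "New"},
#                       {"id": 5, "name": "Closed", "is_closed": true}]}
# so the cache ends up holding only the IDs whose "is_closed" flag is true.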
def _get_redmine_bug_status_id(bug_id):
"""Fetch bug ``bug_id``.
:param int bug_id: The ID of a bug in the Redmine database.
:return: The status ID of that bug.
:raises BugFetchError: If an error occurs while fetching the bug. For
example, a network timeout occurs or the bug does not exist.
"""
if bug_id in _redmine['issues']:
LOGGER.debug('Redmine bug {0} found in cache.'.format(bug_id))
else:
# Get info about bug.
LOGGER.info('Redmine bug {0} not in cache. Fetching.'.format(bug_id))
result = requests.get(
'{0}/issues/{1}.json'.format(REDMINE_URL, bug_id)
)
if result.status_code != 200:
raise BugFetchError(
'Redmine bug {0} does not exist'.format(bug_id)
)
result = result.json()
# Place bug into cache.
try:
_redmine['issues'][bug_id] = result['issue']['status']['id']
except KeyError as err:
raise BugFetchError(
'Could not get status ID of Redmine bug {0}. Error: {1}'.
format(bug_id, err)
)
return _redmine['issues'][bug_id]
def bz_bug_is_open(bug_id, upstream=False):
"""Tell whether Bugzilla bug ``bug_id`` is open.
If information about bug ``bug_id`` cannot be fetched, the bug is assumed
to be closed.
:param bug_id: The ID of the bug being inspected.
:param bool upstream: Flag whether we run on upstream.
:return: ``True`` if the bug is open. ``False`` otherwise.
:rtype: bool
"""
bug = None
try:
bug = _get_bugzilla_bug(bug_id)
except BugFetchError as err:
LOGGER.warning(err)
return False
    # NOT_FOUND, ON_QA, VERIFIED, RELEASE_PENDING, CLOSED
if bug is None or bug.status not in BZ_OPEN_STATUSES:
return False
# running on upstream and whiteboard is 'Verified in Upstream'
elif (upstream and
bug.whiteboard and 'verified in upstream' in bug.whiteboard.lower()):
return False
# NEW, ASSIGNED, MODIFIED, POST
return True
def rm_bug_is_open(bug_id):
"""Tell whether Redmine bug ``bug_id`` is open.
If information about bug ``bug_id`` cannot be fetched, the bug is assumed
to be closed.
:param bug_id: The ID of the bug being inspected.
:return: ``True`` if the bug is open. ``False`` otherwise.
:rtype: bool
"""
status_id = None
try:
status_id = _get_redmine_bug_status_id(bug_id)
except BugFetchError as err:
LOGGER.warning(err)
if status_id is None or status_id in _redmine_closed_issue_statuses():
return False
return True
|
SeanHeuer/Rachael | refs/heads/master | bots/rachael.py | 2 | import re
import bot
class Rachael(bot.bot):
hello = re.compile(r'^Hello.*')
test = re.compile(r'^Test.*')
test2 = re.compile(r'^Hi.*')
def parse(self, sender, channel, message):
print "Sender: " + sender
print "Message: " + message
if self.hello.search(message) is not None:
self.irc.send('PRIVMSG ' + channel + ' :Hello!\r\n')
elif self.test.search(message) is not None:
self.irc.send('PRIVMSG ' + channel + ' :Indeed...\r\n')
|
dostavro/dotfiles | refs/heads/master | sublime2/Packages/SublimeCodeIntel/libs/zope/cachedescriptors/__init__.py | 9480 | #
|
jtrag/namebench | refs/heads/master | libnamebench/addr_util.py | 173 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions related to IP Addresses & Hostnames."""
# TODO(tstromberg): Investigate replacement with ipaddr library
__author__ = '[email protected] (Thomas Stromberg)'
import re
import zlib
import util
# TODO(tstromberg): Find a way to combine the following two regexps.
# Used to decide whether or not to benchmark a name
INTERNAL_RE = re.compile('^0|\.pro[md]z*\.|\.corp|\.bor|\.hot$|internal|dmz|'
'\._[ut][dc]p\.|intra|\.\w$|\.\w{5,}$', re.IGNORECASE)
# Used to decide if a hostname should be censored later.
PRIVATE_RE = re.compile('^\w+dc\.|^\w+ds\.|^\w+sv\.|^\w+nt\.|\.corp|internal|'
'intranet|\.local', re.IGNORECASE)
# ^.*[\w-]+\.[\w-]+\.[\w-]+\.[a-zA-Z]+\.$|^[\w-]+\.[\w-]{3,}\.[a-zA-Z]+\.$
FQDN_RE = re.compile('^.*\..*\..*\..*\.$|^.*\.[\w-]*\.\w{3,4}\.$|^[\w-]+\.[\w-]{4,}\.\w+\.')
IP_RE = re.compile('^[0-9.]+$')
KNOWN_SECOND_DOMAINS = [x.rstrip() for x in open(util.FindDataFile('data/second_level_domains.txt')).readlines()]
def ExtractIPsFromString(ip_string):
"""Return a tuple of ip addressed held in a string."""
ips = []
# IPV6 If this regexp is too loose, see Regexp-IPv6 in CPAN for inspiration.
ips.extend(re.findall('[\dabcdef:]+:[\dabcdef:]+', ip_string, re.IGNORECASE))
for ip in re.findall('\d+\.\d+\.\d+\.+\d+', ip_string):
# Remove any leading zeros
ips.append(re.sub('\.0(\d+)', '.\\1', ip))
return ips
def IsPrivateHostname(hostname):
"""Basic matching to determine if the hostname is likely to be 'internal'."""
if PRIVATE_RE.search(hostname):
return True
else:
return False
def GetNetworkForIp(ip, reverse=False):
"""Turn into a class C."""
if '.' in ip:
bits = ip.split('.')[0:3]
if reverse:
bits.reverse()
return '.'.join(bits)
elif ':' in ip:
print "GetNetworkForIp() does not yet support IPv6"
return None
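# Rough usage sketch (not executed):
#   GetNetworkForIp('192.168.1.55')                -> '192.168.1'
#   GetNetworkForIp('192.168.1.55', reverse=True)  -> '1.168.192'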
def GetDomainFromHostname(hostname):
"""Get the domain part of a hostname."""
for second_level in KNOWN_SECOND_DOMAINS:
if hostname.lower().endswith(second_level):
custom_part = hostname.replace(second_level, '').split('.')[-1]
return '%s%s' % (custom_part, second_level)
return '.'.join(hostname.split('.')[-2:]).lower()
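# Hedged example (assumes '.co.uk' is listed in data/second_level_domains.txt):
#   GetDomainFromHostname('ns1.example.com')    -> 'example.com'
#   GetDomainFromHostname('www.example.co.uk')  -> 'example.co.uk'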
def GetProviderPartOfHostname(hostname):
"""Get the custom patr of a hostname"""
return GetDomainFromHostname(hostname).split('.')[0]
def IsLoopbackIP(ip):
"""Boolean check to see if an IP is private or not.
Args:
ip: str
Returns:
Boolean
"""
if ip.startswith('127.') or ip == '::1':
return True
else:
return False
def IsPrivateIP(ip):
"""Boolean check to see if an IP is private or not.
Args:
ip: str
Returns:
Number of bits that should be preserved (int, or None)
"""
if re.match('^10\.', ip):
return 1
elif re.match('^192\.168', ip):
return 2
elif re.match('^172\.(1[6-9]|2[0-9]|3[0-1])\.', ip):
return 1
else:
return None
def MaskStringWithIPs(string):
"""Mask all private IP addresses listed in a string."""
ips = ExtractIPsFromString(string)
for ip in ips:
use_bits = IsPrivateIP(ip)
if use_bits:
masked_ip = MaskIPBits(ip, use_bits)
string = string.replace(ip, masked_ip)
return string
def MaskIPBits(ip, use_bits):
"""Mask an IP, but still keep a meaningful checksum."""
ip_parts = ip.split('.')
checksum = zlib.crc32(''.join(ip_parts[use_bits:]))
masked_ip = '.'.join(ip_parts[0:use_bits])
return masked_ip + '.x-' + str(checksum)[-4:]
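# Behaviour sketch (the checksum digits are placeholders, not computed here):
#   MaskIPBits('10.1.2.3', 1)     -> '10.x-NNNN'
#   MaskIPBits('192.168.0.5', 2)  -> '192.168.x-NNNN'
# where NNNN are the last four digits of a CRC32 over the dropped octets.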
def MaskPrivateHost(ip, hostname, name):
"""Mask unnamed private IP's."""
# If we have a name not listed as SYS-x.x.x.x, then we're clear.
if name and ip not in name:
# Even if we are listed (Internal 192-0-1 for instance), data can leak via hostname.
if IsPrivateIP(ip):
hostname = 'internal.ip'
return (ip, hostname, name)
use_bits = IsPrivateIP(ip)
if use_bits:
masked_ip = MaskIPBits(ip, use_bits)
masked_hostname = 'internal.ip'
elif IsPrivateHostname(hostname):
masked_ip = MaskIPBits(ip, 2)
masked_hostname = 'internal.name'
else:
masked_ip = ip
masked_hostname = hostname
if 'SYS-' in name:
masked_name = 'SYS-%s' % masked_ip
else:
masked_name = ''
return (masked_ip, masked_hostname, masked_name)
if __name__ == '__main__':
print MaskStringWithIPs('10.0.0.1 has a sharing relationship with 192.168.0.1 and 8.8.8.8')
|
NatTuck/teg | refs/heads/master | python/client/tegclient.py | 2 | ## $Id: tegclient.py,v 1.6 2003/09/14 04:31:39 riq Exp $
##
## Tenes Empanadas Graciela
##
## Copyright (C) 2000,2003 Ricardo Quesada
##
## Author: Ricardo Calixto Quesada <[email protected]>
# python modules
import pygame
# teg modules
import gui
from gui import twidget_manager
from gui import ttest
import theme
class TegClient( object ):
def __init__( self ):
pygame.init()
theme.Theme()
surface = pygame.display.set_mode( (800,600) )
gui.twidget_manager.TWidgetManager.sdl_surface = surface
def run( self ):
gui.ttest.TTest().run()
if __name__ == '__main__':
aTeg = TegClient()
aTeg.run()
|
zhukaixy/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/test_faulthandler.py | 60 | from contextlib import contextmanager
import datetime
import faulthandler
import os
import re
import signal
import subprocess
import sys
from test import support, script_helper
from test.script_helper import assert_python_ok
import tempfile
import unittest
from textwrap import dedent
try:
import threading
HAVE_THREADS = True
except ImportError:
HAVE_THREADS = False
TIMEOUT = 0.5
def expected_traceback(lineno1, lineno2, header, min_count=1):
regex = header
regex += ' File "<string>", line %s in func\n' % lineno1
regex += ' File "<string>", line %s in <module>' % lineno2
if 1 < min_count:
return '^' + (regex + '\n') * (min_count - 1) + regex
else:
return '^' + regex + '$'
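# For illustration, expected_traceback(3, 5, 'Stack:\n') builds a pattern
# roughly like:
#   ^Stack:\n File "<string>", line 3 in func\n File "<string>", line 5 in <module>$
# (the exact spacing follows the string literals above).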
@contextmanager
def temporary_filename():
filename = tempfile.mktemp()
try:
yield filename
finally:
support.unlink(filename)
class FaultHandlerTests(unittest.TestCase):
def get_output(self, code, filename=None):
"""
Run the specified code in Python (in a new child process) and read the
output from the standard error or from a file (if filename is set).
Return the output lines as a list.
Strip the reference count from the standard error for Python debug
build, and replace "Current thread 0x00007f8d8fbd9700" by "Current
thread XXX".
"""
code = dedent(code).strip()
with support.SuppressCrashReport():
process = script_helper.spawn_python('-c', code)
stdout, stderr = process.communicate()
exitcode = process.wait()
output = support.strip_python_stderr(stdout)
output = output.decode('ascii', 'backslashreplace')
if filename:
self.assertEqual(output, '')
with open(filename, "rb") as fp:
output = fp.read()
output = output.decode('ascii', 'backslashreplace')
output = re.sub('Current thread 0x[0-9a-f]+',
'Current thread XXX',
output)
return output.splitlines(), exitcode
def check_fatal_error(self, code, line_number, name_regex,
filename=None, all_threads=True, other_regex=None):
"""
Check that the fault handler for fatal errors is enabled and check the
traceback from the child process output.
Raise an error if the output doesn't match the expected format.
"""
if all_threads:
header = 'Current thread XXX (most recent call first)'
else:
header = 'Stack (most recent call first)'
regex = """
^Fatal Python error: {name}
{header}:
File "<string>", line {lineno} in <module>
"""
regex = dedent(regex.format(
lineno=line_number,
name=name_regex,
header=re.escape(header))).strip()
if other_regex:
regex += '|' + other_regex
output, exitcode = self.get_output(code, filename)
output = '\n'.join(output)
self.assertRegex(output, regex)
self.assertNotEqual(exitcode, 0)
@unittest.skipIf(sys.platform.startswith('aix'),
"the first page of memory is a mapped read-only on AIX")
def test_read_null(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._read_null()
""",
3,
# Issue #12700: Read NULL raises SIGILL on Mac OS X Lion
'(?:Segmentation fault|Bus error|Illegal instruction)')
def test_sigsegv(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._sigsegv()
""",
3,
'Segmentation fault')
def test_sigabrt(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._sigabrt()
""",
3,
'Aborted')
@unittest.skipIf(sys.platform == 'win32',
"SIGFPE cannot be caught on Windows")
def test_sigfpe(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._sigfpe()
""",
3,
'Floating point exception')
@unittest.skipIf(not hasattr(faulthandler, '_sigbus'),
"need faulthandler._sigbus()")
def test_sigbus(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._sigbus()
""",
3,
'Bus error')
@unittest.skipIf(not hasattr(faulthandler, '_sigill'),
"need faulthandler._sigill()")
def test_sigill(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._sigill()
""",
3,
'Illegal instruction')
def test_fatal_error(self):
self.check_fatal_error("""
import faulthandler
faulthandler._fatal_error(b'xyz')
""",
2,
'xyz')
@unittest.skipIf(sys.platform.startswith('openbsd') and HAVE_THREADS,
"Issue #12868: sigaltstack() doesn't work on "
"OpenBSD if Python is compiled with pthread")
@unittest.skipIf(not hasattr(faulthandler, '_stack_overflow'),
'need faulthandler._stack_overflow()')
def test_stack_overflow(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._stack_overflow()
""",
3,
'(?:Segmentation fault|Bus error)',
other_regex='unable to raise a stack overflow')
def test_gil_released(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._read_null(True)
""",
3,
'(?:Segmentation fault|Bus error|Illegal instruction)')
def test_enable_file(self):
with temporary_filename() as filename:
self.check_fatal_error("""
import faulthandler
output = open({filename}, 'wb')
faulthandler.enable(output)
faulthandler._sigsegv()
""".format(filename=repr(filename)),
4,
'Segmentation fault',
filename=filename)
def test_enable_single_thread(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable(all_threads=False)
faulthandler._sigsegv()
""",
3,
'Segmentation fault',
all_threads=False)
def test_disable(self):
code = """
import faulthandler
faulthandler.enable()
faulthandler.disable()
faulthandler._sigsegv()
"""
not_expected = 'Fatal Python error'
stderr, exitcode = self.get_output(code)
        stderr = '\n'.join(stderr)
self.assertTrue(not_expected not in stderr,
"%r is present in %r" % (not_expected, stderr))
self.assertNotEqual(exitcode, 0)
def test_is_enabled(self):
orig_stderr = sys.stderr
try:
# regrtest may replace sys.stderr by io.StringIO object, but
# faulthandler.enable() requires that sys.stderr has a fileno()
# method
sys.stderr = sys.__stderr__
was_enabled = faulthandler.is_enabled()
try:
faulthandler.enable()
self.assertTrue(faulthandler.is_enabled())
faulthandler.disable()
self.assertFalse(faulthandler.is_enabled())
finally:
if was_enabled:
faulthandler.enable()
else:
faulthandler.disable()
finally:
sys.stderr = orig_stderr
def test_disabled_by_default(self):
# By default, the module should be disabled
code = "import faulthandler; print(faulthandler.is_enabled())"
args = (sys.executable, '-E', '-c', code)
# don't use assert_python_ok() because it always enable faulthandler
output = subprocess.check_output(args)
self.assertEqual(output.rstrip(), b"False")
def test_sys_xoptions(self):
# Test python -X faulthandler
code = "import faulthandler; print(faulthandler.is_enabled())"
args = (sys.executable, "-E", "-X", "faulthandler", "-c", code)
# don't use assert_python_ok() because it always enable faulthandler
output = subprocess.check_output(args)
self.assertEqual(output.rstrip(), b"True")
def test_env_var(self):
# empty env var
code = "import faulthandler; print(faulthandler.is_enabled())"
args = (sys.executable, "-c", code)
env = os.environ.copy()
env['PYTHONFAULTHANDLER'] = ''
# don't use assert_python_ok() because it always enable faulthandler
output = subprocess.check_output(args, env=env)
self.assertEqual(output.rstrip(), b"False")
# non-empty env var
env = os.environ.copy()
env['PYTHONFAULTHANDLER'] = '1'
output = subprocess.check_output(args, env=env)
self.assertEqual(output.rstrip(), b"True")
def check_dump_traceback(self, filename):
"""
Explicitly call dump_traceback() function and check its output.
Raise an error if the output doesn't match the expected format.
"""
code = """
import faulthandler
def funcB():
if {has_filename}:
with open({filename}, "wb") as fp:
faulthandler.dump_traceback(fp, all_threads=False)
else:
faulthandler.dump_traceback(all_threads=False)
def funcA():
funcB()
funcA()
"""
code = code.format(
filename=repr(filename),
has_filename=bool(filename),
)
if filename:
lineno = 6
else:
lineno = 8
expected = [
'Stack (most recent call first):',
' File "<string>", line %s in funcB' % lineno,
' File "<string>", line 11 in funcA',
' File "<string>", line 13 in <module>'
]
trace, exitcode = self.get_output(code, filename)
self.assertEqual(trace, expected)
self.assertEqual(exitcode, 0)
def test_dump_traceback(self):
self.check_dump_traceback(None)
def test_dump_traceback_file(self):
with temporary_filename() as filename:
self.check_dump_traceback(filename)
def test_truncate(self):
maxlen = 500
func_name = 'x' * (maxlen + 50)
truncated = 'x' * maxlen + '...'
code = """
import faulthandler
def {func_name}():
faulthandler.dump_traceback(all_threads=False)
{func_name}()
"""
code = code.format(
func_name=func_name,
)
expected = [
'Stack (most recent call first):',
' File "<string>", line 4 in %s' % truncated,
' File "<string>", line 6 in <module>'
]
trace, exitcode = self.get_output(code)
self.assertEqual(trace, expected)
self.assertEqual(exitcode, 0)
@unittest.skipIf(not HAVE_THREADS, 'need threads')
def check_dump_traceback_threads(self, filename):
"""
Call explicitly dump_traceback(all_threads=True) and check the output.
Raise an error if the output doesn't match the expected format.
"""
code = """
import faulthandler
from threading import Thread, Event
import time
def dump():
if {filename}:
with open({filename}, "wb") as fp:
faulthandler.dump_traceback(fp, all_threads=True)
else:
faulthandler.dump_traceback(all_threads=True)
class Waiter(Thread):
# avoid blocking if the main thread raises an exception.
daemon = True
def __init__(self):
Thread.__init__(self)
self.running = Event()
self.stop = Event()
def run(self):
self.running.set()
self.stop.wait()
waiter = Waiter()
waiter.start()
waiter.running.wait()
dump()
waiter.stop.set()
waiter.join()
"""
code = code.format(filename=repr(filename))
output, exitcode = self.get_output(code, filename)
output = '\n'.join(output)
if filename:
lineno = 8
else:
lineno = 10
regex = """
^Thread 0x[0-9a-f]+ \(most recent call first\):
(?: File ".*threading.py", line [0-9]+ in [_a-z]+
){{1,3}} File "<string>", line 23 in run
File ".*threading.py", line [0-9]+ in _bootstrap_inner
File ".*threading.py", line [0-9]+ in _bootstrap
Current thread XXX \(most recent call first\):
File "<string>", line {lineno} in dump
File "<string>", line 28 in <module>$
"""
regex = dedent(regex.format(lineno=lineno)).strip()
self.assertRegex(output, regex)
self.assertEqual(exitcode, 0)
def test_dump_traceback_threads(self):
self.check_dump_traceback_threads(None)
def test_dump_traceback_threads_file(self):
with temporary_filename() as filename:
self.check_dump_traceback_threads(filename)
def _check_dump_traceback_later(self, repeat, cancel, filename, loops):
"""
Check how many times the traceback is written in timeout x 2.5 seconds,
or timeout x 3.5 seconds if cancel is True: 1, 2 or 3 times depending
on repeat and cancel options.
        Raise an error if the output doesn't match the expected format.
"""
timeout_str = str(datetime.timedelta(seconds=TIMEOUT))
code = """
import faulthandler
import time
def func(timeout, repeat, cancel, file, loops):
for loop in range(loops):
faulthandler.dump_traceback_later(timeout, repeat=repeat, file=file)
if cancel:
faulthandler.cancel_dump_traceback_later()
time.sleep(timeout * 5)
faulthandler.cancel_dump_traceback_later()
timeout = {timeout}
repeat = {repeat}
cancel = {cancel}
loops = {loops}
if {has_filename}:
file = open({filename}, "wb")
else:
file = None
func(timeout, repeat, cancel, file, loops)
if file is not None:
file.close()
"""
code = code.format(
timeout=TIMEOUT,
repeat=repeat,
cancel=cancel,
loops=loops,
has_filename=bool(filename),
filename=repr(filename),
)
trace, exitcode = self.get_output(code, filename)
trace = '\n'.join(trace)
if not cancel:
count = loops
if repeat:
count *= 2
header = r'Timeout \(%s\)!\nThread 0x[0-9a-f]+ \(most recent call first\):\n' % timeout_str
regex = expected_traceback(9, 20, header, min_count=count)
self.assertRegex(trace, regex)
else:
self.assertEqual(trace, '')
self.assertEqual(exitcode, 0)
@unittest.skipIf(not hasattr(faulthandler, 'dump_traceback_later'),
'need faulthandler.dump_traceback_later()')
def check_dump_traceback_later(self, repeat=False, cancel=False,
file=False, twice=False):
if twice:
loops = 2
else:
loops = 1
if file:
with temporary_filename() as filename:
self._check_dump_traceback_later(repeat, cancel,
filename, loops)
else:
self._check_dump_traceback_later(repeat, cancel, None, loops)
def test_dump_traceback_later(self):
self.check_dump_traceback_later()
def test_dump_traceback_later_repeat(self):
self.check_dump_traceback_later(repeat=True)
def test_dump_traceback_later_cancel(self):
self.check_dump_traceback_later(cancel=True)
def test_dump_traceback_later_file(self):
self.check_dump_traceback_later(file=True)
def test_dump_traceback_later_twice(self):
self.check_dump_traceback_later(twice=True)
@unittest.skipIf(not hasattr(faulthandler, "register"),
"need faulthandler.register")
def check_register(self, filename=False, all_threads=False,
unregister=False, chain=False):
"""
Register a handler displaying the traceback on a user signal. Raise the
signal and check the written traceback.
If chain is True, check that the previous signal handler is called.
Raise an error if the output doesn't match the expected format.
"""
signum = signal.SIGUSR1
code = """
import faulthandler
import os
import signal
import sys
def func(signum):
os.kill(os.getpid(), signum)
def handler(signum, frame):
handler.called = True
handler.called = False
exitcode = 0
signum = {signum}
unregister = {unregister}
chain = {chain}
if {has_filename}:
file = open({filename}, "wb")
else:
file = None
if chain:
signal.signal(signum, handler)
faulthandler.register(signum, file=file,
all_threads={all_threads}, chain={chain})
if unregister:
faulthandler.unregister(signum)
func(signum)
if chain and not handler.called:
if file is not None:
output = file
else:
output = sys.stderr
print("Error: signal handler not called!", file=output)
exitcode = 1
if file is not None:
file.close()
sys.exit(exitcode)
"""
code = code.format(
filename=repr(filename),
has_filename=bool(filename),
all_threads=all_threads,
signum=signum,
unregister=unregister,
chain=chain,
)
trace, exitcode = self.get_output(code, filename)
trace = '\n'.join(trace)
if not unregister:
if all_threads:
regex = 'Current thread XXX \(most recent call first\):\n'
else:
regex = 'Stack \(most recent call first\):\n'
regex = expected_traceback(7, 28, regex)
self.assertRegex(trace, regex)
else:
self.assertEqual(trace, '')
if unregister:
self.assertNotEqual(exitcode, 0)
else:
self.assertEqual(exitcode, 0)
def test_register(self):
self.check_register()
def test_unregister(self):
self.check_register(unregister=True)
def test_register_file(self):
with temporary_filename() as filename:
self.check_register(filename=filename)
def test_register_threads(self):
self.check_register(all_threads=True)
def test_register_chain(self):
self.check_register(chain=True)
@contextmanager
def check_stderr_none(self):
stderr = sys.stderr
try:
sys.stderr = None
with self.assertRaises(RuntimeError) as cm:
yield
self.assertEqual(str(cm.exception), "sys.stderr is None")
finally:
sys.stderr = stderr
def test_stderr_None(self):
        # Issue #21497: provide a helpful error if sys.stderr is None,
# instead of just an attribute error: "None has no attribute fileno".
with self.check_stderr_none():
faulthandler.enable()
with self.check_stderr_none():
faulthandler.dump_traceback()
if hasattr(faulthandler, 'dump_traceback_later'):
with self.check_stderr_none():
faulthandler.dump_traceback_later(1e-3)
if hasattr(faulthandler, "register"):
with self.check_stderr_none():
faulthandler.register(signal.SIGUSR1)
if __name__ == "__main__":
unittest.main()
|
Tomtomgo/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/BuildSlaveSupport/build.webkit.org-config/htdigestparser_unittest.py | 117 | # Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import os
import unittest2 as unittest
from htdigestparser import HTDigestParser
class HTDigestParserTest(unittest.TestCase):
def assertEntriesEqual(self, entries, additional_content=None):
digest_file = self.fake_htdigest_file()
if additional_content is not None:
digest_file.seek(pos=0, mode=os.SEEK_END)
digest_file.write(additional_content)
digest_file.seek(pos=0, mode=os.SEEK_SET)
self.assertEqual(entries, HTDigestParser(digest_file).entries())
def test_authenticate(self):
htdigest = HTDigestParser(self.fake_htdigest_file())
self.assertTrue(htdigest.authenticate('user1', 'realm 1', 'password1'))
self.assertTrue(htdigest.authenticate('user2', 'realm 2', 'password2'))
self.assertTrue(htdigest.authenticate('user3', 'realm 1', 'password3'))
self.assertTrue(htdigest.authenticate('user3', 'realm 3', 'password3'))
self.assertFalse(htdigest.authenticate('user1', 'realm', 'password1'))
self.assertFalse(htdigest.authenticate('user1', 'realm 2', 'password1'))
self.assertFalse(htdigest.authenticate('user2', 'realm 2', 'password1'))
self.assertFalse(htdigest.authenticate('user2', 'realm 1', 'password1'))
self.assertFalse(htdigest.authenticate('', '', ''))
def test_entries(self):
entries = [
['user1', 'realm 1', '36b8aa27fa5e9051095d37b619f92762'],
['user2', 'realm 2', '14f827686fa97778f02fe1314a3337c8'],
['user3', 'realm 1', '1817fc8a24119cc57fbafc8a630ea5a5'],
['user3', 'realm 3', 'a05f5a2335e9d87bbe75bbe5e53248f0'],
]
self.assertEntriesEqual(entries)
self.assertEntriesEqual(entries, additional_content='')
def test_empty_file(self):
self.assertEqual([], HTDigestParser(StringIO.StringIO()).entries())
def test_too_few_colons(self):
self.assertEntriesEqual([], additional_content='user1:realm 1\n')
def test_too_many_colons(self):
self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f92762:garbage\n')
def test_invalid_hash(self):
self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f92762000000\n')
self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f9276\n')
self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f9276z\n')
self.assertEntriesEqual([], additional_content='user1:realm 1: 36b8aa27fa5e9051095d37b619f92762\n')
def fake_htdigest_file(self):
return StringIO.StringIO("""user1:realm 1:36b8aa27fa5e9051095d37b619f92762
user2:realm 2:14f827686fa97778f02fe1314a3337c8
user3:realm 1:1817fc8a24119cc57fbafc8a630ea5a5
user3:realm 3:a05f5a2335e9d87bbe75bbe5e53248f0
""")
# FIXME: We should run this file as part of test-webkitpy.
# Unfortunately test-webkitpy currently requires that unittests
# be located in a directory with a valid module name.
# 'build.webkit.org-config' is not a valid module name (due to '.' and '-')
# so for now this is a stand-alone test harness.
if __name__ == '__main__':
unittest.main()
|
undoware/neutron-drive | refs/heads/master | google_appengine/lib/django_1_3/django/contrib/webdesign/__init__.py | 12133432 | |
eduNEXT/edx-platform | refs/heads/master | openedx/core/lib/x_forwarded_for/__init__.py | 12133432 | |
gannetson/django | refs/heads/master | tests/gis_tests/geoapp/__init__.py | 12133432 | |
emijrp/pywikibot-core | refs/heads/master | pywikibot/families/battlestarwiki_family.py | 5 | # -*- coding: utf-8 -*-
"""Family module for Battlestar Wiki."""
from __future__ import unicode_literals
__version__ = '$Id$'
from pywikibot import family
# The Battlestar Wiki family, a set of Battlestar wikis.
# http://battlestarwiki.org/
class Family(family.Family):
"""Family class for Battlestar Wiki."""
name = 'battlestarwiki'
languages_by_size = ['en', 'de']
interwiki_removals = ['fr', 'zh', 'es', 'ms', 'tr', 'simple']
langs = dict([(lang, '%s.battlestarwiki.org' % lang)
for lang in languages_by_size])
|
handroissuazo/tensorflow | refs/heads/master | tensorflow/contrib/tfprof/python/tools/tfprof/model_analyzer_test.py | 7 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
# XXX: this depends on pywrap_tensorflow and must come later
from tensorflow.contrib.tfprof.python.tools.tfprof import model_analyzer
class PrintModelAnalysisTest(test.TestCase):
def _BuildSmallModel(self):
image = array_ops.zeros([2, 6, 6, 3])
kernel = variable_scope.get_variable(
'DW', [3, 3, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
kernel = variable_scope.get_variable(
'DW2', [2, 2, 6, 12],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(x, kernel, [1, 2, 2, 1], padding='SAME')
return x
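  # Parameter-count sanity check for the model above (matches the expected
  # strings in the tests below): DW has 3*3*3*6 = 162 weights and DW2 has
  # 2*2*6*12 = 288, for a total of 450 trainable parameters.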
def testDumpToFile(self):
opts = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
opts['dump_to_file'] = os.path.join(test.get_temp_dir(), 'dump')
with session.Session() as sess, ops.device('/cpu:0'):
_ = self._BuildSmallModel()
model_analyzer.print_model_analysis(sess.graph, tfprof_options=opts)
with gfile.Open(opts['dump_to_file'], 'r') as f:
self.assertEqual(u'_TFProfRoot (--/450 params)\n'
' DW (3x3x3x6, 162/162 params)\n'
' DW2 (2x2x6x12, 288/288 params)\n',
f.read())
def testSelectEverything(self):
opts = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
opts['dump_to_file'] = os.path.join(test.get_temp_dir(), 'dump')
opts['account_type_regexes'] = ['.*']
opts['select'] = [
'bytes', 'params', 'float_ops', 'num_hidden_ops', 'device', 'op_types'
]
with session.Session() as sess, ops.device('/cpu:0'):
x = self._BuildSmallModel()
sess.run(variables.global_variables_initializer())
run_meta = config_pb2.RunMetadata()
_ = sess.run(x,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE),
run_metadata=run_meta)
model_analyzer.print_model_analysis(
sess.graph, run_meta, tfprof_options=opts)
with gfile.Open(opts['dump_to_file'], 'r') as f:
# pylint: disable=line-too-long
self.assertEqual(
'_TFProfRoot (0/450 params, 0/10.44k flops, 0B/5.28KB, _kTFScopeParent)\n Conv2D (0/0 params, 5.83k/5.83k flops, 432B/432B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Conv2D)\n Conv2D_1 (0/0 params, 4.61k/4.61k flops, 384B/384B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Conv2D)\n DW (3x3x3x6, 162/162 params, 0/0 flops, 648B/1.30KB, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|VariableV2|_trainable_variables)\n DW/Assign (0/0 params, 0/0 flops, 0B/0B, /device:CPU:0, /device:CPU:0|Assign)\n DW/Initializer (0/0 params, 0/0 flops, 0B/0B, _kTFScopeParent)\n DW/Initializer/random_normal (0/0 params, 0/0 flops, 0B/0B, Add)\n DW/Initializer/random_normal/RandomStandardNormal (0/0 params, 0/0 flops, 0B/0B, RandomStandardNormal)\n DW/Initializer/random_normal/mean (0/0 params, 0/0 flops, 0B/0B, Const)\n DW/Initializer/random_normal/mul (0/0 params, 0/0 flops, 0B/0B, Mul)\n DW/Initializer/random_normal/shape (0/0 params, 0/0 flops, 0B/0B, Const)\n DW/Initializer/random_normal/stddev (0/0 params, 0/0 flops, 0B/0B, Const)\n DW/read (0/0 params, 0/0 flops, 648B/648B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Identity)\n DW2 (2x2x6x12, 288/288 params, 0/0 flops, 1.15KB/2.30KB, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|VariableV2|_trainable_variables)\n DW2/Assign (0/0 params, 0/0 flops, 0B/0B, /device:CPU:0, /device:CPU:0|Assign)\n DW2/Initializer (0/0 params, 0/0 flops, 0B/0B, _kTFScopeParent)\n DW2/Initializer/random_normal (0/0 params, 0/0 flops, 0B/0B, Add)\n DW2/Initializer/random_normal/RandomStandardNormal (0/0 params, 0/0 flops, 0B/0B, RandomStandardNormal)\n DW2/Initializer/random_normal/mean (0/0 params, 0/0 flops, 0B/0B, Const)\n DW2/Initializer/random_normal/mul (0/0 params, 0/0 flops, 0B/0B, Mul)\n DW2/Initializer/random_normal/shape (0/0 params, 0/0 flops, 0B/0B, Const)\n DW2/Initializer/random_normal/stddev (0/0 params, 0/0 flops, 0B/0B, Const)\n DW2/read (0/0 params, 0/0 flops, 1.15KB/1.15KB, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Identity)\n init (0/0 params, 0/0 flops, 0B/0B, /device:CPU:0, /device:CPU:0|NoOp)\n zeros (0/0 params, 0/0 flops, 864B/864B, /job:localhost/replica:0/task:0/cpu:0, /job:localhost/replica:0/task:0/cpu:0|Const)\n',
f.read())
# pylint: enable=line-too-long
if __name__ == '__main__':
test.main()
|
lepistone/odoo | refs/heads/master | addons/analytic_user_function/__openerp__.py | 119 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Jobs on Contracts',
'version': '1.0',
'category': 'Sales Management',
'description': """
This module allows you to define what is the default function of a specific user on a given account.
====================================================================================================
This is mostly used when a user encodes his timesheet: the values are retrieved
and the fields are auto-filled. But the possibility to change these values is
still available.
Obviously if no data has been recorded for the current account, the default
value is given as usual by the employee data so that this module is perfectly
compatible with older configurations.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/analytic_user_function.jpeg'],
'depends': ['hr_timesheet_sheet'],
'data': ['analytic_user_function_view.xml', 'security/ir.model.access.csv'],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
yojota/volatility | refs/heads/master | volatility/plugins/linux/vma_cache.py | 58 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.obj as obj
import volatility.plugins.linux.common as linux_common
from volatility.plugins.linux.slab_info import linux_slabinfo
class linux_vma_cache(linux_common.AbstractLinuxCommand):
"""Gather VMAs from the vm_area_struct cache"""
def __init__(self, config, *args, **kwargs):
linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
self._config.add_option('UNALLOCATED', short_option = 'u',
default = False,
help = 'Show unallocated',
action = 'store_true')
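    # Example invocation (memory image name and Linux profile are placeholders):
    #   python vol.py -f memory.lime --profile=LinuxDebian7x64 linux_vma_cache -u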
def calculate(self):
linux_common.set_plugin_members(self)
has_owner = self.profile.obj_has_member("mm_struct", "owner")
cache = linux_slabinfo(self._config).get_kmem_cache("vm_area_struct", self._config.UNALLOCATED)
for vm in cache:
start = vm.vm_start
end = vm.vm_end
if has_owner and vm.vm_mm and vm.vm_mm.is_valid():
task = vm.vm_mm.owner
(task_name, pid) = (task.comm, task.pid)
else:
(task_name, pid) = ("", "")
if vm.vm_file and vm.vm_file.is_valid():
path = vm.vm_file.dentry.get_partial_path()
else:
path = ""
yield task_name, pid, start, end, path
def render_text(self, outfd, data):
self.table_header(outfd, [("Process", "16"),
("PID", "6"),
("Start", "[addrpad]"),
("End", "[addrpad]"),
("Path", "")])
for task_name, pid, start, end, path in data:
self.table_row(outfd, task_name, pid, start, end, path)
|
derDavidT/sympy | refs/heads/master | sympy/matrices/expressions/determinant.py | 92 | from __future__ import print_function, division
from sympy import Basic, Expr, S, sympify
from .matexpr import ShapeError
class Determinant(Expr):
"""Matrix Determinant
Represents the determinant of a matrix expression.
>>> from sympy import MatrixSymbol, Determinant, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> Determinant(A)
Determinant(A)
>>> Determinant(eye(3)).doit()
1
"""
def __new__(cls, mat):
mat = sympify(mat)
if not mat.is_Matrix:
raise TypeError("Input to Determinant, %s, not a matrix" % str(mat))
if not mat.is_square:
raise ShapeError("Det of a non-square matrix")
return Basic.__new__(cls, mat)
@property
def arg(self):
return self.args[0]
def doit(self, expand=False):
try:
return self.arg._eval_determinant()
except (AttributeError, NotImplementedError):
return self
def det(matexpr):
""" Matrix Determinant
>>> from sympy import MatrixSymbol, det, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> det(A)
Determinant(A)
>>> det(eye(3))
1
"""
return Determinant(matexpr).doit()
from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict
def refine_Determinant(expr, assumptions):
"""
>>> from sympy import MatrixSymbol, Q, assuming, refine, det
>>> X = MatrixSymbol('X', 2, 2)
>>> det(X)
Determinant(X)
>>> with assuming(Q.orthogonal(X)):
... print(refine(det(X)))
1
"""
if ask(Q.orthogonal(expr.arg), assumptions):
return S.One
elif ask(Q.singular(expr.arg), assumptions):
return S.Zero
elif ask(Q.unit_triangular(expr.arg), assumptions):
return S.One
return expr
handlers_dict['Determinant'] = refine_Determinant
|
wwj718/murp-edx | refs/heads/master | common/djangoapps/external_auth/djangostore.py | 224 | """A openid store using django cache"""
from openid.store.interface import OpenIDStore
from openid.store import nonce
from django.core.cache import cache
import logging
import time
DEFAULT_ASSOCIATIONS_TIMEOUT = 60
DEFAULT_NONCE_TIMEOUT = 600
ASSOCIATIONS_KEY_PREFIX = 'openid.provider.associations.'
NONCE_KEY_PREFIX = 'openid.provider.nonce.'
log = logging.getLogger('DjangoOpenIDStore')
def get_url_key(server_url):
key = ASSOCIATIONS_KEY_PREFIX + server_url
return key
def get_nonce_key(server_url, timestamp, salt):
key = '{prefix}{url}.{ts}.{salt}'.format(prefix=NONCE_KEY_PREFIX,
url=server_url,
ts=timestamp,
salt=salt)
return key
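# For illustration only (the URL, timestamp and salt below are made up), the two
# key builders above produce cache keys such as:
#   openid.provider.associations.https://example.com/openid
#   openid.provider.nonce.https://example.com/openid.1300000000.abc123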
class DjangoOpenIDStore(OpenIDStore):
def __init__(self):
log.info('DjangoStore cache:' + str(cache.__class__))
def storeAssociation(self, server_url, assoc):
key = get_url_key(server_url)
log.info('storeAssociation {0}'.format(key))
associations = cache.get(key, {})
associations[assoc.handle] = assoc
cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
def getAssociation(self, server_url, handle=None):
key = get_url_key(server_url)
log.info('getAssociation {0}'.format(key))
associations = cache.get(key, {})
assoc = None
if handle is None:
# get best association
            valid_assocs = [a for a in associations.values() if a.getExpiresIn() > 0]
            if valid_assocs:
                valid_assocs.sort(key=lambda a: a.getExpiresIn(), reverse=True)
                assoc = valid_assocs[0]
else:
assoc = associations.get(handle)
# check expiration and remove if it has expired
if assoc and assoc.getExpiresIn() <= 0:
if handle is None:
cache.delete(key)
else:
associations.pop(handle)
cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
assoc = None
return assoc
def removeAssociation(self, server_url, handle):
key = get_url_key(server_url)
log.info('removeAssociation {0}'.format(key))
associations = cache.get(key, {})
removed = False
if associations:
if handle is None:
cache.delete(key)
removed = True
else:
assoc = associations.pop(handle, None)
if assoc:
cache.set(key, associations, DEFAULT_ASSOCIATIONS_TIMEOUT)
removed = True
return removed
def useNonce(self, server_url, timestamp, salt):
key = get_nonce_key(server_url, timestamp, salt)
log.info('useNonce {0}'.format(key))
if abs(timestamp - time.time()) > nonce.SKEW:
return False
anonce = cache.get(key)
found = False
if anonce is None:
cache.set(key, '-', DEFAULT_NONCE_TIMEOUT)
found = False
else:
found = True
return found
def cleanupNonces(self):
        # not necessary, keys will time out
return 0
def cleanupAssociations(self):
# not necesary, keys will timeout
return 0
|
zahodi/ansible | refs/heads/devel | lib/ansible/modules/cloud/ovirt/ovirt_datacenters.py | 18 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_datacenters
short_description: Module to manage data centers in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage data centers in oVirt"
options:
name:
        description:
            - "Name of the data center to manage."
required: true
state:
description:
- "Should the data center be present or absent"
choices: ['present', 'absent']
default: present
description:
description:
- "Description of the data center."
comment:
description:
- "Comment of the data center."
local:
description:
- "I(True) if the data center should be local, I(False) if should be shared."
- "Default value is set by engine."
compatibility_version:
description:
- "Compatibility version of the data center."
quota_mode:
description:
- "Quota mode of the data center. One of I(disabled), I(audit) or I(enabled)"
choices: ['disabled', 'audit', 'enabled']
mac_pool:
description:
- "MAC pool to be used by this datacenter."
- "IMPORTANT: This option is deprecated in oVirt 4.1. You should
use C(mac_pool) in C(ovirt_clusters) module, as MAC pools are
set per cluster since 4.1."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create datacenter
- ovirt_datacenters:
name: mydatacenter
local: True
compatibility_version: 4.0
quota_mode: enabled
# Remove datacenter
- ovirt_datacenters:
state: absent
name: mydatacenter
'''
RETURN = '''
id:
description: "ID of the managed datacenter"
returned: "On success if datacenter is found."
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
data_center:
description: "Dictionary of all the datacenter attributes. Datacenter attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/datacenter."
returned: "On success if datacenter is found."
'''
class DatacentersModule(BaseModule):
def __get_major(self, full_version):
if full_version is None:
return None
if isinstance(full_version, otypes.Version):
return full_version.major
return int(full_version.split('.')[0])
def __get_minor(self, full_version):
if full_version is None:
return None
if isinstance(full_version, otypes.Version):
return full_version.minor
return int(full_version.split('.')[1])
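    # Illustrative only: for a compatibility_version of "4.1" these helpers yield
    # major 4 and minor 1; both return None when no version string is supplied.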
def _get_mac_pool(self):
mac_pool = None
if self._module.params.get('mac_pool'):
mac_pool = search_by_name(
self._connection.system_service().mac_pools_service(),
self._module.params.get('mac_pool'),
)
return mac_pool
def build_entity(self):
return otypes.DataCenter(
name=self._module.params['name'],
comment=self._module.params['comment'],
description=self._module.params['description'],
mac_pool=otypes.MacPool(
id=getattr(self._get_mac_pool(), 'id', None),
) if self._module.params.get('mac_pool') else None,
quota_mode=otypes.QuotaModeType(
self._module.params['quota_mode']
) if self._module.params['quota_mode'] else None,
local=self._module.params['local'],
version=otypes.Version(
major=self.__get_major(self._module.params['compatibility_version']),
minor=self.__get_minor(self._module.params['compatibility_version']),
) if self._module.params['compatibility_version'] else None,
)
def update_check(self, entity):
minor = self.__get_minor(self._module.params.get('compatibility_version'))
major = self.__get_major(self._module.params.get('compatibility_version'))
return (
equal(getattr(self._get_mac_pool(), 'id', None), getattr(entity.mac_pool, 'id', None)) and
equal(self._module.params.get('comment'), entity.comment) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('quota_mode'), str(entity.quota_mode)) and
equal(self._module.params.get('local'), entity.local) and
equal(minor, self.__get_minor(entity.version)) and
equal(major, self.__get_major(entity.version))
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None, required=True),
description=dict(default=None),
local=dict(type='bool'),
compatibility_version=dict(default=None),
quota_mode=dict(choices=['disabled', 'audit', 'enabled']),
comment=dict(default=None),
mac_pool=dict(default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
connection = create_connection(module.params.pop('auth'))
data_centers_service = connection.system_service().data_centers_service()
clusters_module = DatacentersModule(
connection=connection,
module=module,
service=data_centers_service,
)
state = module.params['state']
if state == 'present':
ret = clusters_module.create()
elif state == 'absent':
ret = clusters_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == "__main__":
main()
|
wilsonmar/md-to-toc | refs/heads/master | md-to-toc.py | 1 | # Author: Antonio Maiorano ([email protected])
import os
import sys
import re
TOC_LIST_PREFIX = "-"
# TOC_LIST_PREFIX = "*"
HEADER_LINE_RE = re.compile("^(#+)\s*(.*?)\s*(#+$|$)", re.IGNORECASE)
# Dictionary of anchor name to number of instances found so far
anchors = {}
def print_usage():
print("\nUsage: md-to-toc <markdown_file>")
def to_github_anchor(title):
'''
Converts markdown header title (without #s) to GitHub-formatted anchor.
Note that this function attempts to recreate GitHub's anchor-naming logic.
'''
# Convert to lower case and replace spaces with dashes
anchor_name = title.strip().lower().replace(' ', '-')
# Strip all invalid characters
anchor_name = re.sub("[^A-Za-z0-9\-_]", "", anchor_name)
# If we've encountered this anchor name before, append next instance count
count = anchors.get(anchor_name)
if count == None:
anchors[anchor_name] = 0
else:
count = count + 1
anchors[anchor_name] = count
anchor_name = anchor_name + '-' + str(count)
return '#' + anchor_name
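# Quick sketch of what the helper above returns (it only approximates GitHub's
# own slug rules, as noted in the docstring):
#   to_github_anchor("My Header!")  -> "#my-header"
#   to_github_anchor("My Header!")  -> "#my-header-1"   (second occurrence in a run)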
def toggles_block_quote(line):
'''Returns true if line toggles block quotes on or off (i.e. finds odd number of ```)'''
n = line.count("```")
return n > 0 and line.count("```") % 2 != 0
def main(argv = None):
if argv is None:
argv = sys.argv
if len(argv) < 2:
print_usage()
return 0
filespec = argv[1]
in_block_quote = False
results = [] # list of (header level, title, anchor) tuples
file = open(filespec)
for line in file.readlines():
if toggles_block_quote(line):
            in_block_quote = not in_block_quote
if in_block_quote:
continue
m = HEADER_LINE_RE.match(line)
if m != None:
header_level = len(m.group(1))
title = m.group(2)
spaces = " " * (header_level - 1)
results.append( (header_level, title, to_github_anchor(title)) )
# Compute min header level so we can offset output to be flush with left edge
min_header_level = min(results, key=lambda e: e[0])[0]
for r in results:
header_level = r[0]
spaces = " " * (header_level - min_header_level)
print("{}{} [{}]({})".format(spaces, TOC_LIST_PREFIX, r[1], r[2]))
if __name__ == "__main__":
sys.exit(main())
|
Vvucinic/Wander | refs/heads/master | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/contrib/gis/db/backends/spatialite/models.py | 510 | """
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.contrib.gis.db.backends.spatialite.base import DatabaseWrapper
from django.db import connection, models
from django.db.backends.signals import connection_created
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class SpatialiteGeometryColumns(models.Model):
"""
The 'geometry_columns' table from SpatiaLite.
"""
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
spatial_index_enabled = models.IntegerField()
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialiteSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from SpatiaLite.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
ref_sys_name = models.CharField(max_length=256)
proj4text = models.CharField(max_length=2048)
@property
def wkt(self):
if hasattr(self, 'srtext'):
return self.srtext
from django.contrib.gis.gdal import SpatialReference
return SpatialReference(self.proj4text).wkt
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
def add_spatial_version_related_fields(sender, **kwargs):
"""
Adds fields after establishing a database connection to prevent database
operations at compile time.
"""
if connection_created.disconnect(add_spatial_version_related_fields, sender=DatabaseWrapper):
spatial_version = connection.ops.spatial_version[0]
if spatial_version >= 4:
SpatialiteSpatialRefSys.add_to_class('srtext', models.CharField(max_length=2048))
SpatialiteGeometryColumns.add_to_class('type', models.IntegerField(db_column='geometry_type'))
else:
SpatialiteGeometryColumns.add_to_class('type', models.CharField(max_length=30))
connection_created.connect(add_spatial_version_related_fields, sender=DatabaseWrapper)
|
jamtot/DailyChallenge | refs/heads/master | hyperbinary (04mar2015)/hype.py | 1 | import re
def dec2bin(dec):
bn = ''
count,n=1,1
    while n*2 <= dec:  # use <= so exact powers of two get a full-length binary string
n*=2
count+=1
while n >= 1:
if dec/n > 0:
dec%=n
bn+='1'
else: bn+='0'
n/=2
return bn
# using 10 = 02 and 20 = 12
def dec2hyp(dec):
hyperbinarys = [dec2bin(dec)]
b10 = re.compile('10')
b20 = re.compile('20')
for hybi in hyperbinarys:
for match in b10.finditer(hybi):
hb = hybi[:match.start()] + '02' + hybi[match.end():]
while hb[0] == '0':
hb = hb[1:]
if hb not in hyperbinarys:
hyperbinarys.append(hb)
for match in b20.finditer(hybi):
hb = hybi[:match.start()] + '12' + hybi[match.end():]
if hb not in hyperbinarys:
hyperbinarys.append(hb)
print hybi
return len(hyperbinarys)
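# Tiny worked example of the rewrite rules above (illustration only, not part of
# the original script): 4 is '100' in binary, and repeatedly applying 10 -> 02 and
# 20 -> 12 yields '020' (2+2) and then '012' (2+1+1), so dec2hyp(4) should count
# 3 hyperbinary representations of 4.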
if __name__ == "__main__":
print dec2hyp(18)
print dec2hyp(73)
# dec2hyp(12345678910) # gives 106851 after a while
|
danmcp/origin | refs/heads/master | vendor/github.com/google/certificate-transparency/python/ct/crypto/asn1/print_util.py | 35 | # Utilities for printing ASN.1 values
def bits_to_hex(bit_array, delimiter=":"):
    """Convert a bit array to a prettily formatted hex string. If the array
length is not a multiple of 8, it is padded with 0-bits from the left.
For example, [1,0,0,1,1,0,1,0,0,1,0] becomes 04:d2.
Args:
bit_array: the bit array to convert
Returns:
the formatted hex string."""
# Pad the first partial byte.
partial_bits = len(bit_array) % 8
pad_length = 8 - partial_bits if partial_bits else 0
bitstring = "0"*pad_length + "".join(map(str, bit_array))
byte_array = [int(bitstring[i:i+8], 2) for i in range(0, len(bitstring), 8)]
    return delimiter.join(map(lambda x: "%02x" % x, byte_array))
def bytes_to_hex(byte_string, delimiter=":"):
    """Convert a bytestring to a prettily formatted hex string: for example,
'\x04\xd2' becomes 04:d2.
Args:
byte_string: the bytes to convert.
Returns:
the formatted hex string."""
return delimiter.join([("%02x" % ord(b)) for b in byte_string])
def int_to_hex(int_value, delimiter=":"):
    """Convert an integer to a prettily formatted hex string: for example,
1234 (0x4d2) becomes 04:d2 and -1234 becomes ' -:04:d2'
Args:
int_value: the value to convert.
Returns:
the formatted hex string."""
hex_string = "%x" % int_value
ret = ""
pos = 0
# Accommodate for negative integers.
if hex_string[0] == '-':
ret += ' -' + delimiter
hex_string = hex_string[1:]
# If the first digit is a half-byte, pad with a 0.
remaining_len = len(hex_string) - pos
hex_string = hex_string.zfill(remaining_len + remaining_len % 2)
byte_values = [hex_string[i:i+2] for i in range(pos, len(hex_string), 2)]
return ret + delimiter.join(byte_values)
def wrap_lines(long_string, wrap):
"""Split the long string into line chunks according to the wrap limit and
existing newlines.
Args:
long_string: a long, possibly multiline string
wrap: maximum number of characters per line. 0 or negative
wrap means no limit.
Returns:
a list of lines of at most |wrap| characters each."""
if not long_string:
return []
long_lines = long_string.split('\n')
if wrap <= 0:
return long_lines
ret = []
for line in long_lines:
if not line:
# Empty line
ret += [line]
else:
ret += [line[i:i+wrap] for i in range(0, len(line), wrap)]
return ret
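# Rough illustration of the wrapping behaviour (not a doctest from the original module):
#   wrap_lines("abcdef\ngh", 4) -> ["abcd", "ef", "gh"]
#   wrap_lines("abc", 0)        -> ["abc"]    (wrap <= 0 means no limit)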
def append_lines(lines, wrap, buf):
"""Append lines to the buffer. If the first line can be appended to the last
line of the buf without exceeding wrap characters, the two lines are merged.
Args:
lines: an iterable of lines to append
wrap: maximum number of characters per line. 0 or negative wrap means
no limit.
buf: an iterable of lines to append to"""
if not lines:
return
if not buf or wrap > 0 and len(buf[-1]) + len(lines[0]) > wrap:
buf += lines
else:
buf[-1] += lines[0]
buf += lines[1:]
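# Rough illustration of append_lines (not from the original module):
#   buf = ["ab"];  append_lines(["cd", "ef"], wrap=4, buf=buf)   # buf becomes ["abcd", "ef"]
#   buf = ["abc"]; append_lines(["de"], wrap=4, buf=buf)         # buf becomes ["abc", "de"]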
|
michelts/aloe_django | refs/heads/master | tests/integration/django/alfaces/__init__.py | 9480 | #
|
pombredanne/teamwork | refs/heads/master | w2/static/Brython2.0.0-20140209-164925/Lib/xml/dom/minicompat.py | 781 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
StringTypes = (str,)
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name))
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
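# Minimal usage sketch (mirrors the recipe in the module header; the class and
# attribute names are illustrative only):
#   class ChildlessNode(object):
#       def _get_length(self):
#           return 0
#   defproperty(ChildlessNode, "length", doc="Number of child nodes.")
#   ChildlessNode().length      # -> 0
#   ChildlessNode().length = 1  # raises xml.dom.NoModificationAllowedErr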
|
valgur/numpyson | refs/heads/master | test_numpyson.py | 1 | import datetime as dt
from functools import partial
import inspect
from multiprocessing import Process, Queue
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_index_equal, assert_series_equal, assert_frame_equal
from numpy.testing import assert_equal
assert_series_equal_strict = partial(assert_series_equal, check_dtype=True, check_index_type=True,
check_series_type=True, check_less_precise=False)
assert_frame_equal_strict = partial(assert_frame_equal, check_dtype=True, check_index_type=True,
check_column_type=True, check_frame_type=True, check_less_precise=False,
check_names=True)
from numpyson import dumps, loads, build_index_handler_for_type
def test_version():
import numpyson
assert numpyson.__version__
def decode_in_subproc(q, encoded_object):
obj = loads(encoded_object)
# Multiprocessing is not capable of pickling NumPy arrays so a string representation is returned instead
if isinstance(obj, dict):
# Dictionary items are not ordered so we need to sort them to get a unique repr
output_str = repr(sorted(obj.items()))
else:
output_str = repr(obj)
q.put(output_str)
def _test_in_subprocess(object):
queue = Queue()
encoded = dumps(object)
p = Process(target=decode_in_subproc, args=(queue, encoded))
p.start()
p.join(timeout=3) # this blocks until the process terminates
result = queue.get()
if isinstance(object, dict):
assert_equal(repr(sorted(object.items())), result)
else:
assert_equal(repr(object), result)
return result
@pytest.mark.parametrize('arr_before', [
np.array([1, 2, 3]),
np.array([1., 2., 3.]),
np.array(['foo', 'bar', 'baz']),
np.array([dt.datetime(1970, 1, 1, 12, 57), dt.datetime(1970, 1, 1, 12, 58), dt.datetime(1970, 1, 1, 12, 59)]),
np.array([dt.date(1970, 1, 1), dt.date(1970, 1, 2), dt.date(1970, 1, 3)]),
np.array([True, False, True]),
np.arange(10).T,
np.array([[1, 4, 7], [2, 5, 8], [3, 6, 9]]),
np.array([[[1., 10.], [4., 40.], [7., 70.]], [[2., 20.], [5., 50.], [8., 80.]], [[3., 30.], [6., 60.], [9., 90.]]]),
np.reshape(np.arange(100), (10, 10)),
np.reshape(np.arange(100).T, (10, 10)),
])
def test_numpy_array_handler(arr_before):
buf = dumps(arr_before)
arr_after = loads(buf)
assert_equal(arr_before, arr_after)
_test_in_subprocess(arr_before)
def test_nested_array():
data_before = {"1": np.array([1, 2])}
buf = dumps(data_before)
arr_after = loads(buf)
assert_equal(data_before, arr_after)
_test_in_subprocess(data_before)
@pytest.mark.parametrize('ts_before', [
pd.Series([1, 2, 3], index=[0, 1, 2]),
pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='S')),
pd.Series([1., 2., 3.], pd.date_range('1970-01-01', periods=3, freq='D')),
pd.Series([dt.datetime(1970, 1, 1, 12, 57), dt.datetime(1970, 1, 1, 12, 58)],
pd.date_range('1970-01-01', periods=2, freq='D'))
])
def test_pandas_timeseries_handler(ts_before):
buf = dumps(ts_before)
ts_after = loads(buf)
assert_series_equal_strict(ts_before, ts_after)
_test_in_subprocess(ts_before)
@pytest.mark.parametrize('index_before', [
pd.Index([0, 1, 2]),
pd.Index([0., 1., 2.]), # not sure why you would want to index by floating point numbers; here for completeness
pd.Index(['a', 'b', 'c']),
pd.Index([dt.datetime(1970, 1, 1, 12, 57), dt.datetime(1970, 1, 1, 12, 58)])
])
def test_pandas_index_handler(index_before):
buf = dumps(index_before)
index_after = loads(buf)
assert_index_equal(index_before, index_after)
_test_in_subprocess(index_before)
@pytest.mark.parametrize('index_before', [
pd.date_range('1970-01-01', periods=3, freq='S'),
pd.date_range('1970-01-01', periods=3, freq='D'),
])
def test_pandas_datetime_index_handler(index_before):
buf = dumps(index_before)
index_after = loads(buf)
assert_index_equal(index_before, index_after)
_test_in_subprocess(index_before)
@pytest.mark.parametrize('data_before', [
{"1": pd.date_range('1970-01-01', periods=3, freq='S')},
{"1": pd.date_range('1970-01-01', periods=3, freq='D')},
])
def test_datetime_index_nested(data_before):
buf = dumps(data_before)
data_after = loads(buf)
assert_index_equal(data_before["1"], data_after["1"])
_test_in_subprocess(data_before)
TEST_DATA_FRAMES = (
pd.DataFrame({0: [1, 2, 3]}, index=[0, 1, 2]),
pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=[0, 1, 2]),
pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='S')),
pd.DataFrame({0: [1, 2, 3], 1: [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='D')),
pd.DataFrame({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3]}, index=pd.date_range('1970-01-01', periods=3, freq='D')),
pd.DataFrame({
'i': [1, 2, 3],
'f': [1.1, 2.2, 3.3],
'd': [dt.datetime(1970, 1, 1, 12, 57), dt.datetime(1970, 1, 1, 12, 58), dt.datetime(1970, 1, 1, 12, 59)],
's': ['ham', 'spam', 'eggs'],
'b': [True, False, True],
'o': [{'a': 1}, {'b': 2}, {'c': 3}],
},
index=pd.date_range('1970-01-01', periods=3, freq='S')),
pd.DataFrame(np.ones(shape=(10,15)), index=pd.date_range('1970-01-01', periods=10))
)
@pytest.mark.parametrize('df_before', TEST_DATA_FRAMES)
def test_pandas_dataframe_handler(df_before):
buf = dumps(df_before)
df_after = loads(buf)
assert_frame_equal_strict(df_before, df_after)
_test_in_subprocess(df_before)
def test_mixed_python_and_pandas_types():
data_before = TEST_DATA_FRAMES
buf = dumps(data_before)
data_after = loads(buf)
assert isinstance(data_after, tuple)
assert len(data_after) == len(TEST_DATA_FRAMES)
assert len(data_before) == len(data_after)
for df_before, df_after in zip(data_before, data_after):
assert_frame_equal_strict(df_before, df_after)
def test_build_index_handler_for_type():
for index_class in ():
handler_cls = build_index_handler_for_type(index_class)
assert inspect.isclass(handler_cls)
assert hasattr(handler_cls, 'flatten')
assert hasattr(handler_cls, 'restore')
with pytest.raises(TypeError):
build_index_handler_for_type(pd.DatetimeIndex)
with pytest.raises(TypeError):
build_index_handler_for_type(pd.TimeSeries)
@pytest.mark.xfail(reason='failing to preserve underlying array state when it is wrapped inside a Pandas object')
def test_preservation_of_specific_array_ordering():
df_c = pd.DataFrame(np.array([[1,2],[3,4], [5,6]], order='C'))
df_c_after = loads(dumps(df_c))
assert_frame_equal_strict(df_c, df_c_after)
assert_equal(df_c.values, df_c_after.values)
assert not df_c.values.flags.fortran
assert not df_c_after.values.flags.fortran
_test_in_subprocess(df_c)
df_f = pd.DataFrame(np.array([[1,2],[3,4], [5,6]], order='F'))
df_f_after = loads(dumps(df_f))
assert_frame_equal_strict(df_f, df_f_after)
assert_equal(df_f.values, df_f_after.values)
assert df_f.values.flags.fortran
assert df_f_after.values.flags.fortran
_test_in_subprocess(df_f)
def test_preservation_of_specific_array_ordering_simple():
arr_c = np.array([[1,2],[3,4], [5,6]], order='C')
arr_f = np.array([[1,2],[3,4], [5,6]], order='F')
assert_equal(arr_c, arr_f)
assert arr_c.strides != arr_f.strides
# C array ordering
arr_c_after = loads(dumps(arr_c))
assert arr_c.strides == arr_c_after.strides
assert not arr_c.flags.fortran
assert not arr_c_after.flags.fortran
assert_equal(arr_c, arr_c_after)
_test_in_subprocess(arr_c)
# Fortran array order
arr_f_after = loads(dumps(arr_f))
assert arr_f.strides == arr_f_after.strides
assert arr_f.flags.fortran
assert arr_f_after.flags.fortran
assert_equal(arr_f, arr_f_after)
_test_in_subprocess(arr_f)
@pytest.mark.parametrize("val", [np.float64(4.2), np.int64(5)])
def test_number(val):
dumped = dumps(val)
loaded = loads(dumped)
assert loaded == val
assert type(loaded) == type(val)
_test_in_subprocess(val)
def test_datetime_identity():
import datetime
date = datetime.datetime(2013, 11, 1, 0, 0)
val = {
'start': date,
'end': date,
'd': {"ttf": pd.TimeSeries([1.],
pd.date_range("1970-1-1", periods=1, freq='S'))
}
}
dumped = dumps(val)
loaded = loads(dumped)
assert loaded["start"] == val["start"], dumped
assert loaded["end"] == val["end"]
assert loaded["end"] == val["end"]
_test_in_subprocess(val)
|
almeidapaulopt/erpnext | refs/heads/develop | erpnext/hr/doctype/training_program/__init__.py | 12133432 | |
bregydoc/detechAlgorithm | refs/heads/master | core/registrationOfImages.py | 1 | import argparse
from os import listdir
import registro1 as reg
import images_operations as ops
from PIL import Image
import cv2
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help='Input folder', required=True)
parser.add_argument('-o', '--output', help='output folder', required=True)
args = parser.parse_args()
print args
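# Example invocation (folder names are placeholders; trailing slashes matter,
# since paths are concatenated with simple string addition below):
#   python registrationOfImages.py -i ./rawScans/ -o ./registered/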
inputFolder = args.input
outputFolder = args.output
files = [f for f in listdir(inputFolder)]
mediumImage, nameOfMediumImage = ops.calculateTheMostNearImageToMeanImage(inputFolder)
ops.flipAllBTypeImagesFromFolder(inputFolder, 'y')
for f in files:
# img = np.asarray(Image.open(inputFolder+f))
# protoImg = reg.RigidRegistration(mediumImage, img, 'correlation', 'grad-desc')
protoImg = reg.RigidRegistration(inputFolder+nameOfMediumImage, inputFolder+f, 'correlation', 'grad-desc')
registeredImage = Image.fromarray(protoImg, 'L')
if outputFolder[-1] == '/':
# registeredImage.save(outputFolder+f)
cv2.imwrite(outputFolder+f, protoImg)
else:
# registeredImage.save(outputFolder+'/'+f)
cv2.imwrite(outputFolder+'/'+f, protoImg) |
keedio/hue | refs/heads/master | desktop/core/ext-py/lxml/ez_setup.py | 358 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c9"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
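# Hedged usage sketch (illustration only; not part of the original script).
# A project's setup.py would typically bootstrap setuptools like this before
# importing setup(); '_example_setup_py_usage' and the package metadata below
# are hypothetical.
def _example_setup_py_usage():
    use_setuptools()  # ensure a recent enough setuptools is importable
    from setuptools import setup
    setup(name='example-package', version='0.1', py_modules=[])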
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
|
ujjwalwahi/odoo | refs/heads/8.0 | addons/base_action_rule/tests/base_action_rule_test.py | 395 | from openerp import SUPERUSER_ID
from openerp.tests import common
from .. import test_models
class base_action_rule_test(common.TransactionCase):
def setUp(self):
"""*****setUp*****"""
super(base_action_rule_test, self).setUp()
cr, uid = self.cr, self.uid
self.demo = self.registry('ir.model.data').get_object(cr, uid, 'base', 'user_demo').id
self.admin = SUPERUSER_ID
self.model = self.registry('base.action.rule.lead.test')
self.base_action_rule = self.registry('base.action.rule')
def create_filter_done(self, cr, uid, context=None):
filter_pool = self.registry('ir.filters')
return filter_pool.create(cr, uid, {
'name': "Lead is in done state",
'is_default': False,
'model_id': 'base.action.rule.lead.test',
'domain': "[('state','=','done')]",
}, context=context)
def create_filter_draft(self, cr, uid, context=None):
filter_pool = self.registry('ir.filters')
return filter_pool.create(cr, uid, {
'name': "Lead is in draft state",
'is_default': False,
'model_id': "base.action.rule.lead.test",
'domain' : "[('state','=','draft')]",
}, context=context)
def create_lead_test_1(self, cr, uid, context=None):
"""
Create a new lead_test
"""
return self.model.create(cr, uid, {
'name': "Lead Test 1",
'user_id': self.admin,
}, context=context)
def create_rule(self, cr, uid, kind, filter_id=False, filter_pre_id=False, context=None):
"""
The "Rule 1" says that when a lead goes to the 'draft' state, the responsible for that lead changes to user "demo"
"""
return self.base_action_rule.create(cr,uid,{
'name': "Rule 1",
'model_id': self.registry('ir.model').search(cr, uid, [('model','=','base.action.rule.lead.test')], context=context)[0],
'kind': kind,
'filter_pre_id': filter_pre_id,
'filter_id': filter_id,
'act_user_id': self.demo,
}, context=context)
def delete_rules(self, cr, uid, context=None):
""" delete all the rules on model 'base.action.rule.lead.test' """
action_ids = self.base_action_rule.search(cr, uid, [('model', '=', self.model._name)], context=context)
return self.base_action_rule.unlink(cr, uid, action_ids, context=context)
def test_00_check_to_state_draft_pre(self):
"""
        Check that a new record (with state = draft) doesn't change its responsible when there is a precondition filter which checks that the state is draft.
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft)
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.admin)
self.delete_rules(cr, uid)
def test_01_check_to_state_draft_post(self):
"""
        Check that a new record changes its responsible when there is a postcondition filter which checks that the state is draft.
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
self.create_rule(cr, uid, 'on_create')
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.demo)
self.delete_rules(cr, uid)
def test_02_check_from_draft_to_done_with_steps(self):
"""
        A new record will be created and will go from draft to done state via the other states (open, pending and cancel).
        We will create a rule whose precondition says that the record must be in the "draft" state, while a postcondition filter says
        that the record must be done. If the state goes from 'draft' to 'done', the responsible will change. If those two conditions aren't
        both verified, the responsible will stay the same.
The responsible in that test will never change
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
filter_done = self.create_filter_done(cr, uid)
self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to open and check that responsible doen't change"""
new_lead.write({'state': 'open'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'open')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to pending and check that responsible doen't change"""
new_lead.write({'state': 'pending'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'pending')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to cancel and check that responsible doen't change"""
new_lead.write({'state': 'cancel'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'cancel')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to done and check that responsible doen't change """
new_lead.write({'state': 'done'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'done')
self.assertEquals(new_lead.user_id.id, self.admin)
self.delete_rules(cr, uid)
def test_02_check_from_draft_to_done_without_steps(self):
"""
        A new record will be created and will go from draft to done in one operation.
        We will create a rule whose precondition says that the record must be in the "draft" state, while a postcondition filter says
        that the record must be done. If the state goes from 'draft' to 'done', the responsible will change. If those two conditions aren't
        both verified, the responsible will stay the same.
The responsible in that test will change to user "demo"
"""
cr, uid = self.cr, self.uid
filter_draft = self.create_filter_draft(cr, uid)
filter_done = self.create_filter_done(cr, uid)
self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)
new_lead_id = self.create_lead_test_1(cr, uid)
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'draft')
self.assertEquals(new_lead.user_id.id, self.admin)
""" change the state of new_lead to done and check that responsible change to Demo_user"""
new_lead.write({'state': 'done'})
new_lead = self.model.browse(cr, uid, new_lead_id)
self.assertEquals(new_lead.state, 'done')
self.assertEquals(new_lead.user_id.id, self.demo)
self.delete_rules(cr, uid)
|
gamechanger/docker-py | refs/heads/master | docker/utils/__init__.py | 21 | from .utils import (
compare_version, convert_port_bindings, convert_volume_binds,
mkbuildcontext, tar, parse_repository_tag, parse_host,
kwargs_from_env, convert_filters, create_host_config,
create_container_config, parse_bytes, ping_registry
) # flake8: noqa
from .types import Ulimit, LogConfig # flake8: noqa
from .decorators import check_resource #flake8: noqa
|
dreamsxin/ubuntu-tweak | refs/heads/master | ubuntutweak/common/inifile.py | 5 | """
Base Class for DesktopEntry, IconTheme and IconData
"""
import os.path
import codecs
class IniFile:
filename = ''
def __init__(self, filename=None):
self.content = dict()
if filename:
self.parse(filename)
def parse(self, filename):
# for performance reasons
content = self.content
if not os.path.isfile(filename):
return
# parse file
        try:
            # probe that the file can be opened; the handle is discarded and
            # the actual parsing below re-opens the file for iteration
            file(filename, 'r')
except IOError:
return
for line in file(filename,'r'):
line = line.strip()
# empty line
if not line:
continue
# comment
elif line[0] == '#':
continue
# key
else:
index = line.find("=")
key = line[0:index].strip()
value = line[index+1:].strip()
if self.hasKey(key):
continue
else:
content[key] = value
self.filename = filename
def get(self, key):
if key not in self.content.keys():
self.set(key, "")
return self.content[key]
def write(self, filename = None):
if not filename and not self.filename:
return
if filename:
self.filename = filename
else:
filename = self.filename
if not os.path.isdir(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
fp = codecs.open(filename, 'w')
for (key, value) in self.content.items():
fp.write("%s=%s\n" % (key, value))
fp.write("\n")
def set(self, key, value):
self.content[key] = value
def removeKey(self, key):
for (name, value) in self.content.items():
if key == name:
del self.content[name]
def hasKey(self, key):
if self.content.has_key(key):
return True
else:
return False
def getFileName(self):
return self.filename
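# Hedged usage sketch (illustration only; not part of the original module).
# '_example_inifile_usage' is a hypothetical helper and the paths are made up.
def _example_inifile_usage():
    ini = IniFile()
    ini.parse('/tmp/example.theme')  # missing files are silently ignored
    ini.set('Name', 'Example')
    print ini.get('Name')  # -> 'Example'
    ini.write('/tmp/example-copy.theme')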
|
JoeyDeRosa/TrollPy | refs/heads/master | trollpy/models/meta.py | 1 | """Meta."""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import MetaData
# Recommended naming convention used by Alembic, as various different database
# providers will autogenerate vastly different names making migrations more
# difficult. See: http://alembic.zzzcomputing.com/en/latest/naming.html
NAMING_CONVENTION = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=NAMING_CONVENTION)
Base = declarative_base(metadata=metadata)
|
brain-tec/account-financial-reporting | refs/heads/8.0 | account_financial_report_webkit_xls/tests/test_trial_balance_xls.py | 3 | # -*- coding: utf-8 -*-
# Copyright 2009-2017 Noviat.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from .test_common_xls import TestCommonXls
class TestTrialBalanceXls(TestCommonXls):
def _getReportModel(self):
return 'trial.balance.webkit'
def _getXlsReportName(self):
return 'account.account_report_trial_balance_xls'
def _getXlsReportActionName(self):
module = 'account_financial_report_webkit'
action = 'account_report_trial_balance_webkit'
return '%s.%s' % (module, action)
def _getBaseFilters(self):
return {}
def test_common(self):
common_tests = [
x for x in dir(self)
if callable(getattr(self, x)) and x.startswith('common_test_')]
for test in common_tests:
getattr(self, test)()
|
ComplexNetTSP/CooperativeNetworking | refs/heads/master | complex_systems/mobility/stabrnd.py | 1 | #-------------------------------------------------------------------------------
# Copyright (c) 2012 Vincent Gauthier.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#-------------------------------------------------------------------------------
__author__ = """\n""".join(['Vincent Gauthier <[email protected]>'])
__all__ = ['stabrnd']
def stabrnd(alpha, beta, c, delta, m, n):
'''
Stable Random Number Generator (McCulloch 12/18/96) based on the paper [Cha71]_
Returns m x n matrix of iid stable random numbers with characteristic
exponent alpha in [.1,2], skewness parameter beta in [-1,1], scale c > 0,
and location parameter delta. Based on the method of [Cha71]_ .
    .. [Cha71] J.M. Chambers, C. L. Mallows and B. W. Stuck, "A Method for Simulating Stable Random Variables," JASA 71 (1976): 340-4.
:History of this Code:
- Encoded in MATLAB by J. Huston McCulloch, Ohio State University Econ. Dept. ([email protected]).
- Encoded in Python by V. Gauthier, Telecom SudParis, CNRS Lab ([email protected])
:Parameters:
- `alpha` : float
Characteristic exponent in [.1,2]
- `beta` : float
Skewness in [-1,1]
- `c` : float
Scale c > 0
- `delta` : float
Location parameter
- `m, n` : int
Dimension of the matrix resultat
:Returns:
- `x` : matrix
.. note:: The CMS method is applied in such a way that x will have the log characteristic function
.. math::
        \\log E\\left[ e^{ixt} \\right] = i \\delta t + \\psi( c t ),
where
.. math::
\\psi(t) = -|t|^{\\alpha} \\cdot [ 1 - i\\beta \\cdot sign(t) \\cdot \\tan(\\pi \\alpha/2) ],\\ for\\ \\alpha \\neq 1
.. math::
        \\psi(t) = -|t| \\cdot [1 + i \\beta (2/\\pi) \\cdot sign(t) \\cdot \\log |t| ],\\ for\\ \\alpha = 1.
With this parameterization, the stable cdf, see [Mcc96]_ for details.
.. math::
S(x; \\alpha, \\beta,c, \\delta) = S((x-\\delta)/c; \\alpha, \\beta, 1, 0).
    When :math:`\\alpha = 2`, the distribution is Gaussian with mean delta and variance :math:`2 c^2`, and beta has no effect.
When :math:`\\alpha > 1`, the mean is delta for all :math:`\\beta`.
When :math:`\\alpha <= 1`, the mean is undefined.
When :math:`\\beta = 0`, the distribution is symmetrical and delta is the median for all :math:`\\alpha`.
When :math:`\\alpha = 1` and :math:`\\beta = 0`, the distribution is Cauchy (arctangent) with median :math:`\\delta`.
When the submitted :math:`\\alpha` is > 2 or < .1, or :math:`\\beta` is outside [-1,1], an
error message is generated and x is returned as a matrix of NaNs.
:math:`\\alpha < 0.1` is not allowed here because of the non-negligible probability of overflows.
If you're only interested in the symmetric cases, you may just set :math:`\\beta = 0`
and skip the following considerations:
When :math:`\\beta > 0, (< 0)`, the distribution is skewed to the right (left).
    When :math:`\\alpha < 1`, :math:`\\delta`, as defined above, is the unique fractile that is
invariant under averaging of iid contributions. I call such a fractile a
"focus of stability." This, like the mean, is a natural location parameter.
When :math:`\\alpha = 1`, either every fractile is a focus of stability, as in the
:math:`\\beta = 0` Cauchy case, or else there is no focus of stability at all, as is
the case for :math:`\\beta ~= 0`. In the latter cases, which I call "afocal," delta is
just an arbitrary fractile that has a simple relation to the c.f.
    When :math:`\\alpha > 1` and :math:`\\beta > 0`, med(x) must lie very far below the mean as
    alpha approaches 1 from above. Furthermore, as alpha approaches 1 from below,
med(x) must lie very far above the focus of stability when :math:`\\beta > 0`. If :math:`\\beta
~= 0`, there is therefore a discontinuity in the distribution as a function
of alpha as alpha passes 1, when delta is held constant. CMS, following an
insight of Vladimir Zolotarev, remove this discontinuity by subtracting:
.. math::
\\beta \\cdot c \\cdot \\tan(\\pi \\cdot \\alpha/2)
in their program RSTAB, a.k.a. RNSTA in IMSL (formerly GGSTA). The result is
a random number whose distribution is a continuous function of alpha, but
whose location parameter (which I call zeta) is a shifted version of delta
that has no known interpretation other than computational convenience.
The present program restores the more meaningful :math:`\\delta` parameterization by
using the CMS (4.1), but with :math:`\\beta \\cdot c \\cdot tan(\\pi \\alpha/2)` added back in (ie with
their initial :math:`tan(\\alpha \\phi_0)` deleted). RNSTA therefore gives different
results than the present program when :math:`\\beta ~= 0`. However, the present beta
is equivalent to the CMS beta' (BPRIME).
Rather than using the CMS D2 and exp2 functions to compensate for the ill-
condition of the CMS (4.1) when :math:`\\alpha` is very near 1, the present program
merely fudges these cases by computing :math:`x` from their (2.4) and adjusting for
:math:`\\beta \\cdot c \\cdot tan(\\pi \\alpha/2)` when alpha is within 1.e-8 of 1. This should make no
difference for simulation results with samples of size less than
approximately 10^8, and then only when the desired alpha is within 1.e-8 of
1, but not equal to 1.
The frequently used Gaussian and symmetric cases are coded
separately so as to speed up execution.
**References**
.. [Mcc96] J.H. McCulloch, "On the parametrization of the afocal stable distributions," Bull. London Math. Soc. 28 (1996): 651-55,
.. [Zov86] V.M. Zolotarev, "One Dimensional Stable Laws," Amer. Math. Soc., 1986.
.. [Sam94] G. Samorodnitsky and M.S. Taqqu, "Stable Non-Gaussian Random Processes," Chapman & Hill, 1994.
    .. [Jan94] A. Janicki and A. Weron, "Simulation and Chaotic Behavior of Alpha-Stable Stochastic Processes," Dekker, 1994.
    .. [Mcc97] J.H. McCulloch, "Financial Applications of Stable Distributions," Handbook of Statistics, Vol. 14, 1997.
'''
import numpy as N
import numpy.random as R
# Error traps
if alpha < .1 or alpha > 2 :
print 'Alpha must be in [.1,2] for function stabrnd.'
x = N.nan * N.zeros((n,m))
return x
if N.abs(beta) > 1 :
print 'Beta must be in [-1,1] for function stabrnd.'
x = N.nan * N.zeros((n,m))
return x
# Generate exponential w and uniform phi:
w = -N.log(R.rand(m,n))
phi = (R.rand(m,n) - 0.5) * N.pi
# Gaussian case (Box-Muller):
if alpha == 2:
x = (2*N.sqrt(w) * N.sin(phi))
x = delta + c*x
return x
# Symmetrical cases:
if beta == 0:
if alpha == 1: # Cauchy case
x = N.tan(phi)
else:
x = ((N.cos((1-alpha)*phi) / w) ** (1/alpha - 1) * N.sin(alpha * phi) / N.cos(phi) ** (1/alpha))
# General cases:
else:
cosphi = N.cos(phi)
if N.abs(alpha-1) > 1.e-8:
zeta = beta * N.tan(N.pi*alpha/2)
aphi = alpha * phi
a1phi = (1 - alpha) * phi
x = ((N.sin(aphi) + zeta * N.cos(aphi)) / cosphi) * ((N.cos(a1phi) + zeta * N.sin(a1phi)) / (w * cosphi)) ** ((1-alpha)/alpha)
else:
bphi = (N.pi/2) + beta * phi
x = (2/N.pi) * (bphi * N.tan(phi) - beta * N.log((N.pi/2) * w * cosphi / bphi))
if alpha != 1:
x = x + beta * N.tan(N.pi * alpha/2)
# Finale
x = delta + c * x
return x
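# Hedged usage sketch (illustration only; not part of the original module).
# '_example_stabrnd_usage' is a hypothetical helper: it draws a 4x2 matrix of
# symmetric alpha-stable variates (beta=0) with unit scale and zero location.
def _example_stabrnd_usage():
    x = stabrnd(1.5, 0.0, 1.0, 0.0, 4, 2)
    print x.shape  # -> (4, 2)
    print x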
|
michaelgallacher/intellij-community | refs/heads/master | python/testData/copyPaste/SelectionOneLine.dst.py | 83 | class MyClass(object):
member1 = 1
<selection> member2 = 2
<caret></selection> member3 = 3 |
leuschel/logen | refs/heads/master | old_logen/pylogen/Pmw/Pmw_1_2/lib/PmwBalloon.py | 6 | import os
import string
import Tkinter
import Pmw
class Balloon(Pmw.MegaToplevel):
def __init__(self, parent = None, **kw):
# Define the megawidget options.
optiondefs = (
('initwait', 500, None), # milliseconds
('label_background', 'lightyellow', None),
('label_foreground', 'black', None),
('label_justify', 'left', None),
('master', 'parent', None),
('relmouse', 'none', self._relmouse),
('state', 'both', self._state),
('statuscommand', None, None),
('xoffset', 20, None), # pixels
('yoffset', 1, None), # pixels
('hull_highlightthickness', 1, None),
('hull_highlightbackground', 'black', None),
)
self.defineoptions(kw, optiondefs)
# Initialise the base class (after defining the options).
Pmw.MegaToplevel.__init__(self, parent)
self.withdraw()
self.overrideredirect(1)
# Create the components.
interior = self.interior()
self._label = self.createcomponent('label',
(), None,
Tkinter.Label, (interior,))
self._label.pack()
# The default hull configuration options give a black border
# around the balloon, but avoids a black 'flash' when the
# balloon is deiconified, before the text appears.
if not kw.has_key('hull_background'):
self.configure(hull_background = \
str(self._label.cget('background')))
# Initialise instance variables.
self._timer = None
# The widget or item that is currently triggering the balloon.
# It is None if the balloon is not being displayed. It is a
# one-tuple if the balloon is being displayed in response to a
# widget binding (value is the widget). It is a two-tuple if
# the balloon is being displayed in response to a canvas or
# text item binding (value is the widget and the item).
self._currentTrigger = None
# Check keywords and initialise options.
self.initialiseoptions()
def destroy(self):
if self._timer is not None:
self.after_cancel(self._timer)
self._timer = None
Pmw.MegaToplevel.destroy(self)
def bind(self, widget, balloonHelp, statusHelp = None):
# If a previous bind for this widget exists, remove it.
self.unbind(widget)
if balloonHelp is None and statusHelp is None:
return
if statusHelp is None:
statusHelp = balloonHelp
enterId = widget.bind('<Enter>',
lambda event, self = self, w = widget,
sHelp = statusHelp, bHelp = balloonHelp:
self._enter(event, w, sHelp, bHelp, 0))
# Set Motion binding so that if the pointer remains at rest
# within the widget until the status line removes the help and
# then the pointer moves again, then redisplay the help in the
# status line.
# Note: The Motion binding only works for basic widgets, and
# the hull of megawidgets but not for other megawidget components.
motionId = widget.bind('<Motion>',
lambda event = None, self = self, statusHelp = statusHelp:
self.showstatus(statusHelp))
leaveId = widget.bind('<Leave>', self._leave)
buttonId = widget.bind('<ButtonPress>', self._buttonpress)
# Set Destroy binding so that the balloon can be withdrawn and
# the timer can be cancelled if the widget is destroyed.
destroyId = widget.bind('<Destroy>', self._destroy)
# Use the None item in the widget's private Pmw dictionary to
# store the widget's bind callbacks, for later clean up.
if not hasattr(widget, '_Pmw_BalloonBindIds'):
widget._Pmw_BalloonBindIds = {}
widget._Pmw_BalloonBindIds[None] = \
(enterId, motionId, leaveId, buttonId, destroyId)
def unbind(self, widget):
if hasattr(widget, '_Pmw_BalloonBindIds'):
if widget._Pmw_BalloonBindIds.has_key(None):
(enterId, motionId, leaveId, buttonId, destroyId) = \
widget._Pmw_BalloonBindIds[None]
# Need to pass in old bindings, so that Tkinter can
# delete the commands. Otherwise, memory is leaked.
widget.unbind('<Enter>', enterId)
widget.unbind('<Motion>', motionId)
widget.unbind('<Leave>', leaveId)
widget.unbind('<ButtonPress>', buttonId)
widget.unbind('<Destroy>', destroyId)
del widget._Pmw_BalloonBindIds[None]
if self._currentTrigger is not None and len(self._currentTrigger) == 1:
# The balloon is currently being displayed and the current
# trigger is a widget.
triggerWidget = self._currentTrigger[0]
if triggerWidget == widget:
if self._timer is not None:
self.after_cancel(self._timer)
self._timer = None
self.withdraw()
self.clearstatus()
self._currentTrigger = None
def tagbind(self, widget, tagOrItem, balloonHelp, statusHelp = None):
# If a previous bind for this widget's tagOrItem exists, remove it.
self.tagunbind(widget, tagOrItem)
if balloonHelp is None and statusHelp is None:
return
if statusHelp is None:
statusHelp = balloonHelp
enterId = widget.tag_bind(tagOrItem, '<Enter>',
lambda event, self = self, w = widget,
sHelp = statusHelp, bHelp = balloonHelp:
self._enter(event, w, sHelp, bHelp, 1))
motionId = widget.tag_bind(tagOrItem, '<Motion>',
lambda event = None, self = self, statusHelp = statusHelp:
self.showstatus(statusHelp))
leaveId = widget.tag_bind(tagOrItem, '<Leave>', self._leave)
buttonId = widget.tag_bind(tagOrItem, '<ButtonPress>', self._buttonpress)
# Use the tagOrItem item in the widget's private Pmw dictionary to
# store the tagOrItem's bind callbacks, for later clean up.
if not hasattr(widget, '_Pmw_BalloonBindIds'):
widget._Pmw_BalloonBindIds = {}
widget._Pmw_BalloonBindIds[tagOrItem] = \
(enterId, motionId, leaveId, buttonId)
def tagunbind(self, widget, tagOrItem):
if hasattr(widget, '_Pmw_BalloonBindIds'):
if widget._Pmw_BalloonBindIds.has_key(tagOrItem):
(enterId, motionId, leaveId, buttonId) = \
widget._Pmw_BalloonBindIds[tagOrItem]
widget.tag_unbind(tagOrItem, '<Enter>', enterId)
widget.tag_unbind(tagOrItem, '<Motion>', motionId)
widget.tag_unbind(tagOrItem, '<Leave>', leaveId)
widget.tag_unbind(tagOrItem, '<ButtonPress>', buttonId)
del widget._Pmw_BalloonBindIds[tagOrItem]
if self._currentTrigger is None:
# The balloon is not currently being displayed.
return
if len(self._currentTrigger) == 1:
# The current trigger is a widget.
return
if len(self._currentTrigger) == 2:
# The current trigger is a canvas item.
(triggerWidget, triggerItem) = self._currentTrigger
if triggerWidget == widget and triggerItem == tagOrItem:
if self._timer is not None:
self.after_cancel(self._timer)
self._timer = None
self.withdraw()
self.clearstatus()
self._currentTrigger = None
else: # The current trigger is a text item.
(triggerWidget, x, y) = self._currentTrigger
if triggerWidget == widget:
currentPos = widget.index('@%d,%d' % (x, y))
currentTags = widget.tag_names(currentPos)
if tagOrItem in currentTags:
if self._timer is not None:
self.after_cancel(self._timer)
self._timer = None
self.withdraw()
self.clearstatus()
self._currentTrigger = None
def showstatus(self, statusHelp):
if self['state'] in ('status', 'both'):
cmd = self['statuscommand']
if callable(cmd):
cmd(statusHelp)
def clearstatus(self):
self.showstatus(None)
def _state(self):
if self['state'] not in ('both', 'balloon', 'status', 'none'):
raise ValueError, 'bad state option ' + repr(self['state']) + \
': should be one of \'both\', \'balloon\', ' + \
'\'status\' or \'none\''
def _relmouse(self):
if self['relmouse'] not in ('both', 'x', 'y', 'none'):
raise ValueError, 'bad relmouse option ' + repr(self['relmouse'])+ \
': should be one of \'both\', \'x\', ' + '\'y\' or \'none\''
def _enter(self, event, widget, statusHelp, balloonHelp, isItem):
# Do not display balloon if mouse button is pressed. This
# will only occur if the button was pressed inside a widget,
# then the mouse moved out of and then back into the widget,
# with the button still held down. The number 0x1f00 is the
# button mask for the 5 possible buttons in X.
buttonPressed = (event.state & 0x1f00) != 0
if not buttonPressed and balloonHelp is not None and \
self['state'] in ('balloon', 'both'):
if self._timer is not None:
self.after_cancel(self._timer)
self._timer = None
self._timer = self.after(self['initwait'],
lambda self = self, widget = widget, help = balloonHelp,
isItem = isItem:
self._showBalloon(widget, help, isItem))
if isItem:
if hasattr(widget, 'canvasx'):
# The widget is a canvas.
item = widget.find_withtag('current')
if len(item) > 0:
item = item[0]
else:
item = None
self._currentTrigger = (widget, item)
else:
# The widget is a text widget.
self._currentTrigger = (widget, event.x, event.y)
else:
self._currentTrigger = (widget,)
self.showstatus(statusHelp)
def _leave(self, event):
if self._timer is not None:
self.after_cancel(self._timer)
self._timer = None
self.withdraw()
self.clearstatus()
self._currentTrigger = None
def _destroy(self, event):
# Only withdraw the balloon and cancel the timer if the widget
# being destroyed is the widget that triggered the balloon.
# Note that in a Tkinter Destroy event, the widget field is a
# string and not a widget as usual.
if self._currentTrigger is None:
# The balloon is not currently being displayed
return
if len(self._currentTrigger) == 1:
# The current trigger is a widget (not an item)
triggerWidget = self._currentTrigger[0]
if str(triggerWidget) == event.widget:
if self._timer is not None:
self.after_cancel(self._timer)
self._timer = None
self.withdraw()
self.clearstatus()
self._currentTrigger = None
def _buttonpress(self, event):
if self._timer is not None:
self.after_cancel(self._timer)
self._timer = None
self.withdraw()
self._currentTrigger = None
def _showBalloon(self, widget, balloonHelp, isItem):
self._label.configure(text = balloonHelp)
# First, display the balloon offscreen to get dimensions.
screenWidth = self.winfo_screenwidth()
screenHeight = self.winfo_screenheight()
self.geometry('+%d+0' % (screenWidth + 1))
self.update_idletasks()
if isItem:
# Get the bounding box of the current item.
bbox = widget.bbox('current')
if bbox is None:
# The item that triggered the balloon has disappeared,
                # perhaps by a user's timer event that occurred between
# the <Enter> event and the 'initwait' timer calling
# this method.
return
# The widget is either a text or canvas. The meaning of
# the values returned by the bbox method is different for
# each, so use the existence of the 'canvasx' method to
# distinguish between them.
if hasattr(widget, 'canvasx'):
# The widget is a canvas. Place balloon under canvas
# item. The positions returned by bbox are relative
# to the entire canvas, not just the visible part, so
# need to convert to window coordinates.
leftrel = bbox[0] - widget.canvasx(0)
toprel = bbox[1] - widget.canvasy(0)
bottomrel = bbox[3] - widget.canvasy(0)
else:
# The widget is a text widget. Place balloon under
# the character closest to the mouse. The positions
# returned by bbox are relative to the text widget
# window (ie the visible part of the text only).
leftrel = bbox[0]
toprel = bbox[1]
bottomrel = bbox[1] + bbox[3]
else:
leftrel = 0
toprel = 0
bottomrel = widget.winfo_height()
xpointer, ypointer = widget.winfo_pointerxy() # -1 if off screen
if xpointer >= 0 and self['relmouse'] in ('both', 'x'):
x = xpointer
else:
x = leftrel + widget.winfo_rootx()
x = x + self['xoffset']
if ypointer >= 0 and self['relmouse'] in ('both', 'y'):
y = ypointer
else:
y = bottomrel + widget.winfo_rooty()
y = y + self['yoffset']
edges = (string.atoi(str(self.cget('hull_highlightthickness'))) +
string.atoi(str(self.cget('hull_borderwidth')))) * 2
if x + self._label.winfo_reqwidth() + edges > screenWidth:
x = screenWidth - self._label.winfo_reqwidth() - edges
if y + self._label.winfo_reqheight() + edges > screenHeight:
if ypointer >= 0 and self['relmouse'] in ('both', 'y'):
y = ypointer
else:
y = toprel + widget.winfo_rooty()
y = y - self._label.winfo_reqheight() - self['yoffset'] - edges
Pmw.setgeometryanddeiconify(self, '+%d+%d' % (x, y))
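# Hedged usage sketch (illustration only; not part of the original module).
# '_example_balloon_usage' is a hypothetical helper; applications would
# normally use this class via Pmw.Balloon after initialising Pmw.
def _example_balloon_usage():
    root = Tkinter.Tk()
    Pmw.initialise(root)
    balloon = Balloon(root, relmouse='both')
    button = Tkinter.Button(root, text='Save')
    button.pack()
    balloon.bind(button, 'Save the current file')
    root.mainloop()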
|
gojira/tensorflow | refs/heads/master | tensorflow/python/keras/datasets/boston_housing.py | 23 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Boston housing price regression dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.datasets.boston_housing.load_data')
def load_data(path='boston_housing.npz', test_split=0.2, seed=113):
"""Loads the Boston Housing dataset.
Arguments:
path: path where to cache the dataset locally
(relative to ~/.keras/datasets).
test_split: fraction of the data to reserve as test set.
seed: Random seed for shuffling the data
before computing the test split.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
assert 0 <= test_split < 1
origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
path = get_file(
path,
origin=origin_folder + 'boston_housing.npz',
file_hash=
'f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5')
with np.load(path) as f:
x = f['x']
y = f['y']
np.random.seed(seed)
indices = np.arange(len(x))
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
x_train = np.array(x[:int(len(x) * (1 - test_split))])
y_train = np.array(y[:int(len(x) * (1 - test_split))])
x_test = np.array(x[int(len(x) * (1 - test_split)):])
y_test = np.array(y[int(len(x) * (1 - test_split)):])
return (x_train, y_train), (x_test, y_test)
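# Hedged usage sketch (illustration only; not part of the original module).
# '_example_load_data_usage' is a hypothetical helper showing the typical call.
def _example_load_data_usage():
  # The first call downloads and caches ~/.keras/datasets/boston_housing.npz.
  (x_train, y_train), (x_test, y_test) = load_data(test_split=0.2)
  print(x_train.shape, y_train.shape)  # e.g. (404, 13) (404,)
  print(x_test.shape, y_test.shape)    # e.g. (102, 13) (102,)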
|
lukasjapan/bt-speaker | refs/heads/master | bt_manager/__init__.py | 1 | from distutils.version import StrictVersion
import cffi
import os
__version__ = '0.3.1'
if StrictVersion(cffi.__version__) < StrictVersion('0.7'):
raise RuntimeError(
'bt_manager requires cffi >= 0.7, but found %s' % cffi.__version__)
ffi = cffi.FFI()
cwd = os.path.dirname(__file__)
header_file = os.path.join(cwd, 'rtpsbc.h')
with open(header_file) as fh:
header = fh.read()
ffi.cdef(header)
fh.close()
# from bt_manager.adapter import BTAdapter # noqa
# from bt_manager.agent import BTAgent # noqa
# from bt_manager.attributes import ATTRIBUTES # noqa
from bt_manager.audio import BTAudio, BTAudioSource # noqa
from bt_manager.audio import BTAudioSink, SBCAudioCodec # noqa
from bt_manager.audio import SBCAudioSource, SBCAudioSink # noqa
# from bt_manager.cod import BTCoD # noqa
from bt_manager.codecs import * # noqa
# from bt_manager.control import BTControl # noqa
# from bt_manager.device import BTGenericDevice, BTDevice # noqa
# from bt_manager.discovery import BTDiscoveryInfo # noqa
from bt_manager.exceptions import * # noqa
# from bt_manager.headset import BTHeadset # noqa
# from bt_manager.headset import BTHeadsetGateway # noqa
from bt_manager.interface import BTSimpleInterface # noqa
from bt_manager.interface import BTInterface # noqa
# from bt_manager.manager import BTManager # noqa
from bt_manager.media import BTMedia, BTMediaTransport # noqa
# from bt_manager.input import BTInput # noqa
from bt_manager.serviceuuids import SERVICES # noqa
from bt_manager.uuid import BTUUID, BTUUID16, BTUUID32 # noqa
from bt_manager.uuid import BASE_UUID # noqa
# from bt_manager.vendors import VENDORS # noqa
|
spblightadv/rethinkdb | refs/heads/next | test/rql_test/connections/http_support/werkzeug/_compat.py | 448 | import sys
import operator
import functools
try:
import builtins
except ImportError:
import __builtin__ as builtins
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if PY2:
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
int_to_byte = chr
iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
iter_bytes = lambda x: iter(x)
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def fix_tuple_repr(obj):
def __repr__(self):
cls = self.__class__
return '%s(%s)' % (cls.__name__, ', '.join(
'%s=%r' % (field, self[index])
for index, field in enumerate(cls._fields)
))
obj.__repr__ = __repr__
return obj
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def native_string_result(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs).encode('utf-8')
return functools.update_wrapper(wrapper, func)
def implements_bool(cls):
cls.__nonzero__ = cls.__bool__
del cls.__bool__
return cls
from itertools import imap, izip, ifilter
range_type = xrange
from StringIO import StringIO
from cStringIO import StringIO as BytesIO
NativeStringIO = BytesIO
def make_literal_wrapper(reference):
return lambda x: x
def normalize_string_tuple(tup):
"""Normalizes a string tuple to a common type. Following Python 2
rules, upgrades to unicode are implicit.
"""
if any(isinstance(x, text_type) for x in tup):
return tuple(to_unicode(x) for x in tup)
return tup
def try_coerce_native(s):
"""Try to coerce a unicode string to native if possible. Otherwise,
leave it as unicode.
"""
try:
return str(s)
except UnicodeError:
return s
wsgi_get_bytes = _identity
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s
return s.encode(charset, errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.encode(charset, errors)
else:
unichr = chr
text_type = str
string_types = (str, )
integer_types = (int, )
iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
def iter_bytes(b):
return map(int_to_byte, b)
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
fix_tuple_repr = _identity
implements_iterator = _identity
implements_to_string = _identity
implements_bool = _identity
native_string_result = _identity
imap = map
izip = zip
ifilter = filter
range_type = range
from io import StringIO, BytesIO
NativeStringIO = StringIO
def make_literal_wrapper(reference):
if isinstance(reference, text_type):
return lambda x: x
return lambda x: x.encode('latin1')
def normalize_string_tuple(tup):
"""Ensures that all types in the tuple are either strings
or bytes.
"""
tupiter = iter(tup)
is_text = isinstance(next(tupiter, None), text_type)
for arg in tupiter:
if isinstance(arg, text_type) != is_text:
raise TypeError('Cannot mix str and bytes arguments (got %s)'
% repr(tup))
return tup
try_coerce_native = _identity
def wsgi_get_bytes(s):
return s.encode('latin1')
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.encode('latin1').decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s.decode('latin1', errors)
return s.encode(charset).decode('latin1', errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.decode(charset, errors)
def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
allow_none_charset=False):
if x is None:
return None
if not isinstance(x, bytes):
return text_type(x)
if charset is None and allow_none_charset:
return x
return x.decode(charset, errors)
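# Hedged usage sketch (illustration only; not part of the original module).
# '_example_compat_usage' is a hypothetical helper; it exercises the string
# coercion helpers defined above and holds on both Python 2 and Python 3.
def _example_compat_usage():
    assert to_bytes(u'abc') == b'abc'
    assert to_unicode(b'abc') == u'abc'
    assert to_native(u'abc') == 'abc'
    assert to_native(None) is None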
|
kxliugang/edx-platform | refs/heads/master | lms/djangoapps/teams/tests/__init__.py | 12133432 | |
benjamin-jones/pupy | refs/heads/master | pupy/packages/windows/x86/psutil/_pssunos.py | 80 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sun OS Solaris platform implementation."""
import errno
import os
import socket
import subprocess
import sys
from collections import namedtuple
from . import _common
from . import _psposix
from . import _psutil_posix as cext_posix
from . import _psutil_sunos as cext
from ._common import isfile_strict, socktype_to_enum, sockfam_to_enum
from ._common import usage_percent
from ._compat import PY3
__extra__all__ = ["CONN_IDLE", "CONN_BOUND"]
PAGE_SIZE = os.sysconf('SC_PAGE_SIZE')
AF_LINK = cext_posix.AF_LINK
CONN_IDLE = "IDLE"
CONN_BOUND = "BOUND"
PROC_STATUSES = {
cext.SSLEEP: _common.STATUS_SLEEPING,
cext.SRUN: _common.STATUS_RUNNING,
cext.SZOMB: _common.STATUS_ZOMBIE,
cext.SSTOP: _common.STATUS_STOPPED,
cext.SIDL: _common.STATUS_IDLE,
cext.SONPROC: _common.STATUS_RUNNING, # same as run
cext.SWAIT: _common.STATUS_WAITING,
}
TCP_STATUSES = {
cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
cext.TCPS_CLOSED: _common.CONN_CLOSE,
cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
cext.TCPS_LISTEN: _common.CONN_LISTEN,
cext.TCPS_CLOSING: _common.CONN_CLOSING,
cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
cext.TCPS_IDLE: CONN_IDLE, # sunos specific
cext.TCPS_BOUND: CONN_BOUND, # sunos specific
}
scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
pextmem = namedtuple('pextmem', ['rss', 'vms'])
pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked'])
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# set later from __init__.py
NoSuchProcess = None
ZombieProcess = None
AccessDenied = None
TimeoutExpired = None
# --- functions
disk_io_counters = cext.disk_io_counters
net_io_counters = cext.net_io_counters
disk_usage = _psposix.disk_usage
net_if_addrs = cext_posix.net_if_addrs
def virtual_memory():
# we could have done this with kstat, but imho this is good enough
total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
# note: there's no difference on Solaris
free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
used = total - free
percent = usage_percent(used, total, _round=1)
return svmem(total, avail, percent, used, free)
def swap_memory():
sin, sout = cext.swap_mem()
# XXX
# we are supposed to get total/free by doing so:
# http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
# usr/src/cmd/swap/swap.c
# ...nevertheless I can't manage to obtain the same numbers as 'swap'
# cmdline utility, so let's parse its output (sigh!)
p = subprocess.Popen(['/usr/bin/env', 'PATH=/usr/sbin:/sbin:%s' %
os.environ['PATH'], 'swap', '-l', '-k'],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if PY3:
stdout = stdout.decode(sys.stdout.encoding)
if p.returncode != 0:
raise RuntimeError("'swap -l -k' failed (retcode=%s)" % p.returncode)
lines = stdout.strip().split('\n')[1:]
if not lines:
raise RuntimeError('no swap device(s) configured')
total = free = 0
for line in lines:
line = line.split()
t, f = line[-2:]
t = t.replace('K', '')
f = f.replace('K', '')
total += int(int(t) * 1024)
free += int(int(f) * 1024)
used = total - free
percent = usage_percent(used, total, _round=1)
return _common.sswap(total, used, free, percent,
sin * PAGE_SIZE, sout * PAGE_SIZE)
def pids():
"""Returns a list of PIDs currently running on the system."""
return [int(x) for x in os.listdir('/proc') if x.isdigit()]
def pid_exists(pid):
"""Check for the existence of a unix pid."""
return _psposix.pid_exists(pid)
def cpu_times():
"""Return system-wide CPU times as a named tuple"""
ret = cext.per_cpu_times()
return scputimes(*[sum(x) for x in zip(*ret)])
def per_cpu_times():
"""Return system per-CPU times as a list of named tuples"""
ret = cext.per_cpu_times()
return [scputimes(*x) for x in ret]
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# mimic os.cpu_count() behavior
return None
def cpu_count_physical():
"""Return the number of physical CPUs in the system."""
return cext.cpu_count_phys()
def boot_time():
"""The system boot time expressed in seconds since the epoch."""
return cext.boot_time()
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
localhost = (':0.0', ':0')
for item in rawlist:
user, tty, hostname, tstamp, user_process = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname in localhost:
hostname = 'localhost'
nt = _common.suser(user, tty, hostname, tstamp)
retlist.append(nt)
return retlist
def disk_partitions(all=False):
"""Return system disk partitions."""
# TODO - the filtering logic should be better checked so that
# it tries to reflect 'df' as much as possible
retlist = []
partitions = cext.disk_partitions()
for partition in partitions:
device, mountpoint, fstype, opts = partition
if device == 'none':
device = ''
if not all:
# Differently from, say, Linux, we don't have a list of
# common fs types so the best we can do, AFAIK, is to
# filter by filesystem having a total size > 0.
if not disk_usage(mountpoint).total:
continue
ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
retlist.append(ntuple)
return retlist
def net_connections(kind, _pid=-1):
"""Return socket connections. If pid == -1 return system-wide
connections (as opposed to connections opened by one process only).
Only INET sockets are returned (UNIX are not).
"""
cmap = _common.conn_tmap.copy()
if _pid == -1:
cmap.pop('unix', 0)
if kind not in cmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in cmap])))
families, types = _common.conn_tmap[kind]
rawlist = cext.net_connections(_pid, families, types)
ret = set()
for item in rawlist:
fd, fam, type_, laddr, raddr, status, pid = item
if fam not in families:
continue
if type_ not in types:
continue
status = TCP_STATUSES[status]
fam = sockfam_to_enum(fam)
type_ = socktype_to_enum(type_)
if _pid == -1:
nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
else:
nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
ret.add(nt)
return list(ret)
def net_if_stats():
"""Get NIC stats (isup, duplex, speed, mtu)."""
ret = cext.net_if_stats()
for name, items in ret.items():
isup, duplex, speed, mtu = items
if hasattr(_common, 'NicDuplex'):
duplex = _common.NicDuplex(duplex)
ret[name] = _common.snicstats(isup, duplex, speed, mtu)
return ret
def wrap_exceptions(fun):
"""Call callable into a try/except clause and translate ENOENT,
EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
"""
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if (NoSuchProcess is None or AccessDenied is None or
ZombieProcess is None):
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
class Process(object):
"""Wrapper class around underlying C implementation."""
__slots__ = ["pid", "_name", "_ppid"]
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
@wrap_exceptions
def name(self):
# note: max len == 15
return cext.proc_name_and_args(self.pid)[0]
@wrap_exceptions
def exe(self):
# Will be guess later from cmdline but we want to explicitly
# invoke cmdline here in order to get an AccessDenied
# exception if the user has not enough privileges.
self.cmdline()
return ""
@wrap_exceptions
def cmdline(self):
return cext.proc_name_and_args(self.pid)[1].split(' ')
@wrap_exceptions
def create_time(self):
return cext.proc_basic_info(self.pid)[3]
@wrap_exceptions
def num_threads(self):
return cext.proc_basic_info(self.pid)[5]
@wrap_exceptions
def nice_get(self):
# For some reason getpriority(3) return ESRCH (no such process)
# for certain low-pid processes, no matter what (even as root).
# The process actually exists though, as it has a name,
# creation time, etc.
# The best thing we can do here appears to be raising AD.
# Note: tested on Solaris 11; on Open Solaris 5 everything is
# fine.
try:
return cext_posix.getpriority(self.pid)
except EnvironmentError as err:
# 48 is 'operation not supported' but errno does not expose
# it. It occurs for low system pids.
if err.errno in (errno.ENOENT, errno.ESRCH, 48):
if pid_exists(self.pid):
raise AccessDenied(self.pid, self._name)
raise
@wrap_exceptions
def nice_set(self, value):
if self.pid in (2, 3):
# Special case PIDs: internally setpriority(3) return ESRCH
# (no such process), no matter what.
# The process actually exists though, as it has a name,
# creation time, etc.
raise AccessDenied(self.pid, self._name)
return cext_posix.setpriority(self.pid, value)
@wrap_exceptions
def ppid(self):
return cext.proc_basic_info(self.pid)[0]
@wrap_exceptions
def uids(self):
real, effective, saved, _, _, _ = cext.proc_cred(self.pid)
return _common.puids(real, effective, saved)
@wrap_exceptions
def gids(self):
_, _, _, real, effective, saved = cext.proc_cred(self.pid)
return _common.puids(real, effective, saved)
@wrap_exceptions
def cpu_times(self):
user, system = cext.proc_cpu_times(self.pid)
return _common.pcputimes(user, system)
@wrap_exceptions
def terminal(self):
hit_enoent = False
tty = wrap_exceptions(
cext.proc_basic_info(self.pid)[0])
if tty != cext.PRNODEV:
for x in (0, 1, 2, 255):
try:
return os.readlink('/proc/%d/path/%d' % (self.pid, x))
except OSError as err:
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
@wrap_exceptions
def cwd(self):
# /proc/PID/path/cwd may not be resolved by readlink() even if
# it exists (ls shows it). If that's the case and the process
# is still alive return None (we can return None also on BSD).
# Reference: http://goo.gl/55XgO
try:
return os.readlink("/proc/%s/path/cwd" % self.pid)
except OSError as err:
if err.errno == errno.ENOENT:
os.stat("/proc/%s" % self.pid)
return None
raise
@wrap_exceptions
def memory_info(self):
ret = cext.proc_basic_info(self.pid)
rss, vms = ret[1] * 1024, ret[2] * 1024
return _common.pmem(rss, vms)
# it seems Solaris uses rss and vms only
memory_info_ex = memory_info
@wrap_exceptions
def status(self):
code = cext.proc_basic_info(self.pid)[6]
# XXX is '?' legit? (we're not supposed to return it anyway)
return PROC_STATUSES.get(code, '?')
@wrap_exceptions
def threads(self):
ret = []
tids = os.listdir('/proc/%d/lwp' % self.pid)
hit_enoent = False
for tid in tids:
tid = int(tid)
try:
utime, stime = cext.query_process_thread(
self.pid, tid)
except EnvironmentError as err:
# ENOENT == thread gone in meantime
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
else:
nt = _common.pthread(tid, utime, stime)
ret.append(nt)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return ret
@wrap_exceptions
def open_files(self):
retlist = []
hit_enoent = False
pathdir = '/proc/%d/path' % self.pid
for fd in os.listdir('/proc/%d/fd' % self.pid):
path = os.path.join(pathdir, fd)
if os.path.islink(path):
try:
file = os.readlink(path)
except OSError as err:
# ENOENT == file which is gone in the meantime
if err.errno == errno.ENOENT:
hit_enoent = True
continue
raise
else:
if isfile_strict(file):
retlist.append(_common.popenfile(file, int(fd)))
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
def _get_unix_sockets(self, pid):
"""Get UNIX sockets used by process by parsing 'pfiles' output."""
# TODO: rewrite this in C (...but the damn netstat source code
# does not include this part! Argh!!)
cmd = "pfiles %s" % pid
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if PY3:
stdout, stderr = [x.decode(sys.stdout.encoding)
for x in (stdout, stderr)]
if p.returncode != 0:
if 'permission denied' in stderr.lower():
raise AccessDenied(self.pid, self._name)
if 'no such process' in stderr.lower():
raise NoSuchProcess(self.pid, self._name)
raise RuntimeError("%r command error\n%s" % (cmd, stderr))
lines = stdout.split('\n')[2:]
for i, line in enumerate(lines):
line = line.lstrip()
if line.startswith('sockname: AF_UNIX'):
path = line.split(' ', 2)[2]
type = lines[i - 2].strip()
if type == 'SOCK_STREAM':
type = socket.SOCK_STREAM
elif type == 'SOCK_DGRAM':
type = socket.SOCK_DGRAM
else:
type = -1
yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
@wrap_exceptions
def connections(self, kind='inet'):
ret = net_connections(kind, _pid=self.pid)
# The underlying C implementation retrieves all OS connections
# and filters them by PID. At this point we can't tell whether
# an empty list means there were no connections for process or
# process is no longer active so we force NSP in case the PID
# is no longer there.
if not ret:
os.stat('/proc/%s' % self.pid) # will raise NSP if process is gone
# UNIX sockets
if kind in ('all', 'unix'):
ret.extend([_common.pconn(*conn) for conn in
self._get_unix_sockets(self.pid)])
return ret
nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
@wrap_exceptions
def memory_maps(self):
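        # helper turning a (start, end) pair into a "start-end" hex range,
        # stripping the trailing 'L' that Python 2 appends to long literals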
def toaddr(start, end):
return '%s-%s' % (hex(start)[2:].strip('L'),
hex(end)[2:].strip('L'))
retlist = []
rawlist = cext.proc_memory_maps(self.pid)
hit_enoent = False
for item in rawlist:
addr, addrsize, perm, name, rss, anon, locked = item
addr = toaddr(addr, addrsize)
if not name.startswith('['):
try:
name = os.readlink('/proc/%s/path/%s' % (self.pid, name))
except OSError as err:
if err.errno == errno.ENOENT:
# sometimes the link may not be resolved by
# readlink() even if it exists (ls shows it).
# If that's the case we just return the
# unresolved link path.
                        # This seems to be an inconsistency with /proc
                        # similar to: http://goo.gl/55XgO
name = '/proc/%s/path/%s' % (self.pid, name)
hit_enoent = True
else:
raise
retlist.append((addr, perm, name, rss, anon, locked))
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def num_fds(self):
return len(os.listdir("/proc/%s/fd" % self.pid))
@wrap_exceptions
def num_ctx_switches(self):
return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
@wrap_exceptions
def wait(self, timeout=None):
try:
return _psposix.wait_pid(self.pid, timeout)
except _psposix.TimeoutExpired:
# support for private module import
if TimeoutExpired is None:
raise
raise TimeoutExpired(timeout, self.pid, self._name)
|
sl2017/campos | refs/heads/8.0 | campos_event/models/camp_area.py | 1 | # -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.addons.base_geoengine import geo_model
from openerp.addons.base_geoengine import fields as geo_fields
import logging
_logger = logging.getLogger(__name__)
class CamposCampArea(geo_model.GeoModel):
_description = 'Camp Area'
_name = 'campos.camp.area'
name = fields.Char('Name', size=64)
code = fields.Char('Code', size=16)
desc = fields.Text('Description')
max_cap = fields.Integer('Max')
event_id = fields.Many2one('event.event', 'Event')
reg_ids = fields.One2many('event.registration', 'camp_area_id', 'Troops')
reg_view_ids = fields.One2many('campos.registration.view', 'camp_area_id', 'Troops')
    addreg_id = fields.Many2one('event.registration', 'Add Registration', ondelete='set null', domain=[('state', '!=', 'cancel')])
allocated = fields.Integer('Allocated', compute="_compute_allocated")
subcamp_id = fields.Many2one('campos.subcamp', 'Sub Camp')
the_geom = geo_fields.GeoMultiPolygon('NPA Shape')
committee_id = fields.Many2one('campos.committee',
'Committee',
ondelete='cascade')
part_function_ids = fields.One2many(related='committee_id.part_function_ids', string='Coordinators')
subcamp_function_ids = fields.One2many(related='subcamp_id.committee_id.part_function_ids', string='Camp Area Resp.')
mailgroup_id = fields.Many2one('mail.group',
'Mail list',
ondelete='cascade')
@api.one
@api.depends('reg_ids')
def _compute_allocated(self):
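        # number of troop registrations currently allocated to this camp area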
self.allocated = len(self.reg_ids)
@api.one
def _create_committee(self):
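        # Lazily create the committee for this camp area, attaching it to a
        # per-sub-camp parent committee (created first if it does not exist).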
if not self.committee_id:
parent = self.env['campos.committee'].search([('name', '=', self.subcamp_id.name), ('parent_id', '=', self.env.ref('campos_event.camp_area_committee').id)])
if not parent:
parent = self.env['campos.committee'].create({'name': self.subcamp_id.name,
'parent_id' : self.env.ref('campos_event.camp_area_committee').id
}
)
self.committee_id = self.env['campos.committee'].create({'name': self.name,
'code': self.code,
'parent_id': parent.id,
})
# if not self.mailgroup_id:
# self.mailgroup_id = self.env['mail.group'].with_context(mail_create_nosubscribe=True).create({'name': "Kvarter %s / %s" % (self.name, self.subcamp_id.name),
# 'alias_name': "kvarter-%s" % (self.code),
# }) |
renzon/appengine-search-api-test | refs/heads/master | backend/apps/course_app/course_model.py | 1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from google.appengine.ext import ndb
from gaegraph.model import Node
from gaeforms.ndb import property
class Course(Node):
name = ndb.StringProperty(required=True)
price = property.SimpleCurrency(required=True)
start_date = ndb.DateProperty(required=True)
|
scotthartbti/android_external_chromium_org | refs/heads/kk44 | tools/telemetry/telemetry/core/extension_page.py | 23 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import web_contents
class ExtensionPage(web_contents.WebContents):
"""Represents a an extension page in the browser"""
def __init__(self, extension_id, url, inspector_backend):
super(ExtensionPage, self).__init__(inspector_backend)
self.extension_id = extension_id
self.url = url
assert url.startswith('chrome-extension://' + extension_id)
def __del__(self):
super(ExtensionPage, self).__del__()
def Reload(self):
""" Reloading an extension page is used as a workaround for an extension
    binding bug in old versions of Chrome (crbug.com/263162). After Navigate
returns, we are guaranteed that the inspected page is in the correct state.
"""
self._inspector_backend.Navigate(self.url, None, 10)
|