repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---
jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/pyrepl/console.py | 13 | 2560 | # Copyright 2000-2004 Michael Hudson-Doyle <[email protected]>
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
class Event:
"""An Event. `evt' is 'key' or somesuch."""
def __init__(self, evt, data, raw=''):
self.evt = evt
self.data = data
self.raw = raw
def __repr__(self):
return 'Event(%r, %r)'%(self.evt, self.data)
class Console:
"""Attributes:
screen,
height,
width,
"""
def refresh(self, screen, xy):
pass
def prepare(self):
pass
def restore(self):
pass
def move_cursor(self, x, y):
pass
def set_cursor_vis(self, vis):
pass
def getheightwidth(self):
"""Return (height, width) where height and width are the height
and width of the terminal window in characters."""
pass
def get_event(self, block=1):
"""Return an Event instance. Returns None if |block| is false
and there is no event pending, otherwise waits for the
completion of an event."""
pass
def beep(self):
pass
def clear(self):
"""Wipe the screen"""
pass
def finish(self):
"""Move the cursor to the end of the display and otherwise get
ready for end. XXX could be merged with restore? Hmm."""
pass
def flushoutput(self):
"""Flush all output to the screen (assuming there's some
buffering going on somewhere)."""
pass
def forgetinput(self):
"""Forget all pending, but not yet processed input."""
pass
def getpending(self):
"""Return the characters that have been typed but not yet
processed."""
pass
def wait(self):
"""Wait for an event."""
pass
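# A minimal sketch (illustrative, not part of pyrepl) of how a concrete
# backend could fill in this interface; real backends such as pyrepl's
# unix_console also manage raw tty modes and redraw logic.
class _SketchConsole(Console):
    """Toy Console: blocking key reads from stdin, no real drawing."""
    def __init__(self):
        self.screen, self.height, self.width = [], 24, 80
    def getheightwidth(self):
        return self.height, self.width
    def get_event(self, block=1):
        import sys
        ch = sys.stdin.read(1)  # a real backend would poll the tty
        return Event('key', ch) if ch else None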
| mit |
DooMLoRD/android_kernel_sony_msm8960t | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
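# Test pattern lines (parsed in the main loop below) are colon-separated:
#     "cmd: opcode: threadid: data"
# e.g. "C: locknowait: 0: 0" writes lock opcode 4 with data 0 to thread 0's
# command file, and "W: locked: 0: 0" waits until digit 0 of thread 0's
# mutex status equals 4.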
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
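# Worked example: analyse("4123", ["M", "eq", 4], "3") selects decimal digit 3
# of the status value (4123 / 10**3 % 10 == 4), compares it to argval 4 under
# "eq", and returns 1 -- i.e. the mutex at position 3 is in the "locked" state.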
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
googleads/google-ads-python | google/ads/googleads/v7/services/types/detail_placement_view_service.py | 1 | 1273 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v7.services",
marshal="google.ads.googleads.v7",
manifest={"GetDetailPlacementViewRequest",},
)
class GetDetailPlacementViewRequest(proto.Message):
r"""Request message for
[DetailPlacementViewService.GetDetailPlacementView][google.ads.googleads.v7.services.DetailPlacementViewService.GetDetailPlacementView].
Attributes:
resource_name (str):
Required. The resource name of the Detail
Placement view to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
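# Illustrative usage (a sketch; the exact resource-name pattern is an
# assumption, not taken from this file):
#
#     request = GetDetailPlacementViewRequest(
#         resource_name="customers/1234567890/detailPlacementViews/111~222",
#     )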
| apache-2.0 |
dharmasam9/moose-core | scripts/cmake_sanity_check.py | 1 | 2830 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""cmake_sanity_check.py: Check if Cmake files are ok.
Last modified: Sat Jan 18, 2014 05:01PM
NOTE: Run in this directory only.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import sys
import os
import re
from collections import defaultdict
makefiles = {}
cmakefiles = {}
makedirs = set()
cmakedirs = set()
def check(d):
searchMakefiles(d)
checkMissingCMake()
checkSrcs()
def checkMissingCMake():
if (makedirs - cmakedirs):
print("[Failed] Test 1")
print("Following directories have Makefile but not a CMakeFiles.txt file.")
print("%s" % "\t\n".join(makedirs - cmakedirs))
def searchMakefiles(topdir):
for d, subd, fs in os.walk(topdir):
if "../_build" in d or ".." == d: continue
if "CMakeLists.txt" in fs:
cmakedirs.add(d)
cmakefiles[d] = fs
if "Makefile" in fs:
if "_build" in d:
continue
else:
makedirs.add(d)
makefiles[d] = fs
else: pass
def checkSrcs():
objPat = re.compile(r"\w+\.o")
srcPat = re.compile(r"\w+\.cpp")
srcs = []
csrcs = []
for d in makefiles:
with open(os.path.join(d, "Makefile"), "r") as f:
txt = f.read()
for i in txt.split("\n\n"):
if "OBJ =" in i.upper():
for j in i.split():
if ".o" in j.strip():
srcs.append("%s"%(j.strip()))
try:
with open(os.path.join(d, "CMakeLists.txt"), "r") as f:
txt = f.read()
csrcs = srcPat.findall(txt)
except:
print("Dir {} does not have CMakeLists.txt".format(d))
csrcs = []
#print("[TEST 2] Checking if CMake is creating extra objects")
for csr in csrcs:
objName = csr.replace(".cpp", ".o")
if objName in srcs:
pass
else:
print(" Failed: In dir {}, CMake is creating extra object {}".format(d, objName))
pass
print("[TEST 3] Checking if CMake is missing some objects")
for obj in srcs:
srcName = obj.replace(".o", ".cpp")
if srcName in csrcs: pass
else:
print(" Failed: In dir {}, CMake is missing object {}".format(d,
srcName))
def main():
test_dir = sys.argv[1]
check(test_dir)
if __name__ == '__main__':
main()
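# Invocation sketch (inferred from main(), which takes the directory to scan
# from sys.argv[1]):
#     python cmake_sanity_check.py /path/to/source/tree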
| gpl-3.0 |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/abc.py | 106 | 8624 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractclassmethod(classmethod):
"""
A decorator indicating abstract classmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractclassmethod
def my_abstract_classmethod(cls, ...):
...
'abstractclassmethod' is deprecated. Use 'classmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractstaticmethod(staticmethod):
"""
A decorator indicating abstract staticmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractstaticmethod
def my_abstract_staticmethod(...):
...
'abstractstaticmethod' is deprecated. Use 'staticmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractproperty(property):
"""
A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C(metaclass=ABCMeta):
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
'abstractproperty' is deprecated. Use 'property' with 'abstractmethod'
instead.
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
# Note: this counter is private. Use `abc.get_cache_token()` for
# external code.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = {name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False)}
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""
if not isinstance(subclass, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return subclass # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
return subclass
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print("%s: %r" % (name, value), file=file)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking
subclass = instance.__class__
if subclass in cls._abc_cache:
return True
subtype = type(instance)
if subtype is subclass:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subclass in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subclass)
return any(cls.__subclasscheck__(c) for c in {subclass, subtype})
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
class ABC(metaclass=ABCMeta):
"""Helper class that provides a standard way to create an ABC using
inheritance.
"""
pass
def get_cache_token():
"""Returns the current ABC cache token.
The token is an opaque object (supporting equality testing) identifying the
current version of the ABC cache for virtual subclasses. The token changes
with every call to ``register()`` on any ABC.
"""
return ABCMeta._abc_invalidation_counter
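# Illustrative example (standard ABC behaviour, not specific to this copy of
# the module): registering a virtual subclass makes issubclass()/isinstance()
# recognize it without touching its MRO.
#
#     class MyABC(metaclass=ABCMeta):
#         pass
#
#     MyABC.register(tuple)
#     assert issubclass(tuple, MyABC)
#     assert isinstance((), MyABC)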
| lgpl-3.0 |
caphrim007/ansible | lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase_facts.py | 14 | 5925 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqldatabase_facts
version_added: "2.7"
short_description: Get Azure MySQL Database facts.
description:
- Get facts of MySQL Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the database.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get instance of MySQL Database
azure_rm_mysqldatabase_facts:
resource_group: resource_group_name
server_name: server_name
name: database_name
- name: List instances of MySQL Database
azure_rm_mysqldatabase_facts:
resource_group: resource_group_name
server_name: server_name
'''
RETURN = '''
databases:
description: A list of dictionaries containing facts for MySQL Databases.
returned: always
type: complex
contains:
id:
description:
- Resource ID
returned: always
type: str
sample: "/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/TestGroup/providers/Microsoft.DBforMySQL/servers/testser
ver/databases/db1"
resource_group:
description:
- Resource group name.
returned: always
type: str
sample: testrg
server_name:
description:
- Server name.
returned: always
type: str
sample: testserver
name:
description:
- Resource name.
returned: always
type: str
sample: db1
charset:
description:
- The charset of the database.
returned: always
type: str
sample: UTF8
collation:
description:
- The collation of the database.
returned: always
type: str
sample: English_United States.1252
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMDatabasesFacts(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.resource_group = None
self.server_name = None
self.name = None
super(AzureRMDatabasesFacts, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if (self.resource_group is not None and
self.server_name is not None and
self.name is not None):
self.results['databases'] = self.get()
elif (self.resource_group is not None and
self.server_name is not None):
self.results['databases'] = self.list_by_server()
return self.results
def get(self):
response = None
results = []
try:
response = self.mysql_client.databases.get(resource_group_name=self.resource_group,
server_name=self.server_name,
database_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for Databases.')
if response is not None:
results.append(self.format_item(response))
return results
def list_by_server(self):
response = None
results = []
try:
response = self.mysql_client.databases.list_by_server(resource_group_name=self.resource_group,
server_name=self.server_name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
if response is not None:
for item in response:
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
d = {
'resource_group': self.resource_group,
'server_name': self.server_name,
'name': d['name'],
'charset': d['charset'],
'collation': d['collation']
}
return d
def main():
AzureRMDatabasesFacts()
if __name__ == '__main__':
main()
| gpl-3.0 |
willkg/phil | phil/check.py | 1 | 5531 | ######################################################################
# This file is part of phil.
#
# Copyright (C) 2011, 2012, 2013 Will Kahn-Greene
#
# phil is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# phil is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with phil. If not, see <http://www.gnu.org/licenses/>.
#######################################################################
import ConfigParser
import datetime
import phil.util
from phil.util import (
out, err, parse_configuration, parse_ics, get_next_date, should_remind,
format_date, generate_date_bits)
class Phil(object):
def __init__(self, quiet=False, debug=False):
self.config = None
self.quiet = quiet
self.debug = debug
def _run(self):
dtstart = datetime.datetime.today()
if not self.quiet:
out('Loading state....')
state = phil.util.load_state(self.config.datadir)
if not self.quiet:
out('Parsing ics file "{0}"....'.format(self.config.icsfile))
events = parse_ics(self.config.icsfile)
for event in events:
if not self.quiet:
out('Looking at event "{0}"....'.format(event.summary))
next_date = get_next_date(dtstart, event.rrule)
previous_remind = state.get(event.event_id)
if previous_remind and previous_remind == str(next_date.date()):
if not self.quiet:
out('Already sent a reminder for this meeting.')
continue
if should_remind(dtstart, next_date, self.config.remind):
if not self.quiet:
out('Sending reminder....')
summary = '{0} ({1})'.format(
event.summary, format_date(next_date))
description = event.description % generate_date_bits(next_date)
if self.debug:
out('From:', self.config.sender)
out('To:', self.config.to_list)
out('Subject:', summary)
out('Body:')
out(description, indent=' ', wrap=False)
else:
phil.util.send_mail_smtp(
self.config.sender, self.config.to_list, summary,
description, self.config.host, self.config.port)
state[event.event_id] = str(next_date.date())
elif not self.quiet:
out('Next reminder should get sent on {0}.'.format(
next_date.date() - datetime.timedelta(self.config.remind)))
phil.util.save_state(self.config.datadir, state)
def run(self, conffile):
if not self.quiet:
out('Parsing config file....')
try:
self.config = parse_configuration(conffile)
except ConfigParser.NoOptionError, noe:
err('Missing option in config file: {0}'.format(noe))
return 1
try:
self._run()
except Exception:
import traceback
err(''.join(traceback.format_exc()), wrap=False)
err('phil has died unexpectedly. If you think this is an error '
'(which it is), then contact phil\'s authors for help.')
return 1
if not self.quiet:
out('Finished!')
return 0
def _next6(self):
# TODO: This is a repeat of _run for the most part.
dtstart = datetime.datetime.today()
out('Loading state....')
state = phil.util.load_state(self.config.datadir)
out('Parsing ics file "{0}"....'.format(self.config.icsfile))
events = parse_ics(self.config.icsfile)
for event in events:
out('Looking at event "{0}"....'.format(event.summary))
next_date = dtstart
for i in range(6):
next_date = get_next_date(next_date, event.rrule)
previous_remind = state.get(event.event_id)
if (previous_remind
and previous_remind == str(next_date.date())):
out('* {0} (sent reminder already)'.format(
next_date.strftime('%c')))
else:
out('* {0}'.format(next_date.strftime('%c')))
next_date = next_date + datetime.timedelta(1)
def next6(self, conffile):
if not self.quiet:
out('Parsing config file....')
try:
self.config = parse_configuration(conffile)
except ConfigParser.NoOptionError, noe:
err('Missing option in config file: {0}'.format(noe))
return 1
try:
self._next6()
except Exception:
import traceback
err(''.join(traceback.format_exc()), wrap=False)
err('phil has died unexpectedly. If you think this is an error '
'(which it is), then contact phil\'s authors for help.')
return 1
if not self.quiet:
out('Finished!')
return 0
| gpl-3.0 |
ocadotechnology/boto | boto/fps/response.py | 153 | 7866 | # Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/
# Copyright (c) 2010 Jason R. Coombs http://www.jaraco.com/
# Copyright (c) 2008 Chris Moyer http://coredumped.org/
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from decimal import Decimal
from boto.compat import filter, map
def ResponseFactory(action):
class FPSResponse(Response):
_action = action
_Result = globals().get(action + 'Result', ResponseElement)
# due to nodes receiving their closing tags
def endElement(self, name, value, connection):
if name != action + 'Response':
super(FPSResponse, self).endElement(name, value, connection)
return FPSResponse
class ResponseElement(object):
def __init__(self, connection=None, name=None):
if connection is not None:
self._connection = connection
self._name = name or self.__class__.__name__
@property
def connection(self):
return self._connection
def __repr__(self):
render = lambda pair: '{!s}: {!r}'.format(*pair)
do_show = lambda pair: not pair[0].startswith('_')
attrs = filter(do_show, self.__dict__.items())
return '{0}({1})'.format(self.__class__.__name__,
', '.join(map(render, attrs)))
def startElement(self, name, attrs, connection):
return None
# due to nodes receiving their closing tags
def endElement(self, name, value, connection):
if name != self._name:
setattr(self, name, value)
class Response(ResponseElement):
_action = 'Undefined'
def startElement(self, name, attrs, connection):
if name == 'ResponseMetadata':
setattr(self, name, ResponseElement(name=name))
elif name == self._action + 'Result':
setattr(self, name, self._Result(name=name))
else:
return super(Response, self).startElement(name, attrs, connection)
return getattr(self, name)
class ComplexAmount(ResponseElement):
def __repr__(self):
return '{0} {1}'.format(self.CurrencyCode, self.Value)
def __float__(self):
return float(self.Value)
def __str__(self):
return str(self.Value)
def startElement(self, name, attrs, connection):
if name not in ('CurrencyCode', 'Value'):
message = 'Unrecognized tag {0} in ComplexAmount'.format(name)
raise AssertionError(message)
return super(ComplexAmount, self).startElement(name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Value':
value = Decimal(value)
super(ComplexAmount, self).endElement(name, value, connection)
class AmountCollection(ResponseElement):
def startElement(self, name, attrs, connection):
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
class AccountBalance(AmountCollection):
def startElement(self, name, attrs, connection):
if name == 'AvailableBalances':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(AccountBalance, self).startElement(name, attrs, connection)
class GetAccountBalanceResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'AccountBalance':
setattr(self, name, AccountBalance(name=name))
return getattr(self, name)
return super(GetAccountBalanceResult, self).startElement(name, attrs,
connection)
class GetTotalPrepaidLiabilityResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'OutstandingPrepaidLiability':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(GetTotalPrepaidLiabilityResult, self).startElement(name,
attrs, connection)
class GetPrepaidBalanceResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'PrepaidBalance':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(GetPrepaidBalanceResult, self).startElement(name, attrs,
connection)
class GetOutstandingDebtBalanceResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'OutstandingDebt':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return super(GetOutstandingDebtBalanceResult, self).startElement(name,
attrs, connection)
class TransactionPart(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'FeesPaid':
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
return super(TransactionPart, self).startElement(name, attrs,
connection)
class Transaction(ResponseElement):
def __init__(self, *args, **kw):
self.TransactionPart = []
super(Transaction, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'TransactionPart':
getattr(self, name).append(TransactionPart(name=name))
return getattr(self, name)[-1]
if name in ('TransactionAmount', 'FPSFees', 'Balance'):
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
return super(Transaction, self).startElement(name, attrs, connection)
class GetAccountActivityResult(ResponseElement):
def __init__(self, *args, **kw):
self.Transaction = []
super(GetAccountActivityResult, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'Transaction':
getattr(self, name).append(Transaction(name=name))
return getattr(self, name)[-1]
return super(GetAccountActivityResult, self).startElement(name, attrs,
connection)
class GetTransactionResult(ResponseElement):
def startElement(self, name, attrs, connection):
if name == 'Transaction':
setattr(self, name, Transaction(name=name))
return getattr(self, name)
return super(GetTransactionResult, self).startElement(name, attrs,
connection)
class GetTokensResult(ResponseElement):
def __init__(self, *args, **kw):
self.Token = []
super(GetTokensResult, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'Token':
getattr(self, name).append(ResponseElement(name=name))
return getattr(self, name)[-1]
return super(GetTokensResult, self).startElement(name, attrs,
connection)
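# Sketch of how the factory above is typically used (an assumption about
# caller code elsewhere in boto.fps, not shown in this file):
#
#     GetAccountBalanceResponse = ResponseFactory('GetAccountBalance')
#     # Its _Result resolves to GetAccountBalanceResult defined above,
#     # because ResponseFactory looks the result class up via globals().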
| mit |
dancingdan/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/softplus_test.py | 24 | 5768 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.softplus import Softplus
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
rng = np.random.RandomState(42)
class SoftplusBijectorTest(test.TestCase):
"""Tests the correctness of the Y = g(X) = Log[1 + exp(X)] transformation."""
def _softplus(self, x):
return np.log(1 + np.exp(x))
def _softplus_inverse(self, y):
return np.log(np.exp(y) - 1)
def _softplus_ildj_before_reduction(self, y):
"""Inverse log det jacobian, before being reduced."""
return -np.log(1 - np.exp(-y))
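  # Derivation for the ILDJ above: y = log(1 + exp(x)) implies
  # dy/dx = exp(x) / (1 + exp(x)) = 1 - exp(-y), hence
  # log|dx/dy| = -log(1 - exp(-y)), which is exactly what
  # _softplus_ildj_before_reduction computes before any reduction over axes.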
def testHingeSoftnessZeroRaises(self):
with self.cached_session():
bijector = Softplus(hinge_softness=0., validate_args=True)
with self.assertRaisesOpError("must be non-zero"):
bijector.forward([1., 1.]).eval()
def testBijectorForwardInverseEventDimsZero(self):
with self.cached_session():
bijector = Softplus()
self.assertEqual("softplus", bijector.name)
x = 2 * rng.randn(2, 10)
y = self._softplus(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorForwardInverseWithHingeSoftnessEventDimsZero(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.5)
x = 2 * rng.randn(2, 10)
y = 1.5 * self._softplus(x / 1.5)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorLogDetJacobianEventDimsZero(self):
with self.cached_session():
bijector = Softplus()
y = 2 * rng.rand(2, 10)
# No reduction needed if event_dims = 0.
ildj = self._softplus_ildj_before_reduction(y)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=0).eval())
def testBijectorForwardInverseEventDimsOne(self):
with self.cached_session():
bijector = Softplus()
self.assertEqual("softplus", bijector.name)
x = 2 * rng.randn(2, 10)
y = self._softplus(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorLogDetJacobianEventDimsOne(self):
with self.cached_session():
bijector = Softplus()
y = 2 * rng.rand(2, 10)
ildj_before = self._softplus_ildj_before_reduction(y)
ildj = np.sum(ildj_before, axis=1)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=1).eval())
def testScalarCongruency(self):
with self.cached_session():
bijector = Softplus()
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testScalarCongruencyWithPositiveHingeSoftness(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.3)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testScalarCongruencyWithNegativeHingeSoftness(self):
with self.cached_session():
bijector = Softplus(hinge_softness=-1.3)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testBijectiveAndFinite32bit(self):
with self.cached_session():
bijector = Softplus()
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFiniteWithPositiveHingeSoftness32Bit(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.23)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFiniteWithNegativeHingeSoftness32Bit(self):
with self.cached_session():
bijector = Softplus(hinge_softness=-0.7)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = -np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFinite16bit(self):
with self.cached_session():
bijector = Softplus()
# softplus(-20) is zero, so we can't use such a large range as in 32bit.
x = np.linspace(-10., 20., 100).astype(np.float16)
# Note that float16 is only in the open set (0, inf) for a smaller
# logspace range. The actual range was (-7, 4), so use something smaller
# for the test.
y = np.logspace(-6, 3, 100).astype(np.float16)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-1, atol=1e-3)
if __name__ == "__main__":
test.main()
| apache-2.0 |
SPriyaJain/studybuddy | env/lib/python2.7/site-packages/click/__init__.py | 135 | 2858 | # -*- coding: utf-8 -*-
"""
click
~~~~~
Click is a simple Python module that wraps the stdlib's optparse to make
writing command line scripts fun. Unlike other modules, it's based around
a simple API that does not come with too much magic and is composable.
In case optparse ever gets removed from the stdlib, it will be shipped by
this module.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
# Core classes
from .core import Context, BaseCommand, Command, MultiCommand, Group, \
CommandCollection, Parameter, Option, Argument
# Globals
from .globals import get_current_context
# Decorators
from .decorators import pass_context, pass_obj, make_pass_decorator, \
command, group, argument, option, confirmation_option, \
password_option, version_option, help_option
# Types
from .types import ParamType, File, Path, Choice, IntRange, Tuple, \
STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED
# Utilities
from .utils import echo, get_binary_stream, get_text_stream, open_file, \
format_filename, get_app_dir, get_os_args
# Terminal functions
from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \
progressbar, clear, style, unstyle, secho, edit, launch, getchar, \
pause
# Exceptions
from .exceptions import ClickException, UsageError, BadParameter, \
FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \
MissingParameter
# Formatting
from .formatting import HelpFormatter, wrap_text
# Parsing
from .parser import OptionParser
__all__ = [
# Core classes
'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group',
'CommandCollection', 'Parameter', 'Option', 'Argument',
# Globals
'get_current_context',
# Decorators
'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group',
'argument', 'option', 'confirmation_option', 'password_option',
'version_option', 'help_option',
# Types
'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING',
'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED',
# Utilities
'echo', 'get_binary_stream', 'get_text_stream', 'open_file',
'format_filename', 'get_app_dir', 'get_os_args',
# Terminal functions
'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager',
'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch',
'getchar', 'pause',
# Exceptions
'ClickException', 'UsageError', 'BadParameter', 'FileError',
'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage',
'MissingParameter',
# Formatting
'HelpFormatter', 'wrap_text',
# Parsing
'OptionParser',
]
# Controls if click should emit the warning about the use of unicode
# literals.
disable_unicode_literals_warning = False
__version__ = '6.7'
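# Minimal usage sketch built only from names exported above (illustrative):
#
#     import click
#
#     @click.command()
#     @click.option('--count', default=1, help='Number of greetings.')
#     def hello(count):
#         for _ in range(count):
#             click.echo('Hello World!')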
| mit |
mrquim/repository.mrquim | script.module.youtube.dl/lib/youtube_dl/extractor/libraryofcongress.py | 44 | 4662 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
parse_filesize,
)
class LibraryOfCongressIE(InfoExtractor):
IE_NAME = 'loc'
IE_DESC = 'Library of Congress'
_VALID_URL = r'https?://(?:www\.)?loc\.gov/(?:item/|today/cyberlc/feature_wdesc\.php\?.*\brec=)(?P<id>[0-9]+)'
_TESTS = [{
# embedded via <div class="media-player"
'url': 'http://loc.gov/item/90716351/',
'md5': '353917ff7f0255aa6d4b80a034833de8',
'info_dict': {
'id': '90716351',
'ext': 'mp4',
'title': "Pa's trip to Mars",
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 0,
'view_count': int,
},
}, {
# webcast embedded via mediaObjectId
'url': 'https://www.loc.gov/today/cyberlc/feature_wdesc.php?rec=5578',
'info_dict': {
'id': '5578',
'ext': 'mp4',
'title': 'Help! Preservation Training Needs Here, There & Everywhere',
'duration': 3765,
'view_count': int,
'subtitles': 'mincount:1',
},
'params': {
'skip_download': True,
},
}, {
# with direct download links
'url': 'https://www.loc.gov/item/78710669/',
'info_dict': {
'id': '78710669',
'ext': 'mp4',
'title': 'La vie et la passion de Jesus-Christ',
'duration': 0,
'view_count': int,
'formats': 'mincount:4',
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
media_id = self._search_regex(
(r'id=(["\'])media-player-(?P<id>.+?)\1',
r'<video[^>]+id=(["\'])uuid-(?P<id>.+?)\1',
r'<video[^>]+data-uuid=(["\'])(?P<id>.+?)\1',
r'mediaObjectId\s*:\s*(["\'])(?P<id>.+?)\1'),
webpage, 'media id', group='id')
data = self._download_json(
'https://media.loc.gov/services/v1/media?id=%s&context=json' % media_id,
video_id)['mediaObject']
derivative = data['derivatives'][0]
media_url = derivative['derivativeUrl']
title = derivative.get('shortName') or data.get('shortName') or self._og_search_title(
webpage)
# Following algorithm was extracted from setAVSource js function
# found in webpage
media_url = media_url.replace('rtmp', 'https')
is_video = data.get('mediaType', 'v').lower() == 'v'
ext = determine_ext(media_url)
if ext not in ('mp4', 'mp3'):
media_url += '.mp4' if is_video else '.mp3'
        formats = []  # ensure defined even if neither branch below matches
        if 'vod/mp4:' in media_url:
formats = [{
'url': media_url.replace('vod/mp4:', 'hls-vod/media/') + '.m3u8',
'format_id': 'hls',
'ext': 'mp4',
'protocol': 'm3u8_native',
'quality': 1,
}]
elif 'vod/mp3:' in media_url:
formats = [{
'url': media_url.replace('vod/mp3:', ''),
'vcodec': 'none',
}]
download_urls = set()
for m in re.finditer(
r'<option[^>]+value=(["\'])(?P<url>.+?)\1[^>]+data-file-download=[^>]+>\s*(?P<id>.+?)(?:(?: |\s+)\((?P<size>.+?)\))?\s*<', webpage):
format_id = m.group('id').lower()
if format_id == 'gif':
continue
download_url = m.group('url')
if download_url in download_urls:
continue
download_urls.add(download_url)
formats.append({
'url': download_url,
'format_id': format_id,
'filesize_approx': parse_filesize(m.group('size')),
})
self._sort_formats(formats)
duration = float_or_none(data.get('duration'))
view_count = int_or_none(data.get('viewCount'))
subtitles = {}
cc_url = data.get('ccUrl')
if cc_url:
subtitles.setdefault('en', []).append({
'url': cc_url,
'ext': 'ttml',
})
return {
'id': video_id,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'duration': duration,
'view_count': view_count,
'formats': formats,
'subtitles': subtitles,
}
| gpl-2.0 |
nikste/tensorflow | tensorflow/python/debug/cli/tensor_format.py | 43 | 16359 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Format tensors (ndarrays) for screen display and navigation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import debugger_cli_common
_NUMPY_OMISSION = "...,"
_NUMPY_DEFAULT_EDGE_ITEMS = 3
_NUMBER_REGEX = re.compile(r"[-+]?([0-9][-+0-9eE\.]+|nan|inf)(\s|,|\])")
BEGIN_INDICES_KEY = "i0"
OMITTED_INDICES_KEY = "omitted"
DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR = "bold"
class HighlightOptions(object):
"""Options for highlighting elements of a tensor."""
def __init__(self,
criterion,
description=None,
font_attr=DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR):
"""Constructor of HighlightOptions.
Args:
criterion: (callable) A callable of the following signature:
def to_highlight(X):
# Args:
# X: The tensor to highlight elements in.
#
# Returns:
# (boolean ndarray) A boolean ndarray of the same shape as X
# indicating which elements are to be highlighted (iff True).
This callable will be used as the argument of np.argwhere() to
determine which elements of the tensor are to be highlighted.
description: (str) Description of the highlight criterion embodied by
criterion.
font_attr: (str) Font attribute to be applied to the
highlighted elements.
"""
self.criterion = criterion
self.description = description
self.font_attr = font_attr
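# Usage sketch (illustrative values): highlight all elements above 0.9 when
# formatting a tensor with format_tensor() below.
#
#     opts = HighlightOptions(lambda x: x > 0.9, description="> 0.9")
#     out = format_tensor(tensor_value, "node_name:0", highlight_options=opts)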
def format_tensor(tensor,
tensor_label,
include_metadata=False,
np_printoptions=None,
highlight_options=None):
"""Generate a RichTextLines object showing a tensor in formatted style.
Args:
tensor: The tensor to be displayed, as a numpy ndarray or other
appropriate format (e.g., None representing uninitialized tensors).
tensor_label: A label for the tensor, as a string. If set to None, will
suppress the tensor name line in the return value.
include_metadata: Whether metadata such as dtype and shape are to be
included in the formatted text.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions() to set the text format for display numpy
ndarrays.
highlight_options: (HighlightOptions) options for highlighting elements
of the tensor.
Returns:
A RichTextLines object. Its annotation field has line-by-line markups to
indicate which indices in the array the first element of each line
corresponds to.
"""
lines = []
font_attr_segs = {}
if tensor_label is not None:
lines.append("Tensor \"%s\":" % tensor_label)
suffix = tensor_label.split(":")[-1]
if suffix.isdigit():
# Suffix is a number. Assume it is the output slot index.
font_attr_segs[0] = [(8, 8 + len(tensor_label), "bold")]
else:
# Suffix is not a number. It is auxiliary information such as the debug
# op type. In this case, highlight the suffix with a different color.
debug_op_len = len(suffix)
proper_len = len(tensor_label) - debug_op_len - 1
font_attr_segs[0] = [
(8, 8 + proper_len, "bold"),
(8 + proper_len + 1, 8 + proper_len + 1 + debug_op_len, "yellow")
]
if tensor is None:
if lines:
lines.append("")
lines.append("Uninitialized tensor")
return debugger_cli_common.RichTextLines(lines)
elif not isinstance(tensor, np.ndarray):
# If tensor is not a np.ndarray, return simple text-line representation of
# the object without annotations.
if lines:
lines.append("")
lines.extend(repr(tensor).split("\n"))
return debugger_cli_common.RichTextLines(lines)
if include_metadata:
lines.append(" dtype: %s" % str(tensor.dtype))
lines.append(" shape: %s" % str(tensor.shape))
if lines:
lines.append("")
hlines = len(lines)
# Apply custom string formatting options for numpy ndarray.
if np_printoptions is not None:
np.set_printoptions(**np_printoptions)
array_lines = repr(tensor).split("\n")
lines.extend(array_lines)
  annotations = None  # stays None for string tensors, which are not annotated
  if tensor.dtype.type is not np.string_:
# Parse array lines to get beginning indices for each line.
# TODO(cais): Currently, we do not annotate string-type tensors due to
# difficulty in escaping sequences. Address this issue.
annotations = _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=np_printoptions, offset=hlines)
formatted = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs, annotations=annotations)
# Perform optional highlighting.
if highlight_options is not None:
indices_list = list(np.argwhere(highlight_options.criterion(tensor)))
total_elements = np.size(tensor)
highlight_summary = "Highlighted%s: %d of %d element(s) (%.2f%%)" % (
"(%s)" % highlight_options.description if highlight_options.description
else "", len(indices_list), total_elements,
len(indices_list) / float(total_elements) * 100.0)
formatted.lines[0] += " " + highlight_summary
if indices_list:
indices_list = [list(indices) for indices in indices_list]
are_omitted, rows, start_cols, end_cols = locate_tensor_element(
formatted, indices_list)
for is_omitted, row, start_col, end_col in zip(are_omitted, rows,
start_cols, end_cols):
if is_omitted or start_col is None or end_col is None:
continue
if row in formatted.font_attr_segs:
formatted.font_attr_segs[row].append(
(start_col, end_col, highlight_options.font_attr))
else:
formatted.font_attr_segs[row] = [(start_col, end_col,
highlight_options.font_attr)]
return formatted
def _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=None, offset=0):
"""Generate annotations for line-by-line begin indices of tensor text.
Parse the numpy-generated text representation of a numpy ndarray to
determine the indices of the first element of each text line (if any
element is present in the line).
For example, given the following multi-line ndarray text representation:
["array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])"]
the generate annotation will be:
{0: {BEGIN_INDICES_KEY: [0, 0]},
1: {BEGIN_INDICES_KEY: [1, 0]},
2: {BEGIN_INDICES_KEY: [2, 0]},
3: {BEGIN_INDICES_KEY: [3, 0]}}
Args:
array_lines: Text lines representing the tensor, as a list of str.
tensor: The tensor being formatted as string.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions().
offset: Line number offset applied to the line indices in the returned
annotation.
Returns:
An annotation as a dict.
"""
if np_printoptions and "edgeitems" in np_printoptions:
edge_items = np_printoptions["edgeitems"]
else:
edge_items = _NUMPY_DEFAULT_EDGE_ITEMS
annotations = {}
# Put metadata about the tensor in the annotations["tensor_metadata"].
annotations["tensor_metadata"] = {
"dtype": tensor.dtype, "shape": tensor.shape}
dims = np.shape(tensor)
ndims = len(dims)
if ndims == 0:
# No indices for a 0D tensor.
return annotations
curr_indices = [0] * len(dims)
curr_dim = 0
for i in xrange(len(array_lines)):
line = array_lines[i].strip()
if not line:
# Skip empty lines, which can appear for >= 3D arrays.
continue
if line == _NUMPY_OMISSION:
annotations[offset + i] = {OMITTED_INDICES_KEY: copy.copy(curr_indices)}
curr_indices[curr_dim - 1] = dims[curr_dim - 1] - edge_items
else:
num_lbrackets = line.count("[") # TODO(cais): String array escaping.
num_rbrackets = line.count("]")
curr_dim += num_lbrackets - num_rbrackets
annotations[offset + i] = {BEGIN_INDICES_KEY: copy.copy(curr_indices)}
if num_rbrackets == 0:
line_content = line[line.rfind("[") + 1:]
num_elements = line_content.count(",")
curr_indices[curr_dim - 1] += num_elements
else:
if curr_dim > 0:
curr_indices[curr_dim - 1] += 1
for k in xrange(curr_dim, ndims):
curr_indices[k] = 0
return annotations
def locate_tensor_element(formatted, indices):
"""Locate a tensor element in formatted text lines, given element indices.
Given a RichTextLines object representing a tensor and indices of the sought
element, return the row number at which the element is located (if exists).
Args:
formatted: A RichTextLines object containing formatted text lines
representing the tensor.
indices: Indices of the sought element, as a list of int or a list of list
of int. The former case is for a single set of indices to look up,
whereas the latter case is for looking up a batch of indices sets at once.
In the latter case, the indices must be in ascending order, or a
ValueError will be raised.
Returns:
1) A boolean indicating whether the element falls into an omitted line.
2) Row index.
3) Column start index, i.e., the first column in which the representation
of the specified tensor starts, if it can be determined. If it cannot
be determined (e.g., due to ellipsis), None.
4) Column end index, i.e., the column right after the last column that
represents the specified tensor. Iff it cannot be determined, None.
For return values described above are based on a single set of indices to
look up. In the case of batch mode (multiple sets of indices), the return
values will be lists of the types described above.
Raises:
AttributeError: If:
Input argument "formatted" does not have the required annotations.
ValueError: If:
1) Indices do not match the dimensions of the tensor, or
2) Indices exceed sizes of the tensor, or
3) Indices contain negative value(s).
4) If in batch mode, and if not all sets of indices are in ascending
order.
"""
if isinstance(indices[0], list):
indices_list = indices
input_batch = True
else:
indices_list = [indices]
input_batch = False
# Check that tensor_metadata is available.
if "tensor_metadata" not in formatted.annotations:
raise AttributeError("tensor_metadata is not available in annotations.")
# Sanity check on input argument.
_validate_indices_list(indices_list, formatted)
dims = formatted.annotations["tensor_metadata"]["shape"]
batch_size = len(indices_list)
lines = formatted.lines
annot = formatted.annotations
prev_r = 0
prev_line = ""
prev_indices = [0] * len(dims)
# Initialize return values
are_omitted = [None] * batch_size
row_indices = [None] * batch_size
start_columns = [None] * batch_size
end_columns = [None] * batch_size
batch_pos = 0 # Current position in the batch.
for r in xrange(len(lines)):
if r not in annot:
continue
if BEGIN_INDICES_KEY in annot[r]:
indices_key = BEGIN_INDICES_KEY
elif OMITTED_INDICES_KEY in annot[r]:
indices_key = OMITTED_INDICES_KEY
matching_indices_list = [
ind for ind in indices_list[batch_pos:]
if prev_indices <= ind < annot[r][indices_key]
]
if matching_indices_list:
num_matches = len(matching_indices_list)
match_start_columns, match_end_columns = _locate_elements_in_line(
prev_line, matching_indices_list, prev_indices)
start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
are_omitted[batch_pos:batch_pos + num_matches] = [
OMITTED_INDICES_KEY in annot[prev_r]
] * num_matches
row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches
batch_pos += num_matches
if batch_pos >= batch_size:
break
prev_r = r
prev_line = lines[r]
prev_indices = annot[r][indices_key]
if batch_pos < batch_size:
matching_indices_list = indices_list[batch_pos:]
num_matches = len(matching_indices_list)
match_start_columns, match_end_columns = _locate_elements_in_line(
prev_line, matching_indices_list, prev_indices)
start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
are_omitted[batch_pos:batch_pos + num_matches] = [
OMITTED_INDICES_KEY in annot[prev_r]
] * num_matches
row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches
if input_batch:
return are_omitted, row_indices, start_columns, end_columns
else:
return are_omitted[0], row_indices[0], start_columns[0], end_columns[0]
def _validate_indices_list(indices_list, formatted):
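  """Validate a batch of index sets against a formatted tensor's metadata.

  Raises:
    ValueError: If any index set does not match the tensor's rank, exceeds
      the tensor's dimension sizes, contains negative values, or if the
      index sets are not sorted in ascending order.
  """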
prev_ind = None
  dims = formatted.annotations["tensor_metadata"]["shape"]
  for ind in indices_list:
    # Check indices match tensor dimensions.
if len(ind) != len(dims):
raise ValueError("Dimensions mismatch: requested: %d; actual: %d" %
(len(ind), len(dims)))
    # Check indices are within size limits.
for req_idx, siz in zip(ind, dims):
if req_idx >= siz:
raise ValueError("Indices exceed tensor dimensions.")
if req_idx < 0:
raise ValueError("Indices contain negative value(s).")
# Check indices are in ascending order.
if prev_ind and ind < prev_ind:
raise ValueError("Input indices sets are not in ascending order.")
prev_ind = ind
def _locate_elements_in_line(line, indices_list, ref_indices):
"""Determine the start and end indices of an element in a line.
Args:
line: (str) the line in which the element is to be sought.
indices_list: (list of list of int) list of indices of the element to
search for. Assumes that the indices in the batch are unique and sorted
in ascending order.
ref_indices: (list of int) reference indices, i.e., the indices of the
first element represented in the line.
Returns:
start_columns: (list of int) start column indices, if found. If not found,
None.
end_columns: (list of int) end column indices, if found. If not found,
None.
  If found, an element is represented in the left-closed-right-open interval
  [start_column, end_column).
"""
batch_size = len(indices_list)
offsets = [indices[-1] - ref_indices[-1] for indices in indices_list]
start_columns = [None] * batch_size
end_columns = [None] * batch_size
if _NUMPY_OMISSION in line:
ellipsis_index = line.find(_NUMPY_OMISSION)
else:
ellipsis_index = len(line)
matches_iter = re.finditer(_NUMBER_REGEX, line)
batch_pos = 0
offset_counter = 0
for match in matches_iter:
if match.start() > ellipsis_index:
# Do not attempt to search beyond ellipsis.
break
if offset_counter == offsets[batch_pos]:
start_columns[batch_pos] = match.start()
# Remove the final comma, right bracket, or whitespace.
end_columns[batch_pos] = match.end() - 1
batch_pos += 1
if batch_pos >= batch_size:
break
offset_counter += 1
return start_columns, end_columns
| apache-2.0 |
jeremiahmarks/sl4a | python/gdata/src/gdata/tlslite/integration/IMAP4_TLS.py | 319 | 5145 | """TLS Lite + imaplib."""
import socket
from imaplib import IMAP4
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
# IMAP TLS PORT
IMAP4_TLS_PORT = 993
class IMAP4_TLS(IMAP4, ClientHelper):
"""This class extends L{imaplib.IMAP4} with TLS support."""
def __init__(self, host = '', port = IMAP4_TLS_PORT,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Create a new IMAP4_TLS.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The caller should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type host: str
@param host: Server to connect to.
@type port: int
@param port: Port to connect to.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
IMAP4.__init__(self, host, port)
def open(self, host = '', port = IMAP4_TLS_PORT):
"""Setup connection to remote server on "host:port".
This connection will be used by the routines:
read, readline, send, shutdown.
"""
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.sock = TLSConnection(self.sock)
self.sock.closeSocket = True
ClientHelper._handshake(self, self.sock)
self.file = self.sock.makefile('rb')
| apache-2.0 |
ajaali/django | tests/gis_tests/geo3d/models.py | 302 | 1294 | from django.utils.encoding import python_2_unicode_compatible
from ..models import models
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=30)
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class City3D(NamedModel):
point = models.PointField(dim=3)
class Interstate2D(NamedModel):
line = models.LineStringField(srid=4269)
class Interstate3D(NamedModel):
line = models.LineStringField(dim=3, srid=4269)
class InterstateProj2D(NamedModel):
line = models.LineStringField(srid=32140)
class InterstateProj3D(NamedModel):
line = models.LineStringField(dim=3, srid=32140)
class Polygon2D(NamedModel):
poly = models.PolygonField(srid=32140)
class Polygon3D(NamedModel):
poly = models.PolygonField(dim=3, srid=32140)
class SimpleModel(models.Model):
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
class Point2D(SimpleModel):
point = models.PointField()
class Point3D(SimpleModel):
point = models.PointField(dim=3)
class MultiPoint3D(SimpleModel):
mpoint = models.MultiPointField(dim=3)
| bsd-3-clause |
x2nie/odoo | openerp/osv/fields.py | 14 | 69520 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Fields:
- simple
- relations (one2many, many2one, many2many)
- function
Fields Attributes:
 * _classic_read: is a classic sql field
* _type : field type
* _auto_join: for one2many and many2one fields, tells whether select
queries will join the relational table instead of replacing the
field condition by an equivalent-one based on a search.
* readonly
* required
* size
"""
import base64
import datetime as DT
import functools
import logging
import pytz
import re
import xmlrpclib
from psycopg2 import Binary
import openerp
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import float_round, float_repr
from openerp.tools import html_sanitize
import simplejson
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
def _symbol_set(symb):
if symb is None or symb == False:
return None
elif isinstance(symb, unicode):
return symb.encode('utf-8')
return str(symb)
class _column(object):
""" Base of all fields, a database column
An instance of this object is a *description* of a database column. It will
not hold any data, but only provide the methods to manipulate data of an
ORM record or even prepare/update the database to hold such a field of data.
"""
_classic_read = True
_classic_write = True
_auto_join = False
_prefetch = True
_properties = False
_type = 'unknown'
_obj = None
_multi = False
_symbol_c = '%s'
_symbol_f = _symbol_set
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = None
_deprecated = False
copy = True # whether the field is copied by BaseModel.copy()
def __init__(self, string='unknown', required=False, readonly=False, domain=None, context=None, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
"""
The 'manual' keyword argument specifies if the field is a custom one.
It corresponds to the 'state' column in ir_model_fields.
"""
if domain is None:
domain = []
if context is None:
context = {}
self.states = states or {}
self.string = string
self.readonly = readonly
self.required = required
self.size = size
self.help = args.get('help', '')
self.priority = priority
self.change_default = change_default
self.ondelete = ondelete.lower() if ondelete else None # defaults to 'set null' in ORM
self.translate = translate
self._domain = domain
self._context = context
self.write = False
self.read = False
self.select = select
self.manual = manual
self.selectable = True
self.group_operator = args.get('group_operator', False)
self.groups = False # CSV list of ext IDs of groups that can access this field
self.deprecated = False # Optional deprecation warning
for a in args:
setattr(self, a, args[a])
        # prefetch only if self._classic_write and not self.deprecated
if not self._classic_write or self.deprecated:
self._prefetch = False
def to_field(self):
""" convert column `self` to a new-style field """
from openerp.fields import Field
return Field.by_type[self._type](**self.to_field_args())
def to_field_args(self):
""" return a dictionary with all the arguments to pass to the field """
items = [
('_origin', self), # field interfaces self
('copy', self.copy),
('index', self.select),
('manual', self.manual),
('string', self.string),
('help', self.help),
('readonly', self.readonly),
('required', self.required),
('states', self.states),
('groups', self.groups),
('size', self.size),
('ondelete', self.ondelete),
('translate', self.translate),
('domain', self._domain),
('context', self._context),
('change_default', self.change_default),
('deprecated', self.deprecated),
]
return dict(item for item in items if item[1])
def restart(self):
pass
def set(self, cr, obj, id, name, value, user=None, context=None):
cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%s', (self._symbol_set[1](value), id))
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
raise Exception(_('undefined get method !'))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
ids = obj.search(cr, uid, args+self._domain+[(name, 'ilike', value)], offset, limit, context=context)
res = obj.read(cr, uid, ids, [name], context=context)
return [x[name] for x in res]
def as_display_name(self, cr, uid, obj, value, context=None):
"""Converts a field value to a suitable string representation for a record,
e.g. when this field is used as ``rec_name``.
:param obj: the ``BaseModel`` instance this column belongs to
:param value: a proper value as returned by :py:meth:`~openerp.orm.osv.BaseModel.read`
for this column
"""
# delegated to class method, so a column type A can delegate
# to a column type B.
return self._as_display_name(self, cr, uid, obj, value, context=None)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
# This needs to be a class method, in case a column type A as to delegate
# to a column type B.
return tools.ustr(value)
# ---------------------------------------------------------
# Simple fields
# ---------------------------------------------------------
class boolean(_column):
_type = 'boolean'
_symbol_c = '%s'
_symbol_f = bool
_symbol_set = (_symbol_c, _symbol_f)
def __init__(self, string='unknown', required=False, **args):
super(boolean, self).__init__(string=string, required=required, **args)
if required:
_logger.debug(
"required=True is deprecated: making a boolean field"
" `required` has no effect, as NULL values are "
"automatically turned into False. args: %r",args)
class integer(_column):
_type = 'integer'
_symbol_c = '%s'
_symbol_f = lambda x: int(x or 0)
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self,x: x or 0
def __init__(self, string='unknown', required=False, **args):
super(integer, self).__init__(string=string, required=required, **args)
class reference(_column):
_type = 'reference'
_classic_read = False # post-process to handle missing target
def __init__(self, string, selection, size=None, **args):
if callable(selection):
from openerp import api
selection = api.expected(api.cr_uid_context, selection)
_column.__init__(self, string=string, size=size, selection=selection, **args)
def to_field_args(self):
args = super(reference, self).to_field_args()
args['selection'] = self.selection
return args
def get(self, cr, obj, ids, name, uid=None, context=None, values=None):
result = {}
# copy initial values fetched previously.
for value in values:
result[value['id']] = value[name]
if value[name]:
model, res_id = value[name].split(',')
if not obj.pool[model].exists(cr, uid, [int(res_id)], context=context):
result[value['id']] = False
return result
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
if value:
# reference fields have a 'model,id'-like value, that we need to convert
# to a real name
model_name, res_id = value.split(',')
if model_name in obj.pool and res_id:
model = obj.pool[model_name]
names = model.name_get(cr, uid, [int(res_id)], context=context)
return names[0][1] if names else False
return tools.ustr(value)
# takes a string (encoded in utf8) and returns a string (encoded in utf8)
def _symbol_set_char(self, symb):
#TODO:
# * we need to remove the "symb==False" from the next line BUT
# for now too many things rely on this broken behavior
# * the symb==None test should be common to all data types
if symb is None or symb == False:
return None
# we need to convert the string to a unicode object to be able
# to evaluate its length (and possibly truncate it) reliably
u_symb = tools.ustr(symb)
return u_symb[:self.size].encode('utf8')
class char(_column):
_type = 'char'
def __init__(self, string="unknown", size=None, **args):
_column.__init__(self, string=string, size=size or None, **args)
# self._symbol_set_char defined to keep the backward compatibility
self._symbol_f = self._symbol_set_char = lambda x: _symbol_set_char(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
class text(_column):
_type = 'text'
class html(text):
_type = 'html'
_symbol_c = '%s'
def _symbol_set_html(self, value):
if value is None or value is False:
return None
if not self._sanitize:
return value
return html_sanitize(value)
def __init__(self, string='unknown', sanitize=True, **args):
super(html, self).__init__(string=string, **args)
self._sanitize = sanitize
# symbol_set redefinition because of sanitize specific behavior
self._symbol_f = self._symbol_set_html
self._symbol_set = (self._symbol_c, self._symbol_f)
def to_field_args(self):
args = super(html, self).to_field_args()
args['sanitize'] = self._sanitize
return args
import __builtin__
class float(_column):
_type = 'float'
_symbol_c = '%s'
_symbol_f = lambda x: __builtin__.float(x or 0.0)
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self,x: x or 0.0
def __init__(self, string='unknown', digits=None, digits_compute=None, required=False, **args):
_column.__init__(self, string=string, required=required, **args)
self.digits = digits
# synopsis: digits_compute(cr) -> (precision, scale)
self.digits_compute = digits_compute
def to_field_args(self):
args = super(float, self).to_field_args()
args['digits'] = self.digits_compute or self.digits
return args
def digits_change(self, cr):
if self.digits_compute:
self.digits = self.digits_compute(cr)
if self.digits:
precision, scale = self.digits
self._symbol_set = ('%s', lambda x: float_repr(float_round(__builtin__.float(x or 0.0),
precision_digits=scale),
precision_digits=scale))
class date(_column):
_type = 'date'
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
@staticmethod
def today(*args):
""" Returns the current date in a format fit for being a
default value to a ``date`` field.
        This method should be provided as-is to the _defaults dict; it
        should not be called.
"""
return DT.date.today().strftime(
tools.DEFAULT_SERVER_DATE_FORMAT)
@staticmethod
def context_today(model, cr, uid, context=None, timestamp=None):
"""Returns the current date as seen in the client's timezone
in a format fit for date fields.
This method may be passed as value to initialize _defaults.
:param Model model: model (osv) for which the date value is being
computed - automatically passed when used in
_defaults.
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a
datetime, regular dates can't be converted
between timezones.)
:param dict context: the 'tz' key in the context should give the
name of the User/Client timezone (otherwise
UTC is used)
:rtype: str
"""
today = timestamp or DT.datetime.now()
context_today = None
if context and context.get('tz'):
tz_name = context['tz']
else:
user = model.pool['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
if tz_name:
try:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
utc_today = utc.localize(today, is_dst=False) # UTC = no DST
context_today = utc_today.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific today date, "
"using the UTC value for `today`",
exc_info=True)
return (context_today or today).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
@staticmethod
def date_to_datetime(model, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
            time of 12:00 PM (noon) - because a time is needed.
            :param str userdate: date string in user time zone
:return: UTC datetime string for server-side use
"""
user_date = DT.datetime.strptime(userdate, tools.DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = model.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + DT.timedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
class datetime(_column):
_type = 'datetime'
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
@staticmethod
def now(*args):
""" Returns the current datetime in a format fit for being a
default value to a ``datetime`` field.
        This method should be provided as-is to the _defaults dict; it
        should not be called.
"""
return DT.datetime.now().strftime(
tools.DEFAULT_SERVER_DATETIME_FORMAT)
@staticmethod
def context_timestamp(cr, uid, timestamp, context=None):
"""Returns the given timestamp converted to the client's timezone.
This method is *not* meant for use as a _defaults initializer,
because datetime fields are automatically converted upon
           display on client side. For _defaults, :meth:`fields.datetime.now`
           should be used instead.
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone
:param dict context: the 'tz' key in the context should give the
name of the User/Client timezone (otherwise
UTC is used)
:rtype: datetime
:return: timestamp converted to timezone-aware datetime in context
timezone
"""
assert isinstance(timestamp, DT.datetime), 'Datetime instance expected'
if context and context.get('tz'):
tz_name = context['tz']
else:
registry = openerp.modules.registry.RegistryManager.get(cr.dbname)
user = registry['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
if tz_name:
try:
context_tz = pytz.timezone(tz_name)
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return utc_timestamp
class binary(_column):
_type = 'binary'
_symbol_c = '%s'
# Binary values may be byte strings (python 2.6 byte array), but
# the legacy OpenERP convention is to transfer and store binaries
# as base64-encoded strings. The base64 string may be provided as a
# unicode in some circumstances, hence the str() cast in symbol_f.
# This str coercion will only work for pure ASCII unicode strings,
    # on purpose - non base64 data must be passed as 8-bit byte strings.
_symbol_f = lambda symb: symb and Binary(str(symb)) or None
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self, x: x and str(x)
_classic_read = False
_prefetch = False
def __init__(self, string='unknown', filters=None, **args):
_column.__init__(self, string=string, **args)
self.filters = filters
def get(self, cr, obj, ids, name, user=None, context=None, values=None):
if not context:
context = {}
if not values:
values = []
res = {}
for i in ids:
val = None
for v in values:
if v['id'] == i:
val = v[name]
break
# If client is requesting only the size of the field, we return it instead
# of the content. Presumably a separate request will be done to read the actual
# content if it's needed at some point.
# TODO: after 6.0 we should consider returning a dict with size and content instead of
# having an implicit convention for the value
if val and context.get('bin_size_%s' % name, context.get('bin_size')):
res[i] = tools.human_size(long(val))
else:
res[i] = val
return res
class selection(_column):
_type = 'selection'
def __init__(self, selection, string='unknown', **args):
if callable(selection):
from openerp import api
selection = api.expected(api.cr_uid_context, selection)
_column.__init__(self, string=string, **args)
self.selection = selection
def to_field_args(self):
args = super(selection, self).to_field_args()
args['selection'] = self.selection
return args
@classmethod
def reify(cls, cr, uid, model, field, context=None):
""" Munges the field's ``selection`` attribute as necessary to get
        something usable out of it: calls it if it's a function, applies
translations to labels if it's not.
A callable ``selection`` is considered translated on its own.
:param orm.Model model:
:param _column field:
"""
if callable(field.selection):
return field.selection(model, cr, uid, context)
if not (context and 'lang' in context):
return field.selection
# field_to_dict isn't given a field name, only a field object, we
# need to get the name back in order to perform the translation lookup
field_name = next(
name for name, column in model._columns.iteritems()
if column == field)
translation_filter = "%s,%s" % (model._name, field_name)
translate = functools.partial(
model.pool['ir.translation']._get_source,
cr, uid, translation_filter, 'selection', context['lang'])
return [
(value, translate(label))
for value, label in field.selection
]
# ---------------------------------------------------------
# Relationals fields
# ---------------------------------------------------------
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update
# (2, ID) remove (delete)
# (3, ID) unlink one (target id or target of relation)
# (4, ID) link
# (5) unlink all (only valid for one2many)
#
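# Example (hypothetical model and field names): writing on a one2many field
# using the commands above:
#   model.write(cr, uid, [record_id], {'line_ids': [
#       (0, 0, {'name': 'new line'}),  # create a new linked record
#       (1, 7, {'name': 'renamed'}),   # update the linked record with id 7
#       (2, 8),                        # delete the linked record with id 8
#   ]})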
class many2one(_column):
_classic_read = False
_classic_write = True
_type = 'many2one'
_symbol_c = '%s'
_symbol_f = lambda x: x or None
_symbol_set = (_symbol_c, _symbol_f)
def __init__(self, obj, string='unknown', auto_join=False, **args):
_column.__init__(self, string=string, **args)
self._obj = obj
self._auto_join = auto_join
def to_field_args(self):
args = super(many2one, self).to_field_args()
args['comodel_name'] = self._obj
args['auto_join'] = self._auto_join
return args
def set(self, cr, obj_src, id, field, values, user=None, context=None):
if not context:
context = {}
obj = obj_src.pool[self._obj]
self._table = obj._table
if type(values) == type([]):
for act in values:
if act[0] == 0:
id_new = obj.create(cr, act[2])
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (id_new, id))
elif act[0] == 1:
obj.write(cr, [act[1]], act[2], context=context)
elif act[0] == 2:
cr.execute('delete from '+self._table+' where id=%s', (act[1],))
elif act[0] == 3 or act[0] == 5:
cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
elif act[0] == 4:
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (act[1], id))
else:
if values:
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (values, id))
else:
cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
return value[1] if isinstance(value, tuple) else tools.ustr(value)
class one2many(_column):
_classic_read = False
_classic_write = False
_prefetch = False
_type = 'one2many'
# one2many columns are not copied by default
copy = False
def __init__(self, obj, fields_id, string='unknown', limit=None, auto_join=False, **args):
_column.__init__(self, string=string, **args)
self._obj = obj
self._fields_id = fields_id
self._limit = limit
self._auto_join = auto_join
        # one2many can't be used as a condition for defaults
assert(self.change_default != True)
def to_field_args(self):
args = super(one2many, self).to_field_args()
args['comodel_name'] = self._obj
args['inverse_name'] = self._fields_id
args['auto_join'] = self._auto_join
args['limit'] = self._limit
return args
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if self._context:
context = dict(context or {})
context.update(self._context)
# retrieve the records in the comodel
comodel = obj.pool[self._obj].browse(cr, user, [], context)
inverse = self._fields_id
domain = self._domain(obj) if callable(self._domain) else self._domain
domain = domain + [(inverse, 'in', ids)]
records = comodel.search(domain, limit=self._limit)
result = {id: [] for id in ids}
# read the inverse of records without prefetching other fields on them
for record in records.with_context(prefetch_fields=False):
# record[inverse] may be a record or an integer
result[int(record[inverse])].append(record.id)
return result
def set(self, cr, obj, id, field, values, user=None, context=None):
result = []
context = dict(context or {})
context.update(self._context)
context['recompute'] = False # recomputation is done by outer create/write
if not values:
return
obj = obj.pool[self._obj]
_table = obj._table
for act in values:
if act[0] == 0:
act[2][self._fields_id] = id
id_new = obj.create(cr, user, act[2], context=context)
result += obj._store_get_values(cr, user, [id_new], act[2].keys(), context)
elif act[0] == 1:
obj.write(cr, user, [act[1]], act[2], context=context)
elif act[0] == 2:
obj.unlink(cr, user, [act[1]], context=context)
elif act[0] == 3:
reverse_rel = obj._all_columns.get(self._fields_id)
assert reverse_rel, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
# if the model has on delete cascade, just delete the row
if reverse_rel.column.ondelete == "cascade":
obj.unlink(cr, user, [act[1]], context=context)
else:
cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%s', (act[1],))
elif act[0] == 4:
# table of the field (parent_model in case of inherit)
field_model = self._fields_id in obj.pool[self._obj]._columns and self._obj or obj.pool[self._obj]._all_columns[self._fields_id].parent_model
field_table = obj.pool[field_model]._table
cr.execute("select 1 from {0} where id=%s and {1}=%s".format(field_table, self._fields_id), (act[1], id))
if not cr.fetchone():
# Must use write() to recompute parent_store structure if needed and check access rules
obj.write(cr, user, [act[1]], {self._fields_id:id}, context=context or {})
elif act[0] == 5:
reverse_rel = obj._all_columns.get(self._fields_id)
assert reverse_rel, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
# if the o2m has a static domain we must respect it when unlinking
domain = self._domain(obj) if callable(self._domain) else self._domain
extra_domain = domain or []
ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
# If the model has cascade deletion, we delete the rows because it is the intended behavior,
# otherwise we only nullify the reverse foreign key column.
if reverse_rel.column.ondelete == "cascade":
obj.unlink(cr, user, ids_to_unlink, context=context)
else:
obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
elif act[0] == 6:
# Must use write() to recompute parent_store structure if needed
obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
ids2 = act[2] or [0]
cr.execute('select id from '+_table+' where '+self._fields_id+'=%s and id <> ALL (%s)', (id,ids2))
ids3 = map(lambda x:x[0], cr.fetchall())
obj.write(cr, user, ids3, {self._fields_id:False}, context=context or {})
return result
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
domain = self._domain(obj) if callable(self._domain) else self._domain
return obj.pool[self._obj].name_search(cr, uid, value, domain, operator, context=context,limit=limit)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
raise NotImplementedError('One2Many columns should not be used as record name (_rec_name)')
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update (write fields to ID)
# (2, ID) remove (calls unlink on ID, that will also delete the relationship because of the ondelete)
# (3, ID) unlink (delete the relationship between the two objects but does not delete ID)
# (4, ID) link (add a relationship)
# (5, ID) unlink all
# (6, ?, ids) set a list of links
#
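# Example (hypothetical field name): replace all links of a many2many at once:
#   model.write(cr, uid, [record_id], {'tag_ids': [(6, 0, [1, 2, 3])]})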
class many2many(_column):
"""Encapsulates the logic of a many-to-many bidirectional relationship, handling the
low-level details of the intermediary relationship table transparently.
A many-to-many relationship is always symmetrical, and can be declared and accessed
from either endpoint model.
If ``rel`` (relationship table name), ``id1`` (source foreign key column name)
    or ``id2`` (destination foreign key column name) are not specified, the system will
provide default values. This will by default only allow one single symmetrical
many-to-many relationship between the source and destination model.
For multiple many-to-many relationship between the same models and for
relationships where source and destination models are the same, ``rel``, ``id1``
and ``id2`` should be specified explicitly.
:param str obj: destination model
:param str rel: optional name of the intermediary relationship table. If not specified,
a canonical name will be derived based on the alphabetically-ordered
model names of the source and destination (in the form: ``amodel_bmodel_rel``).
Automatic naming is not possible when the source and destination are
the same, for obvious ambiguity reasons.
:param str id1: optional name for the column holding the foreign key to the current
model in the relationship table. If not specified, a canonical name
will be derived based on the model name (in the form: `src_model_id`).
:param str id2: optional name for the column holding the foreign key to the destination
model in the relationship table. If not specified, a canonical name
will be derived based on the model name (in the form: `dest_model_id`)
:param str string: field label
"""
_classic_read = False
_classic_write = False
_prefetch = False
_type = 'many2many'
def __init__(self, obj, rel=None, id1=None, id2=None, string='unknown', limit=None, **args):
"""
"""
_column.__init__(self, string=string, **args)
self._obj = obj
if rel and '.' in rel:
            raise Exception(_('The second argument of the many2many field %s must be a SQL table !'
                              ' You used %s, which is not a valid SQL table name.') % (string, rel))
self._rel = rel
self._id1 = id1
self._id2 = id2
self._limit = limit
def to_field_args(self):
args = super(many2many, self).to_field_args()
args['comodel_name'] = self._obj
args['relation'] = self._rel
args['column1'] = self._id1
args['column2'] = self._id2
args['limit'] = self._limit
return args
def _sql_names(self, source_model):
"""Return the SQL names defining the structure of the m2m relationship table
:return: (m2m_table, local_col, dest_col) where m2m_table is the table name,
local_col is the name of the column holding the current model's FK, and
                 dest_col is the name of the column holding the destination model's FK.
"""
tbl, col1, col2 = self._rel, self._id1, self._id2
if not all((tbl, col1, col2)):
# the default table name is based on the stable alphabetical order of tables
dest_model = source_model.pool[self._obj]
tables = tuple(sorted([source_model._table, dest_model._table]))
if not tbl:
assert tables[0] != tables[1], 'Implicit/Canonical naming of m2m relationship table '\
'is not possible when source and destination models are '\
'the same'
tbl = '%s_%s_rel' % tables
if not col1:
col1 = '%s_id' % source_model._table
if not col2:
col2 = '%s_id' % dest_model._table
return tbl, col1, col2
def _get_query_and_where_params(self, cr, model, ids, values, where_params):
""" Extracted from ``get`` to facilitate fine-tuning of the generated
query. """
query = 'SELECT %(rel)s.%(id2)s, %(rel)s.%(id1)s \
FROM %(rel)s, %(from_c)s \
WHERE %(rel)s.%(id1)s IN %%s \
AND %(rel)s.%(id2)s = %(tbl)s.id \
%(where_c)s \
%(order_by)s \
%(limit)s \
OFFSET %(offset)d' \
% values
return query, where_params
def get(self, cr, model, ids, name, user=None, offset=0, context=None, values=None):
if not context:
context = {}
if not values:
values = {}
res = {}
if not ids:
return res
for id in ids:
res[id] = []
if offset:
_logger.warning(
"Specifying offset at a many2many.get() is deprecated and may"
" produce unpredictable results.")
obj = model.pool[self._obj]
rel, id1, id2 = self._sql_names(model)
        # static domains are lists, and are evaluated both here and on client-side, while string
        # domains are supposed to be dynamic and evaluated on client-side only (thus ignored here)
# FIXME: make this distinction explicit in API!
domain = isinstance(self._domain, list) and self._domain or []
wquery = obj._where_calc(cr, user, domain, context=context)
obj._apply_ir_rules(cr, user, wquery, 'read', context=context)
from_c, where_c, where_params = wquery.get_sql()
if where_c:
where_c = ' AND ' + where_c
order_by = ' ORDER BY "%s".%s' %(obj._table, obj._order.split(',')[0])
limit_str = ''
if self._limit is not None:
limit_str = ' LIMIT %d' % self._limit
query, where_params = self._get_query_and_where_params(cr, model, ids, {'rel': rel,
'from_c': from_c,
'tbl': obj._table,
'id1': id1,
'id2': id2,
'where_c': where_c,
'limit': limit_str,
'order_by': order_by,
'offset': offset,
}, where_params)
cr.execute(query, [tuple(ids),] + where_params)
for r in cr.fetchall():
res[r[1]].append(r[0])
return res
def set(self, cr, model, id, name, values, user=None, context=None):
if not context:
context = {}
if not values:
return
rel, id1, id2 = self._sql_names(model)
obj = model.pool[self._obj]
for act in values:
if not (isinstance(act, list) or isinstance(act, tuple)) or not act:
continue
if act[0] == 0:
idnew = obj.create(cr, user, act[2], context=context)
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, idnew))
elif act[0] == 1:
obj.write(cr, user, [act[1]], act[2], context=context)
elif act[0] == 2:
obj.unlink(cr, user, [act[1]], context=context)
elif act[0] == 3:
cr.execute('delete from '+rel+' where ' + id1 + '=%s and '+ id2 + '=%s', (id, act[1]))
elif act[0] == 4:
# following queries are in the same transaction - so should be relatively safe
cr.execute('SELECT 1 FROM '+rel+' WHERE '+id1+' = %s and '+id2+' = %s', (id, act[1]))
if not cr.fetchone():
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, act[1]))
elif act[0] == 5:
cr.execute('delete from '+rel+' where ' + id1 + ' = %s', (id,))
elif act[0] == 6:
d1, d2,tables = obj.pool.get('ir.rule').domain_get(cr, user, obj._name, context=context)
if d1:
d1 = ' and ' + ' and '.join(d1)
else:
d1 = ''
cr.execute('delete from '+rel+' where '+id1+'=%s AND '+id2+' IN (SELECT '+rel+'.'+id2+' FROM '+rel+', '+','.join(tables)+' WHERE '+rel+'.'+id1+'=%s AND '+rel+'.'+id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2)
for act_nbr in act[2]:
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s, %s)', (id, act_nbr))
#
# TODO: use a name_search
#
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', operator, value)], offset, limit, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
raise NotImplementedError('Many2Many columns should not be used as record name (_rec_name)')
def get_nice_size(value):
size = 0
if isinstance(value, (int,long)):
size = value
elif value: # this is supposed to be a string
size = len(value)
return tools.human_size(size)
# See http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# and http://bugs.python.org/issue10066
invalid_xml_low_bytes = re.compile(r'[\x00-\x08\x0b-\x0c\x0e-\x1f]')
def sanitize_binary_value(value):
# binary fields should be 7-bit ASCII base64-encoded data,
# but we do additional sanity checks to make sure the values
# are not something else that won't pass via XML-RPC
if isinstance(value, (xmlrpclib.Binary, tuple, list, dict)):
# these builtin types are meant to pass untouched
return value
# Handle invalid bytes values that will cause problems
# for XML-RPC. See for more info:
# - http://bugs.python.org/issue10066
# - http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# Coercing to unicode would normally allow it to properly pass via
# XML-RPC, transparently encoded as UTF-8 by xmlrpclib.
# (this works for _any_ byte values, thanks to the fallback
# to latin-1 passthrough encoding when decoding to unicode)
value = tools.ustr(value)
# Due to Python bug #10066 this could still yield invalid XML
# bytes, specifically in the low byte range, that will crash
# the decoding side: [\x00-\x08\x0b-\x0c\x0e-\x1f]
# So check for low bytes values, and if any, perform
# base64 encoding - not very smart or useful, but this is
# our last resort to avoid crashing the request.
if invalid_xml_low_bytes.search(value):
# b64-encode after restoring the pure bytes with latin-1
# passthrough encoding
value = base64.b64encode(value.encode('latin-1'))
return value
# ---------------------------------------------------------
# Function fields
# ---------------------------------------------------------
class function(_column):
"""
A field whose value is computed by a function (rather
than being read from the database).
:param fnct: the callable that will compute the field value.
:param arg: arbitrary value to be passed to ``fnct`` when computing the value.
:param fnct_inv: the callable that will allow writing values in that field
(if not provided, the field is read-only).
:param fnct_inv_arg: arbitrary value to be passed to ``fnct_inv`` when
writing a value.
:param str type: type of the field simulated by the function field
:param fnct_search: the callable that allows searching on the field
(if not provided, search will not return any result).
:param store: store computed value in database
(see :ref:`The *store* parameter <field-function-store>`).
:type store: True or dict specifying triggers for field computation
:param multi: name of batch for batch computation of function fields.
All fields with the same batch name will be computed by
a single function call. This changes the signature of the
``fnct`` callable.
.. _field-function-fnct: The ``fnct`` parameter
.. rubric:: The ``fnct`` parameter
The callable implementing the function field must have the following signature:
.. function:: fnct(model, cr, uid, ids, field_name(s), arg, context)
Implements the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param field_name(s): name of the field to compute, or if ``multi`` is provided,
list of field names to compute.
:type field_name(s): str | [str]
:param arg: arbitrary value passed when declaring the function field
:rtype: dict
:return: mapping of ``ids`` to computed values, or if multi is provided,
to a map of field_names to computed values
The values in the returned dictionary must be of the type specified by the type
argument in the field declaration.
Here is an example with a simple function ``char`` function field::
# declarations
def compute(self, cr, uid, ids, field_name, arg, context):
result = {}
# ...
return result
_columns['my_char'] = fields.function(compute, type='char', size=50)
# when called with ``ids=[1,2,3]``, ``compute`` could return:
{
1: 'foo',
2: 'bar',
3: False # null values should be returned explicitly too
}
If ``multi`` is set, then ``field_name`` is replaced by ``field_names``: a list
of the field names that should be computed. Each value in the returned
dictionary must then be a dictionary mapping field names to values.
Here is an example where two function fields (``name`` and ``age``)
are both computed by a single function field::
# declarations
            def compute_person_data(self, cr, uid, ids, field_names, arg, context):
result = {}
# ...
return result
_columns['name'] = fields.function(compute_person_data, type='char',\
size=50, multi='person_data')
            _columns['age'] = fields.function(compute_person_data, type='integer',\
multi='person_data')
# when called with ``ids=[1,2,3]``, ``compute_person_data`` could return:
{
1: {'name': 'Bob', 'age': 23},
2: {'name': 'Sally', 'age': 19},
3: {'name': 'unknown', 'age': False}
}
.. _field-function-fnct-inv:
.. rubric:: The ``fnct_inv`` parameter
This callable implements the write operation for the function field
and must have the following signature:
.. function:: fnct_inv(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context)
Callable that implements the ``write`` operation for the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param int id: the identifier of the object to write on
            :param str field_name: name of the field to set
            :param field_value: the value to write on the field
            :param fnct_inv_arg: arbitrary value passed when declaring the function field
:return: True
When writing values for a function field, the ``multi`` parameter is ignored.
.. _field-function-fnct-search:
.. rubric:: The ``fnct_search`` parameter
This callable implements the search operation for the function field
and must have the following signature:
.. function:: fnct_search(model, cr, uid, model_again, field_name, criterion, context)
Callable that implements the ``search`` operation for the function field by expanding
a search criterion based on the function field into a new domain based only on
columns that are stored in the database.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param orm model_again: same value as ``model`` (seriously! this is for backwards
compatibility)
:param str field_name: name of the field to search on
:param list criterion: domain component specifying the search criterion on the field.
:rtype: list
:return: domain to use instead of ``criterion`` when performing the search.
This new domain must be based only on columns stored in the database, as it
will be used directly without any translation.
The returned value must be a domain, that is, a list of the form [(field_name, operator, operand)].
The most generic way to implement ``fnct_search`` is to directly search for the records that
match the given ``criterion``, and return their ``ids`` wrapped in a domain, such as
``[('id','in',[1,3,5])]``.
.. _field-function-store:
.. rubric:: The ``store`` parameter
The ``store`` parameter allows caching the result of the field computation in the
database, and defining the triggers that will invalidate that cache and force a
recomputation of the function field.
When not provided, the field is computed every time its value is read.
The value of ``store`` may be either ``True`` (to recompute the field value whenever
any field in the same record is modified), or a dictionary specifying a more
flexible set of recomputation triggers.
A trigger specification is a dictionary that maps the names of the models that
will trigger the computation, to a tuple describing the trigger rule, in the
following form::
store = {
'trigger_model': (mapping_function,
['trigger_field1', 'trigger_field2'],
priority),
}
A trigger rule is defined by a 3-item tuple where:
* The ``mapping_function`` is defined as follows:
.. function:: mapping_function(trigger_model, cr, uid, trigger_ids, context)
Callable that maps record ids of a trigger model to ids of the
corresponding records in the source model (whose field values
need to be recomputed).
:param orm model: trigger_model
:param list trigger_ids: ids of the records of trigger_model that were
modified
:rtype: list
:return: list of ids of the source model whose function field values
need to be recomputed
* The second item is a list of the fields who should act as triggers for
the computation. If an empty list is given, all fields will act as triggers.
* The last item is the priority, used to order the triggers when processing them
after any write operation on a model that has function field triggers. The
default priority is 10.
In fact, setting store = True is the same as using the following trigger dict::
store = {
'model_itself': (lambda self, cr, uid, ids, context: ids,
[],
10)
}
"""
_classic_read = False
_classic_write = False
_prefetch = False
_type = 'function'
_properties = True
# function fields are not copied by default
copy = False
#
# multi: compute several fields in one call
#
def __init__(self, fnct, arg=None, fnct_inv=None, fnct_inv_arg=None, type='float', fnct_search=None, obj=None, store=False, multi=False, **args):
_column.__init__(self, **args)
self._obj = obj
self._fnct = fnct
self._fnct_inv = fnct_inv
self._arg = arg
self._multi = multi
if 'relation' in args:
self._obj = args['relation']
self.digits = args.get('digits', (16,2))
self.digits_compute = args.get('digits_compute', None)
if callable(args.get('selection')):
from openerp import api
self.selection = api.expected(api.cr_uid_context, args['selection'])
self._fnct_inv_arg = fnct_inv_arg
if not fnct_inv:
self.readonly = 1
self._type = type
self._fnct_search = fnct_search
self.store = store
if not fnct_search and not store:
self.selectable = False
if store:
if self._type != 'many2one':
# m2o fields need to return tuples with name_get, not just foreign keys
self._classic_read = True
self._classic_write = True
if type=='binary':
self._symbol_get=lambda x:x and str(x)
else:
self._prefetch = True
if type == 'char':
self._symbol_c = char._symbol_c
self._symbol_f = lambda x: _symbol_set_char(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
else:
type_class = globals().get(type)
if type_class is not None:
self._symbol_c = type_class._symbol_c
self._symbol_f = type_class._symbol_f
self._symbol_set = type_class._symbol_set
def to_field_args(self):
args = super(function, self).to_field_args()
if self._type in ('float',):
args['digits'] = self.digits_compute or self.digits
elif self._type in ('selection', 'reference'):
args['selection'] = self.selection
elif self._type in ('many2one', 'one2many', 'many2many'):
args['comodel_name'] = self._obj
return args
def digits_change(self, cr):
if self._type == 'float':
if self.digits_compute:
self.digits = self.digits_compute(cr)
if self.digits:
precision, scale = self.digits
self._symbol_set = ('%s', lambda x: float_repr(float_round(__builtin__.float(x or 0.0),
precision_digits=scale),
precision_digits=scale))
def search(self, cr, uid, obj, name, args, context=None):
if not self._fnct_search:
#CHECKME: should raise an exception
return []
return self._fnct_search(obj, cr, uid, obj, name, args, context=context)
def postprocess(self, cr, uid, obj, field, value=None, context=None):
return self._postprocess_batch(cr, uid, obj, field, {0: value}, context=context)[0]
def _postprocess_batch(self, cr, uid, obj, field, values, context=None):
if not values:
return values
if context is None:
context = {}
field_type = obj._columns[field]._type
new_values = dict(values)
if field_type == 'binary':
if context.get('bin_size'):
# client requests only the size of binary fields
for rid, value in values.iteritems():
if value:
new_values[rid] = get_nice_size(value)
elif not context.get('bin_raw'):
for rid, value in values.iteritems():
if value:
new_values[rid] = sanitize_binary_value(value)
return new_values
def get(self, cr, obj, ids, name, uid=False, context=None, values=None):
multi = self._multi
# if we already have a value, don't recompute it.
        # This happens in the case of stored many2one fields.
if values and not multi and name in values[0]:
result = dict((v['id'], v[name]) for v in values)
elif values and multi and all(n in values[0] for n in name):
result = dict((v['id'], dict((n, v[n]) for n in name)) for v in values)
else:
result = self._fnct(obj, cr, uid, ids, name, self._arg, context)
if multi:
swap = {}
for rid, values in result.iteritems():
for f, v in values.iteritems():
if f not in name:
continue
swap.setdefault(f, {})[rid] = v
for field, values in swap.iteritems():
new_values = self._postprocess_batch(cr, uid, obj, field, values, context)
for rid, value in new_values.iteritems():
result[rid][field] = value
else:
result = self._postprocess_batch(cr, uid, obj, name, result, context)
return result
def set(self, cr, obj, id, name, value, user=None, context=None):
if not context:
context = {}
if self._fnct_inv:
self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
# Function fields are supposed to emulate a basic field type,
# so they can delegate to the basic type for record name rendering
return globals()[field._type]._as_display_name(field, cr, uid, obj, value, context=context)
# ---------------------------------------------------------
# Related fields
# ---------------------------------------------------------
class related(function):
"""Field that points to some data inside another field of the current record.
Example::
_columns = {
'foo_id': fields.many2one('my.foo', 'Foo'),
'bar': fields.related('foo_id', 'frol', type='char', string='Frol of Foo'),
}
"""
def _fnct_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
# assume self._arg = ('foo', 'bar', 'baz')
# domain = [(name, op, val)] => search [('foo.bar.baz', op, val)]
field = '.'.join(self._arg)
return map(lambda x: (field, x[1], x[2]), domain)
def _fnct_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for instance in obj.browse(cr, uid, ids, context=context):
# traverse all fields except the last one
for field in self.arg[:-1]:
instance = instance[field][:1]
if instance:
# write on the last field of the target record
instance.write({self.arg[-1]: values})
def _fnct_read(self, obj, cr, uid, ids, field_name, args, context=None):
res = {}
for record in obj.browse(cr, SUPERUSER_ID, ids, context=context):
value = record
# traverse all fields except the last one
for field in self.arg[:-1]:
value = value[field][:1]
# read the last field on the target record
res[record.id] = value[self.arg[-1]]
if self._type == 'many2one':
# res[id] is a recordset; convert it to (id, name) or False.
# Perform name_get as root, as seeing the name of a related object depends on
# access right of source document, not target, so user may not have access.
value_ids = list(set(value.id for value in res.itervalues() if value))
value_name = dict(obj.pool[self._obj].name_get(cr, SUPERUSER_ID, value_ids, context=context))
res = dict((id, bool(value) and (value.id, value_name[value.id])) for id, value in res.iteritems())
elif self._type in ('one2many', 'many2many'):
# res[id] is a recordset; convert it to a list of ids
res = dict((id, value.ids) for id, value in res.iteritems())
return res
def __init__(self, *arg, **args):
self.arg = arg
self._relations = []
super(related, self).__init__(self._fnct_read, arg, self._fnct_write, fnct_inv_arg=arg, fnct_search=self._fnct_search, **args)
if self.store is True:
# TODO: improve here to change self.store = {...} according to related objects
pass
class sparse(function):
def convert_value(self, obj, cr, uid, record, value, read_value, context=None):
"""
+ For a many2many field, a list of tuples is expected.
          Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
(3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
(4, ID) link to existing record with id = ID (adds a relationship)
(5) unlink all (like using (3,ID) for all linked records)
(6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
Example:
[(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
        + For a one2many field, a list of tuples is expected.
          Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
Example:
[(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
"""
if self._type == 'many2many':
assert value[0][0] == 6, 'Unsupported m2m value for sparse field: %s' % value
return value[0][2]
elif self._type == 'one2many':
if not read_value:
read_value = []
relation_obj = obj.pool[self.relation]
for vals in value:
assert vals[0] in (0,1,2), 'Unsupported o2m value for sparse field: %s' % vals
if vals[0] == 0:
read_value.append(relation_obj.create(cr, uid, vals[2], context=context))
elif vals[0] == 1:
relation_obj.write(cr, uid, vals[1], vals[2], context=context)
elif vals[0] == 2:
relation_obj.unlink(cr, uid, vals[1], context=context)
read_value.remove(vals[1])
return read_value
return value
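# Example (sketch): for a one2many sparse field, convert_value turns the
# command list [(0, 0, {'name': 'x'}), (2, 7)] into an updated list of ids:
# the (0, 0, vals) command creates a record and appends its id to read_value,
# while (2, 7) unlinks record 7 and removes it from read_value.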
def _fnct_write(self,obj,cr, uid, ids, field_name, value, args, context=None):
if not type(ids) == list:
ids = [ids]
records = obj.browse(cr, uid, ids, context=context)
for record in records:
# grab serialized value as object - already deserialized
serialized = getattr(record, self.serialization_field)
if value is None:
# simply delete the key to unset it.
serialized.pop(field_name, None)
else:
serialized[field_name] = self.convert_value(obj, cr, uid, record, value, serialized.get(field_name), context=context)
obj.write(cr, uid, ids, {self.serialization_field: serialized}, context=context)
return True
def _fnct_read(self, obj, cr, uid, ids, field_names, args, context=None):
results = {}
records = obj.browse(cr, uid, ids, context=context)
for record in records:
# grab serialized value as object - already deserialized
serialized = getattr(record, self.serialization_field)
results[record.id] = {}
for field_name in field_names:
field_type = obj._columns[field_name]._type
value = serialized.get(field_name, False)
if field_type in ('one2many','many2many'):
value = value or []
if value:
# filter out deleted records as superuser
relation_obj = obj.pool[obj._columns[field_name].relation]
value = relation_obj.exists(cr, openerp.SUPERUSER_ID, value)
if type(value) in (int,long) and field_type == 'many2one':
relation_obj = obj.pool[obj._columns[field_name].relation]
# check for deleted record as superuser
if not relation_obj.exists(cr, openerp.SUPERUSER_ID, [value]):
value = False
results[record.id][field_name] = value
return results
def __init__(self, serialization_field, **kwargs):
self.serialization_field = serialization_field
super(sparse, self).__init__(self._fnct_read, fnct_inv=self._fnct_write, multi='__sparse_multi', **kwargs)
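# Usage sketch (hypothetical field names): a sparse field whose value lives
# inside a serialized column rather than in a database column of its own.
#
#     _columns = {
#         'extra': fields.serialized('Extra Data'),
#         'nickname': fields.sparse(type='char', string='Nickname',
#                                   serialization_field='extra'),
#     }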
# ---------------------------------------------------------
# Dummy fields
# ---------------------------------------------------------
class dummy(function):
def _fnct_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
return []
def _fnct_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
return False
def _fnct_read(self, obj, cr, uid, ids, field_name, args, context=None):
return {}
def __init__(self, *arg, **args):
self.arg = arg
self._relations = []
super(dummy, self).__init__(self._fnct_read, arg, self._fnct_write, fnct_inv_arg=arg, fnct_search=self._fnct_search, **args)
# ---------------------------------------------------------
# Serialized fields
# ---------------------------------------------------------
class serialized(_column):
""" A field able to store an arbitrary python data structure.
Note: only plain, JSON-serializable components are allowed.
"""
def _symbol_set_struct(val):
return simplejson.dumps(val)
def _symbol_get_struct(self, val):
return simplejson.loads(val or '{}')
_prefetch = False
_type = 'serialized'
_symbol_c = '%s'
_symbol_f = _symbol_set_struct
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = _symbol_get_struct
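# Round-trip sketch: values are JSON-encoded on the way into the database and
# decoded on the way out, e.g. simplejson.loads(simplejson.dumps({'a': [1, 2]}))
# == {'a': [1, 2]}; a NULL/empty column deserializes to {}.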
# TODO: review this class completely for speed improvements
class property(function):
def to_field_args(self):
args = super(property, self).to_field_args()
args['company_dependent'] = True
return args
def _fnct_search(self, tobj, cr, uid, obj, name, domain, context=None):
ir_property = obj.pool['ir.property']
result = []
for field, operator, value in domain:
result += ir_property.search_multi(cr, uid, name, tobj._name, operator, value, context=context)
return result
def _fnct_write(self, obj, cr, uid, id, prop_name, value, obj_dest, context=None):
ir_property = obj.pool['ir.property']
ir_property.set_multi(cr, uid, prop_name, obj._name, {id: value}, context=context)
return True
def _fnct_read(self, obj, cr, uid, ids, prop_names, obj_dest, context=None):
ir_property = obj.pool['ir.property']
res = {id: {} for id in ids}
for prop_name in prop_names:
column = obj._all_columns[prop_name].column
values = ir_property.get_multi(cr, uid, prop_name, obj._name, ids, context=context)
if column._type == 'many2one':
# name_get the non-null values as SUPERUSER_ID
vals = sum(set(filter(None, values.itervalues())),
obj.pool[column._obj].browse(cr, uid, [], context=context))
vals_name = dict(vals.sudo().name_get()) if vals else {}
for id, value in values.iteritems():
ng = False
if value and value.id in vals_name:
ng = value.id, vals_name[value.id]
res[id][prop_name] = ng
else:
for id, value in values.iteritems():
res[id][prop_name] = value
return res
def __init__(self, **args):
if 'view_load' in args:
_logger.warning("view_load attribute is deprecated on ir.fields. Args: %r", args)
args = dict(args)
args['obj'] = args.pop('relation', '') or args.get('obj', '')
super(property, self).__init__(
fnct=self._fnct_read,
fnct_inv=self._fnct_write,
fnct_search=self._fnct_search,
multi='properties',
**args
)
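# Usage sketch (hypothetical field name): a company-dependent value backed by
# ir.property instead of a regular column.
#
#     _columns = {
#         'standard_price': fields.property(type='float', string='Cost Price'),
#     }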
class column_info(object):
""" Struct containing details about an osv column, either one local to
its model, or one inherited via _inherits.
.. attribute:: name
name of the column
.. attribute:: column
column instance, subclass of :class:`_column`
.. attribute:: parent_model
if the column is inherited, name of the model that contains it,
``None`` for local columns.
.. attribute:: parent_column
the name of the column containing the m2o relationship to the
parent model that contains this column, ``None`` for local columns.
.. attribute:: original_parent
if the column is inherited, name of the original parent model that
contains it i.e in case of multilevel inheritance, ``None`` for
local columns.
"""
def __init__(self, name, column, parent_model=None, parent_column=None, original_parent=None):
self.name = name
self.column = column
self.parent_model = parent_model
self.parent_column = parent_column
self.original_parent = original_parent
def __str__(self):
return '%s(%s, %s, %s, %s, %s)' % (
self.__class__.__name__, self.name, self.column,
self.parent_model, self.parent_column, self.original_parent)
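# Example (sketch): a column local to its model prints as
# "column_info(<name>, <column>, None, None, None)"; the three trailing
# fields are only set for columns inherited through _inherits.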
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zofuthan/edx-platform | cms/djangoapps/contentstore/tests/test_orphan.py | 77 | 4598 | """
Test finding orphans via the view and django config
"""
import json
from contentstore.tests.utils import CourseTestCase
from student.models import CourseEnrollment
from contentstore.utils import reverse_course_url
class TestOrphanBase(CourseTestCase):
"""
Base class for Studio tests that require orphaned modules
"""
def setUp(self):
super(TestOrphanBase, self).setUp()
# create chapters and add them to course tree
chapter1 = self.store.create_child(self.user.id, self.course.location, 'chapter', "Chapter1")
self.store.publish(chapter1.location, self.user.id)
chapter2 = self.store.create_child(self.user.id, self.course.location, 'chapter', "Chapter2")
self.store.publish(chapter2.location, self.user.id)
# orphan chapter
orphan_chapter = self.store.create_item(self.user.id, self.course.id, 'chapter', "OrphanChapter")
self.store.publish(orphan_chapter.location, self.user.id)
# create vertical and add it as child to chapter1
vertical1 = self.store.create_child(self.user.id, chapter1.location, 'vertical', "Vertical1")
self.store.publish(vertical1.location, self.user.id)
# create orphan vertical
orphan_vertical = self.store.create_item(self.user.id, self.course.id, 'vertical', "OrphanVert")
self.store.publish(orphan_vertical.location, self.user.id)
# create component and add it to vertical1
html1 = self.store.create_child(self.user.id, vertical1.location, 'html', "Html1")
self.store.publish(html1.location, self.user.id)
# create component and add it as a child to vertical1 and orphan_vertical
multi_parent_html = self.store.create_child(self.user.id, vertical1.location, 'html', "multi_parent_html")
self.store.publish(multi_parent_html.location, self.user.id)
orphan_vertical.children.append(multi_parent_html.location)
self.store.update_item(orphan_vertical, self.user.id)
# create an orphaned html module
orphan_html = self.store.create_item(self.user.id, self.course.id, 'html', "OrphanHtml")
self.store.publish(orphan_html.location, self.user.id)
self.store.create_child(self.user.id, self.course.location, 'static_tab', "staticuno")
self.store.create_child(self.user.id, self.course.location, 'about', "overview")
self.store.create_child(self.user.id, self.course.location, 'course_info', "updates")
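# At this point the course contains exactly three orphans -- OrphanChapter,
# OrphanVert and OrphanHtml; multi_parent_html is still reachable through
# vertical1, so it must not be reported (or deleted) as an orphan.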
class TestOrphan(TestOrphanBase):
"""
Test finding orphans via view and django config
"""
def setUp(self):
super(TestOrphan, self).setUp()
self.orphan_url = reverse_course_url('orphan_handler', self.course.id)
def test_mongo_orphan(self):
"""
Test that old mongo finds the orphans
"""
orphans = json.loads(
self.client.get(
self.orphan_url,
HTTP_ACCEPT='application/json'
).content
)
self.assertEqual(len(orphans), 3, "Wrong # {}".format(orphans))
location = self.course.location.replace(category='chapter', name='OrphanChapter')
self.assertIn(location.to_deprecated_string(), orphans)
location = self.course.location.replace(category='vertical', name='OrphanVert')
self.assertIn(location.to_deprecated_string(), orphans)
location = self.course.location.replace(category='html', name='OrphanHtml')
self.assertIn(location.to_deprecated_string(), orphans)
def test_mongo_orphan_delete(self):
"""
Test that old mongo deletes the orphans
"""
self.client.delete(self.orphan_url)
orphans = json.loads(
self.client.get(self.orphan_url, HTTP_ACCEPT='application/json').content
)
self.assertEqual(len(orphans), 0, "Orphans not deleted {}".format(orphans))
# make sure that any children with one orphan parent and one non-orphan
# parent are not deleted
self.assertTrue(self.store.has_item(self.course.id.make_usage_key('html', "multi_parent_html")))
def test_not_permitted(self):
"""
Test that auth restricts get and delete appropriately
"""
test_user_client, test_user = self.create_non_staff_authed_user_client()
CourseEnrollment.enroll(test_user, self.course.id)
response = test_user_client.get(self.orphan_url)
self.assertEqual(response.status_code, 403)
response = test_user_client.delete(self.orphan_url)
self.assertEqual(response.status_code, 403)
| agpl-3.0 |
zBMNForks/graphite-web | webapp/tests/test_storage.py | 34 | 1383 | import logging
from graphite.storage import Store
from django.conf import settings
from django.test import TestCase
# Silence logging during tests
LOGGER = logging.getLogger()
# logging.NullHandler is a python 2.7ism
if hasattr(logging, "NullHandler"):
LOGGER.addHandler(logging.NullHandler())
class StorageTest(TestCase):
def test_store(self):
# Save settings
old_cluster_servers = settings.CLUSTER_SERVERS
old_remote_exclude_local = settings.REMOTE_EXCLUDE_LOCAL
# Set test cluster servers
settings.CLUSTER_SERVERS = ['127.0.0.1', '8.8.8.8']
# Test REMOTE_EXCLUDE_LOCAL = False
settings.REMOTE_EXCLUDE_LOCAL = False
test_store = Store()
remote_hosts = [remote_store.host for remote_store in test_store.remote_stores]
self.assertTrue('127.0.0.1' in remote_hosts)
self.assertTrue('8.8.8.8' in remote_hosts)
# Test REMOTE_EXCLUDE_LOCAL = True
settings.REMOTE_EXCLUDE_LOCAL = True
test_store = Store()
remote_hosts = [remote_store.host for remote_store in test_store.remote_stores]
self.assertTrue('127.0.0.1' not in remote_hosts)
self.assertTrue('8.8.8.8' in remote_hosts)
# Restore original settings
settings.CLUSTER_SERVERS = old_cluster_servers
settings.REMOTE_EXCLUDE_LOCAL = old_remote_exclude_local
| apache-2.0 |
gavinandresen/bitcoin-git | qa/rpc-tests/getchaintips.py | 101 | 2205 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# The split leaves us with two chain tips; verify that getchaintips reports both correctly.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def run_test (self):
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all ()
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
| mit |
CameronLonsdale/sec-tools | python2/lib/python2.7/site-packages/pip/_vendor/html5lib/_tokenizer.py | 385 | 76580 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import unichr as chr
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from ._inputstream import HTMLInputStream
from ._trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, parser=None, **kwargs):
self.stream = HTMLInputStream(stream, **kwargs)
self.parser = parser
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
We do our usual processing through the states, and when we have a token
to return we yield it, which pauses processing until the next token
is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
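# Usage sketch: the tokenizer is itself iterable, so tokens can be consumed
# lazily straight from markup (HTMLInputStream accepts a string or file-like):
#
#     for token in HTMLTokenizer("<p class=intro>hi</p>"):
#         print(token["type"], token.get("name", token.get("data")))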
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichr only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
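# Example (sketch): for the input "&#x41;" this method is entered after
# "&#x" has been consumed, reads "41" in radix 16 and returns "A",
# discarding the trailing ";"; "&#128;" goes through replacementCharacters
# and yields "\u20ac" plus an illegal-codepoint-for-numeric-entity error.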
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
(allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
# At this point in the process we might have a named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare them to substrings of the entity
# names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
# of ¬i for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, _ in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
            if char is EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data) # pylint:disable=redefined-variable-type
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for _ in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
| mit |
simartin/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/android_webview.py | 1 | 3825 | from .base import require_arg
from .base import get_timeout_multiplier # noqa: F401
from .chrome import executor_kwargs as chrome_executor_kwargs
from .chrome_android import ChromeAndroidBrowserBase
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor) # noqa: F401
from ..executors.executorchrome import ChromeDriverWdspecExecutor # noqa: F401
__wptrunner__ = {"product": "android_webview",
"check_args": "check_args",
"browser": "SystemWebViewShell",
"executor": {"testharness": "WebDriverTestharnessExecutor",
"reftest": "WebDriverRefTestExecutor",
"wdspec": "ChromeDriverWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"timeout_multiplier": "get_timeout_multiplier"}
_wptserve_ports = set()
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
return {"binary": kwargs["binary"],
"device_serial": kwargs["device_serial"],
"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args")}
def executor_kwargs(logger, test_type, server_config, cache_manager, run_info_data,
**kwargs):
    # Use update() to modify the global set in place.
_wptserve_ports.update(set(
server_config['ports']['http'] + server_config['ports']['https'] +
server_config['ports']['ws'] + server_config['ports']['wss']
))
executor_kwargs = chrome_executor_kwargs(logger, test_type, server_config,
cache_manager, run_info_data,
**kwargs)
del executor_kwargs["capabilities"]["goog:chromeOptions"]["prefs"]
capabilities = executor_kwargs["capabilities"]
# Note that for WebView, we launch a test shell and have the test shell use WebView.
# https://chromium.googlesource.com/chromium/src/+/HEAD/android_webview/docs/webview-shell.md
capabilities["goog:chromeOptions"]["androidPackage"] = \
"org.chromium.webview_shell"
capabilities["goog:chromeOptions"]["androidActivity"] = ".WebPlatformTestsActivity"
if kwargs.get('device_serial'):
capabilities["goog:chromeOptions"]["androidDeviceSerial"] = kwargs['device_serial']
# Workaround: driver.quit() cannot quit SystemWebViewShell.
executor_kwargs["pause_after_test"] = False
# Workaround: driver.close() is not supported.
executor_kwargs["restart_after_test"] = True
executor_kwargs["close_after_done"] = False
return executor_kwargs
def env_extras(**kwargs):
return []
def env_options():
# allow the use of host-resolver-rules in lieu of modifying /etc/hosts file
return {"server_host": "127.0.0.1"}
class SystemWebViewShell(ChromeAndroidBrowserBase):
"""Chrome is backed by chromedriver, which is supplied through
``wptrunner.webdriver.ChromeDriverServer``.
"""
def __init__(self, logger, binary, webdriver_binary="chromedriver",
remote_queue=None,
device_serial=None,
webdriver_args=None):
"""Creates a new representation of Chrome. The `binary` argument gives
the browser binary to use for testing."""
super(SystemWebViewShell, self).__init__(logger,
webdriver_binary, remote_queue, device_serial, webdriver_args)
self.binary = binary
self.wptserver_ports = _wptserve_ports
| mpl-2.0 |
Integral-Technology-Solutions/ConfigNOW | Lib/dumbdbm.py | 4 | 4248 | """A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
_os = __import__('os')
import __builtin__
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database:
def __init__(self, file):
if _os.sep == '.':
endsep = '/'
else:
endsep = '.'
self._dirfile = file + endsep + 'dir'
self._datfile = file + endsep + 'dat'
self._bakfile = file + endsep + 'bak'
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
f.close()
self._update()
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
while 1:
line = f.readline().rstrip()
if not line: break
key, (pos, siz) = eval(line)
self._index[key] = (pos, siz)
f.close()
def _commit(self):
try: _os.unlink(self._bakfile)
except _os.error: pass
try: _os.rename(self._dirfile, self._bakfile)
except _os.error: pass
f = _open(self._dirfile, 'w')
for key, (pos, siz) in self._index.items():
f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
f.close()
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = int(f.tell())
## Does not work under MW compiler
## pos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
## f.seek(pos)
npos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
f.close()
return (pos, len(val))
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
def _addkey(self, key, (pos, siz)):
self._index[key] = (pos, siz)
f = _open(self._dirfile, 'a')
f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
f.close()
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
raise TypeError, "keys and values must be strings"
if not self._index.has_key(key):
(pos, siz) = self._addval(val)
self._addkey(key, (pos, siz))
else:
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE
if newblocks <= oldblocks:
pos, siz = self._setval(pos, val)
self._index[key] = pos, siz
else:
pos, siz = self._addval(val)
self._index[key] = pos, siz
def __delitem__(self, key):
del self._index[key]
self._commit()
def keys(self):
return self._index.keys()
def has_key(self, key):
return self._index.has_key(key)
def __len__(self):
return len(self._index)
def close(self):
self._index = None
self._datfile = self._dirfile = self._bakfile = None
def open(file, flag = None, mode = None):
# flag, mode arguments are currently ignored
return _Database(file)
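def _example_usage():
    # A minimal usage sketch (illustrative, not part of the module): the
    # file name 'spam' is hypothetical; spam.dat and spam.dir are created
    # on first use, as described in the module docstring.
    db = open('spam')
    db['key'] = 'value'     # keys and values must be strings
    assert db['key'] == 'value'
    db.close()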
| mit |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/keras/python/keras/utils/generic_utils.py | 21 | 10625 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import marshal
import sys
import time
import types as python_types
import numpy as np
import six
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
_GLOBAL_CUSTOM_OBJECTS = {}
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with CustomObjectScope({"MyObject":MyObject}):
layer = Dense(..., W_regularizer="MyObject")
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with custom_object_scope({"MyObject":MyObject}):
layer = Dense(..., W_regularizer="MyObject")
# save, load, etc. will recognize custom object by name
```
Arguments:
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
Returns:
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
Example:
```python
get_custom_objects().clear()
get_custom_objects()["MyObject"] = MyObject
```
Returns:
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_keras_object(instance):
_, instance = tf_decorator.unwrap(instance)
if instance is None:
return None
if hasattr(instance, 'get_config'):
return {
'class_name': instance.__class__.__name__,
'config': instance.get_config()
}
if hasattr(instance, '__name__'):
return instance.__name__
else:
raise ValueError('Cannot serialize', instance)
def deserialize_keras_object(identifier,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
if 'class_name' not in config or 'config' not in config:
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
if hasattr(cls, 'from_config'):
arg_spec = tf_inspect.getargspec(cls.from_config)
if 'custom_objects' in arg_spec.args:
custom_objects = custom_objects or {}
return cls.from_config(
config['config'],
custom_objects=dict(
list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
return cls.from_config(config['config'])
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
return cls(**config['config'])
elif isinstance(identifier, six.string_types):
function_name = identifier
if custom_objects and function_name in custom_objects:
fn = custom_objects.get(function_name)
elif function_name in _GLOBAL_CUSTOM_OBJECTS:
fn = _GLOBAL_CUSTOM_OBJECTS[function_name]
else:
fn = module_objects.get(function_name)
if fn is None:
      raise ValueError('Unknown ' + printable_module_name +
                       ': ' + function_name)
return fn
else:
raise ValueError('Could not interpret serialized ' + printable_module_name +
': ' + identifier)
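def _serialization_round_trip_example(obj, module_objects):
  """Illustrative sketch (not part of the original module): serialize an
  object exposing `get_config`, then rebuild it from its class name."""
  config = serialize_keras_object(obj)
  return deserialize_keras_object(config, module_objects=module_objects)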
def make_tuple(*args):
return args
def func_dump(func):
"""Serializes a user defined function.
Arguments:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
code = marshal.dumps(func.__code__).decode('raw_unicode_escape')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Arguments:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
code = marshal.loads(code.encode('raw_unicode_escape'))
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure)
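def _func_round_trip_example():
  """Illustrative sketch (not part of the original module): dump a simple
  function to a (code, defaults, closure) tuple and rebuild it."""
  def add(a, b=1):
    return a + b
  payload = func_dump(add)
  rebuilt = func_load(payload, globs=globals())
  return rebuilt(2)  # -> 3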
class Progbar(object):
"""Displays a progress bar.
Arguments:
target: Total number of steps expected.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1, interval=0.05):
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.last_update = 0
self.interval = interval
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=None, force=False):
"""Updates the progress bar.
Arguments:
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
force: Whether to force visual progress update.
"""
values = values or []
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [
v * (current - self.seen_so_far), current - self.seen_so_far
]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
if not force and (now - self.last_update) < self.interval:
return
prev_total_width = self.total_width
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit * (self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
if isinstance(self.sum_values[k], list):
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self.sum_values[k]
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width - self.total_width) * ' ')
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write('\n')
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
          if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
sys.stdout.write(info + '\n')
self.last_update = now
def add(self, n, values=None):
self.update(self.seen_so_far + n, values)
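def _progbar_example():
  """Illustrative sketch (not part of the original module): report a running
  average of a named value while stepping through 100 units of work."""
  progbar = Progbar(target=100)
  for step in range(1, 101):
    progbar.update(step, values=[('loss', 1. / step)])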
| mit |
yalp/fiware-orion | scripts/managedb/garbage-collector.py | 12 | 3881 | #!/usr/bin/python
# -*- coding: latin-1 -*-
# Copyright 2013 Telefonica Investigacion y Desarrollo, S.A.U
#
# This file is part of Orion Context Broker.
#
# Orion Context Broker is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Orion Context Broker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Orion Context Broker. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# iot_support at tid dot es
from pymongo import MongoClient
from time import time
from datetime import timedelta, datetime
from sys import argv
def check_coll(collection, collection_name):
n = 0
for doc in collection.find():
id = doc['_id']
if collection_name == CSUB_COLL or collection_name == CASUB_COLL:
ref = doc['reference']
prefix = '-- ID ' + str(id) + ' (' + ref + '): '
elif collection_name == REG_COLL:
# Note that registration could include several entities, but we only
# print the first one (and a '+' sign) in that case, to avoid long lines
l_c = len (doc['contextRegistration'])
l_e = len (doc['contextRegistration'][0]['entities'])
entId = doc['contextRegistration'][0]['entities'][0]['id']
if (doc['contextRegistration'][0]['entities'][0]).has_key('type'):
type = doc['contextRegistration'][0]['entities'][0]['type']
else:
type = '<no type>'
if (l_c > 1) or (l_e > 1):
prefix = '-- ID ' + str(id) + ' ' + entId + ' (' + type + ') [+] : '
else:
prefix = '-- ID ' + str(id) + ' ' + entId + ' (' + type + '): '
else:
prefix = '-- ID ' + str(id) + ': '
n += 1
try:
expiration = int(doc['expiration'])
interval = expiration - time()
if (interval < 0):
interval_str = str(timedelta(seconds=-interval))
print prefix + 'expired by ' + interval_str
doc['expired'] = 1
collection.save(doc)
else:
# In this case, we touch the document only if have already expired: 1,
# this would correspond to the case of an expired registration/subscription that has
# been "reactivated" after receiving an update in duration
if doc.has_key('expired'):
doc.pop('expired', None)
collection.save(doc)
interval_str = str(timedelta(seconds=interval))
print prefix + interval_str + ' left to expiration'
except ValueError:
print prefix + 'invalid expiration format!'
    print 'documents processed: ' + str(n)
DB = 'orion'
REG_COLL = 'registrations'
CSUB_COLL = 'csubs'
CASUB_COLL = 'casubs'
client = MongoClient('localhost', 27017)
db = client[DB]
now = datetime.now()
print 'Current time: ' + str(now)
# The script uses a list of collections as arguments, so a given collection is
# processed only if its name appears there
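# Example invocation (illustrative): check csubs and casubs, leaving
# registrations untouched:
#
#   python garbage-collector.py csubs casubs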
if REG_COLL in argv:
print 'Checking collection: ' + REG_COLL
check_coll(db[REG_COLL], REG_COLL)
if CSUB_COLL in argv:
print 'Checking collection: ' + CSUB_COLL
check_coll(db[CSUB_COLL], CSUB_COLL)
if CASUB_COLL in argv:
print 'Checking collection: ' + CASUB_COLL
check_coll(db[CASUB_COLL], CASUB_COLL)
| agpl-3.0 |
Anonymike/pasta-bot | plugins/_junk.py | 3 | 9285 | from util import hook, user, database
import os
import sys
import re
import json
import time
import subprocess
# @hook.command(autohelp=False, permissions=["permissions_users"], adminonly=True)
# def permissions(inp, bot=None, notice=None):
# """permissions [group] -- lists the users and their permission level who have permissions."""
# permissions = bot.config.get("permissions", [])
# groups = []
# if inp:
# for k in permissions:
# if inp == k:
# groups.append(k)
# else:
# for k in permissions:
# groups.append(k)
# if not groups:
# notice(u"{} is not a group with permissions".format(inp))
# return None
# for v in groups:
# members = ""
# for value in permissions[v]["users"]:
# members = members + value + ", "
# if members:
# notice(u"the members in the {} group are..".format(v))
# notice(members[:-2])
# else:
# notice(u"there are no members in the {} group".format(v))
# @hook.command(permissions=["permissions_users"], adminonly=True)
# def deluser(inp, bot=None, notice=None):
# """deluser [user] [group] -- removes elevated permissions from [user].
# If [group] is specified, they will only be removed from [group]."""
# permissions = bot.config.get("permissions", [])
# inp = inp.split(" ")
# groups = []
# try:
# specgroup = inp[1]
# except IndexError:
# specgroup = None
# for k in permissions:
# groups.append(k)
# else:
# for k in permissions:
# if specgroup == k:
# groups.append(k)
# if not groups:
# notice(u"{} is not a group with permissions".format(inp[1]))
# return None
# removed = 0
# for v in groups:
# users = permissions[v]["users"]
# for value in users:
# if inp[0] == value:
# users.remove(inp[0])
# removed = 1
# notice(u"{} has been removed from the group {}".format(inp[0], v))
# json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
# if specgroup:
# if removed == 0:
# notice(u"{} is not in the group {}".format(inp[0], specgroup))
# else:
# if removed == 0:
# notice(u"{} is not in any groups".format(inp[0]))
# @hook.command(permissions=["permissions_users"], adminonly=True)
# def adduser(inp, bot=None, notice=None):
# """adduser [user] [group] -- adds elevated permissions to [user].
# [group] must be specified."""
# permissions = bot.config.get("permissions", [])
# inp = inp.split(" ")
# try:
# user = inp[0]
# targetgroup = inp[1]
# except IndexError:
# notice(u"the group must be specified")
# return None
# if not re.search('.+!.+@.+', user):
# notice(u"the user must be in the form of \"nick!user@host\"")
# return None
# try:
# users = permissions[targetgroup]["users"]
# except KeyError:
# notice(u"no such group as {}".format(targetgroup))
# return None
# if user in users:
# notice(u"{} is already in {}".format(user, targetgroup))
# return None
# users.append(user)
# notice(u"{} has been added to the group {}".format(user, targetgroup))
# users.sort()
# json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2)
@hook.command("stfu", adminonly=True)
@hook.command("silence", adminonly=True)
@hook.command(adminonly=True)
def shutup(inp, conn=None, chan=None, notice=None):
"shutup [channel] <user> -- Shuts the user up. "
# inp = inp.split(" ")
if inp[0][0] == "#":
chan = inp.split(" ")[0]
users = inp.split(" ")[1:]
else:
users = inp.split(" ")
for user in users:
out = u"MODE %s +m-voh %s %s %s" % (chan, user, user, user)
conn.send(out)
notice(u"Shut up %s from %s..." % (user, chan))
@hook.command(adminonly=True)
def speak(inp, conn=None, chan=None, notice=None):
"speak [channel] <user> -- Shuts the user up. "
if inp[0][0] == "#":
chan = inp.split(" ")[0]
users = inp.split(" ")[1:]
else:
users = inp.split(" ")
for user in users:
out = u"MODE %s -m" % (chan)
conn.send(out)
notice(u"Shut up %s from %s..." % (user, chan))
# @hook.command(adminonly=True, autohelp=False)
# def db(inp,db=None):
# split = inp.split(' ')
# action = split[0]
# if "init" in action:
# result = db.execute("create table if not exists users(nick primary key, host, location, greeting, lastfm, fines, battlestation, desktop, horoscope, version)")
# db.commit()
# return result
# elif "addcol" in action:
# table = split[1]
# col = split[2]
# if table is not None and col is not None:
# db.execute("ALTER TABLE {} ADD COLUMN {}".format(table,col))
# db.commit
# return "Added Column"
# UPDATE usersettings SET fines=(SELECT totalfines FROM fines WHERE nick = usersettings.nick);
def compare_hostmasks(hostmask,matchmask):
hostmask = hostmask.replace('~','').replace('*','\S+').lower()
matchmask = matchmask.replace('*','.+').lower()
if bool(re.search(hostmask,matchmask)): return True
else: return False
@hook.command(adminonly=True)
def checkhost(inp, conn=None, chan=None, notice=None):
inp = inp.split(' ')
hostmask = inp[0]
matchmask = inp[1]
return compare_hostmasks(hostmask,matchmask)
from fnmatch import fnmatch
@hook.command(adminonly=True)
def test(inp,db=None):
#host = user.get_hostmask(inp,db)
nick = inp.strip().replace('~','').lower()
host = database.get(db,'users','mask','nick',nick)
print host
hostmask = host.lower().replace('~','') #.replace('*','\S+')
# hostmask = "*{}*".format(hostmask)
print hostmask
matchmask = "sid18764@.*uxbridge.irccloud.com infinity@.*like.lolis *@i.like.lolis [email protected] [email protected] 680i@.+studby.hig.no themadman@.+want.it.now austin@.+this.is.austin urmom@.+kills.your.gainz moss@.+like.a.hamster quinn@.+fios.verizon.net [email protected] [email protected] ichiroku@.+fios.verizon.net connor@.+nasty.skanky.slut"
#print "{} -- {}".format(matchmask,hostmask)
for pattern in matchmask.split(' '):
if fnmatch(hostmask, pattern):
print "MATCHED: {}".format(pattern)
# print fnmatch(matchmask,hostmask)
# matches = re.search(hostmask,matchmask)
#return matches.group(0)
#if bool(re.search(hostmask,matchmask)): return True
#else: return False
#Database conversion commands
#Update Uguu's default databases
@hook.command(adminonly=True)
def migrate_old_db(inp, notice=None, bot=None, db=None, config=None):
#db.execute("ALTER TABLE seen_user RENAME TO seen")
#db.execute("create table if not exists seen(name, time, quote, chan, host, primary key(name, chan))")
db.commit()
#db.execute("ALTER TABLE weather RENAME TO locations")
#db.execute("DROP TABLE seen")
#db.execute("DROP TABLE seen")
#db.execute("create table if not exists seen(name, time, quote, chan, host, "
# "primary key(name, chan))")
#db.commit()
#db.commit()
#db.execute("ALTER TABLE seen_user RENAME TO seen")
#db.execute("INSERT OR IGNORE INTO usersettings (nick, lastfm) SELECT ircname, lastfmname FROM usernames")
#notice('LastFM data was imported into usersettings')
#db.commit()
#Migrate old CloudBot DBs
#LastFM
#db.execute("create table if not exists usernames (ircname primary key, lastfmname)")
#db.execute("INSERT INTO usernames (ircname, lastfmname) SELECT nick, acc FROM lastfm")
#db.execute("DROP TABLE lastfm")
#db.commit()
#Weather
#db.execute("create table if not exists locationsCopy (ircname primary key, location)")
#db.execute("INSERT INTO locationsCopy (ircname, location) SELECT nick, loc FROM locations")
#db.execute("ALTER TABLE locations RENAME TO locationsOrig")
#db.execute("ALTER TABLE locationsCopy RENAME TO locations")
#db.execute("DROP TABLE locationsOrig")
#db.commit()
# OLD
# @hook.command
# def distance(inp):
# "distance <start> to <end> -- Calculate the distance between 2 places."
# if 'from ' in inp: inp = inp.replace('from ','')
# inp = inp.replace(', ','+')
# start = inp.split(" to ")[0].strip().replace(' ','+')
# dest = inp.split(" to ")[1].strip().replace(' ','+')
# url = "http://www.travelmath.com/flying-distance/from/%s/to/%s" % (start, dest)
# print url
# soup = http.get_soup(url)
# query = soup.find('h1', {'class': re.compile('flight-distance')})
# distance = soup.find('h3', {'class': 'space'})
# result = "%s %s" % (query, distance)
# result = http.strip_html(result)
# result = unicode(result, "utf8").replace('flight ','')
# if not distance:
# return "Could not calculate the distance from %s to %s." % (start, dest)
# return result
| gpl-3.0 |
codeforamerica/skillcamp | ENV/lib/python2.7/site-packages/setuptools/ssl_support.py | 332 | 7663 | import os
import socket
import atexit
import re
import pkg_resources
from pkg_resources import ResolutionError, ExtractionError
from setuptools.compat import urllib2
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
""".strip().split()
HTTPSHandler = HTTPSConnection = object
for what, where in (
('HTTPSHandler', ['urllib2','urllib.request']),
('HTTPSConnection', ['httplib', 'http.client']),
):
for module in where:
try:
exec("from %s import %s" % (module, what))
except ImportError:
pass
is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
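    # Illustrative examples (not part of setuptools) of the RFC 6125 rules
    # implemented above, assuming the fallback matcher is in use:
    #
    #   _dnsname_match('*.example.com', 'www.example.com')   -> match
    #   _dnsname_match('*.example.com', 'a.b.example.com')   -> no match
    #   _dnsname_match('x*.example.com', 'xyz.example.com')  -> match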
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
"""Simple verifying handler: no auth, subclasses, timeouts, etc."""
def __init__(self, ca_bundle):
self.ca_bundle = ca_bundle
HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(
lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
)
class VerifyingHTTPSConn(HTTPSConnection):
"""Simple verifying connection: no auth, subclasses, timeouts, etc."""
def __init__(self, host, ca_bundle, **kw):
HTTPSConnection.__init__(self, host, **kw)
self.ca_bundle = ca_bundle
def connect(self):
sock = socket.create_connection(
(self.host, self.port), getattr(self, 'source_address', None)
)
# Handle the socket if a (proxy) tunnel is present
if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
try:
match_hostname(self.sock.getpeercert(), self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
def opener_for(ca_bundle=None):
"""Get a urlopen() replacement that uses ca_bundle for verification"""
return urllib2.build_opener(
VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
).open
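def _opener_example(url):
    """Illustrative sketch (not part of setuptools): fetch a URL, verifying
    the server certificate against the platform CA bundle when available."""
    return opener_for(find_ca_bundle())(url)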
_wincerts = None
def get_win_certfile():
global _wincerts
if _wincerts is not None:
return _wincerts.name
try:
from wincertstore import CertFile
except ImportError:
return None
class MyCertFile(CertFile):
def __init__(self, stores=(), certs=()):
CertFile.__init__(self)
for store in stores:
self.addstore(store)
self.addcerts(certs)
atexit.register(self.close)
_wincerts = MyCertFile(stores=['CA', 'ROOT'])
return _wincerts.name
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
if os.name=='nt':
return get_win_certfile()
else:
for cert_path in cert_paths:
if os.path.isfile(cert_path):
return cert_path
try:
return pkg_resources.resource_filename('certifi', 'cacert.pem')
except (ImportError, ResolutionError, ExtractionError):
return None
| mit |
phantomlinux/vaughn_aiy_raspberrypi | src/action.py | 1 | 14088 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Carry out voice commands by recognising keywords."""
import datetime
import logging
import subprocess
import vlc
import time
import requests
import re
import actionbase
# =============================================================================
#
# Hey, Makers!
#
# This file contains some examples of voice commands that are handled locally,
# right on your Raspberry Pi.
#
# Do you want to add a new voice command? Check out the instructions at:
# https://aiyprojects.withgoogle.com/voice/#makers-guide-3-3--create-a-new-voice-command-or-action
# (MagPi readers - watch out! You should switch to the instructions in the link
# above, since there's a mistake in the MagPi instructions.)
#
# In order to make a new voice command, you need to do two things. First, make a
# new action where it says:
# "Implement your own actions here"
# Secondly, add your new voice command to the actor near the bottom of the file,
# where it says:
# "Add your own voice commands here"
#
# =============================================================================
# Actions might not use the user's command. pylint: disable=unused-argument
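# A minimal sketch of those two steps (the names below are illustrative and
# not part of the stock file):
#
#   class HowAreYou(object):
#       def __init__(self, say):
#           self.say = say
#       def run(self, voice_command):
#           self.say('doing great, thanks!')
#
#   # ...then, inside make_actor(say) further down:
#   # actor.add_keyword(_('how are you'), HowAreYou(say))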
# Example: Say a simple response
# ================================
#
# This example will respond to the user by saying something. You choose what it
# says when you add the command below - look for SpeakAction at the bottom of
# the file.
#
# There are two functions:
# __init__ is called when the voice commands are configured, and stores
# information about how the action should work:
# - self.say is a function that says some text aloud.
# - self.words are the words to use as the response.
# run is called when the voice command is used. It gets the user's exact voice
# command as a parameter.
class SpeakAction(object):
"""Says the given text via TTS."""
def __init__(self, say, words):
self.say = say
self.words = words
def run(self, voice_command):
self.say(self.words)
# Example: Tell the current time
# ==============================
#
# This example will tell the time aloud. The to_str function will turn the time
# into helpful text (for example, "It is twenty past four."). The run function
# uses to_str say it aloud.
class SpeakTime(object):
"""Says the current local time with TTS."""
def __init__(self, say):
self.say = say
def run(self, voice_command):
time_str = self.to_str(datetime.datetime.now())
self.say(time_str)
def to_str(self, dt):
"""Convert a datetime to a human-readable string."""
HRS_TEXT = ['midnight', 'one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve']
MINS_TEXT = ["five", "ten", "quarter", "twenty", "twenty-five", "half"]
hour = dt.hour
minute = dt.minute
# convert to units of five minutes to the nearest hour
minute_rounded = (minute + 2) // 5
minute_is_inverted = minute_rounded > 6
if minute_is_inverted:
minute_rounded = 12 - minute_rounded
hour = (hour + 1) % 24
# convert time from 24-hour to 12-hour
if hour > 12:
hour -= 12
if minute_rounded == 0:
if hour == 0:
return 'It is midnight.'
return "It is %s o'clock." % HRS_TEXT[hour]
if minute_is_inverted:
return 'It is %s to %s.' % (MINS_TEXT[minute_rounded - 1], HRS_TEXT[hour])
return 'It is %s past %s.' % (MINS_TEXT[minute_rounded - 1], HRS_TEXT[hour])
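# Worked examples for SpeakTime.to_str (derived from the rounding above):
# 16:22 rounds to four five-minute units -> "It is twenty past four.";
# 16:43 rounds to nine, which inverts to three and advances the hour ->
# "It is quarter to five."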
# Example: Run a shell command and say its output
# ===============================================
#
# This example will use a shell command to work out what to say. You choose the
# shell command when you add the voice command below - look for the example
# below where it says the IP address of the Raspberry Pi.
class SpeakShellCommandOutput(object):
"""Speaks out the output of a shell command."""
def __init__(self, say, shell_command, failure_text):
self.say = say
self.shell_command = shell_command
self.failure_text = failure_text
def run(self, voice_command):
output = subprocess.check_output(self.shell_command, shell=True).strip()
if output:
self.say(output)
elif self.failure_text:
self.say(self.failure_text)
# Example: Change the volume
# ==========================
#
# This example can change the speaker volume of the Raspberry Pi. It uses
# the shell command SET_VOLUME to change the volume, and then GET_VOLUME gets
# the new volume. The example says the new volume aloud after changing the
# volume.
class VolumeControl(object):
"""Changes the volume and says the new level."""
GET_VOLUME = r'amixer get Master | grep "Front Left:" | sed "s/.*\[\([0-9]\+\)%\].*/\1/"'
SET_VOLUME = 'amixer -q set Master %d%%'
def __init__(self, say, change):
self.say = say
self.change = change
def run(self, voice_command):
res = subprocess.check_output(VolumeControl.GET_VOLUME, shell=True).strip()
try:
logging.info("volume: %s", res)
vol = int(res) + self.change
vol = max(0, min(100, vol))
subprocess.call(VolumeControl.SET_VOLUME % vol, shell=True)
self.say(_('Volume at %d %%.') % vol)
except (ValueError, subprocess.CalledProcessError):
logging.exception("Error using amixer to adjust volume.")
# Example: Repeat after me
# ========================
#
# This example will repeat what the user said. It shows how you can access what
# the user said, and change what you do or how you respond.
class RepeatAfterMe(object):
"""Repeats the user's command."""
def __init__(self, say, keyword):
self.say = say
self.keyword = keyword
def run(self, voice_command):
# The command still has the 'repeat after me' keyword, so we need to
# remove it before saying whatever is left.
to_repeat = voice_command.replace(self.keyword, '', 1)
self.say(to_repeat)
# Power: Shutdown or reboot the pi
# ================================
# Shuts down the pi or reboots with a response
#
class PowerCommand(object):
"""Shutdown or reboot the pi"""
def __init__(self, say, command):
self.say = say
self.command = command
def run(self, voice_command):
if self.command == "shutdown":
self.say("Shutting down, goodbye")
subprocess.call("sudo shutdown now", shell=True)
elif self.command == "reboot":
self.say("Rebooting")
subprocess.call("sudo shutdown -r now", shell=True)
else:
logging.error("Error identifying power command.")
self.say("Sorry I didn't identify that command")
class playRadio(object):
def __init__(self, say, keyword):
self.say = say
self.keyword = keyword
self.instance = vlc.Instance()
global player
player = self.instance.media_player_new()
self.set_state("stopped")
def set_state(self, new_state):
logging.info("setting radio state " + new_state)
global radioState
radioState = new_state
def get_state():
return radioState
def get_station(self, station_name):
# replace the stream for the first line 'radio' with the stream for your default station
stations = {
'1': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_one.m3u8',
'2': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_two.m3u8',
'3': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_three.m3u8',
'4': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_fourfm.m3u8',
'5': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_five_live.m3u8',
'5 sports': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_five_live_sports_extra.m3u8',
'6': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_6music.m3u8',
'1xtra': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_1xtra.m3u8',
'4 extra': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_four_extra.m3u8',
'nottingham': 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/uk/sbr_high/ak/bbc_radio_nottingham.m3u8',
'hits fm': 'http://www.surfmusic.de/m3u/hitz-fm,11410.m3u',
'hitz fm': 'http://www.surfmusic.de/m3u/hitz-fm,11410.m3u',
'one fm': 'http://www.surfmusic.de/m3u/one-fm-88-1-fm,15944.m3u',
'fly fm': 'http://www.surfmusic.de/m3u/fly-fm-95-8,9447.m3u',
'988 fm': 'http://www.surfmusic.de/m3u/radio-98-8,5253.m3u',
}
return stations[station_name]
def run(self, voice_command):
voice_command = ((voice_command.lower()).replace(self.keyword, '', 1)).strip()
if (voice_command == "stop") or (voice_command == "off"):
logging.info("radio stopped")
player.stop()
self.set_state("stopped")
return
logging.info("starting radio: " + voice_command)
global station
try:
logging.info("searching for: " + voice_command)
station = self.get_station(voice_command)
except KeyError:
# replace this stream with the stream for your default station
self.say("Radio search not found. Playing radio 6")
station = 'http://www.surfmusic.de/m3u/hitz-fm,11410.m3u'
if station.endswith("m3u"):
logging.info("m3u reading manually")
content = requests.get(station, stream=True).text
url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)[0]
station = url.strip()
logging.info("stream " + station)
media = self.instance.media_new(station)
player.set_media(media)
self.set_state("playing")
def pause():
logging.info("pausing radio")
if player is not None:
player.stop()
def resume():
radioState = playRadio.get_state()
logging.info("resuming radio " + radioState)
if radioState == "playing":
player.play()
# =========================================
# Makers! Implement your own actions here.
# =========================================
def make_actor(say):
"""Create an actor to carry out the user's commands."""
actor = actionbase.Actor()
actor.add_keyword(
_('ip address'), SpeakShellCommandOutput(
say, "ip -4 route get 1 | head -1 | cut -d' ' -f8",
_('I do not have an ip address assigned to me.')))
actor.add_keyword(_('volume up'), VolumeControl(say, 10))
actor.add_keyword(_('volume down'), VolumeControl(say, -10))
actor.add_keyword(_('max volume'), VolumeControl(say, 100))
actor.add_keyword(_('repeat after me'),
RepeatAfterMe(say, _('repeat after me')))
# =========================================
# Makers! Add your own voice commands here.
# =========================================
actor.add_keyword(_('power off'), PowerCommand(say, 'shutdown'))
actor.add_keyword(_('reboot'), PowerCommand(say, 'reboot'))
actor.add_keyword(_('radio'), playRadio(say, _('radio')))
return actor
def add_commands_just_for_cloud_speech_api(actor, say):
"""Add simple commands that are only used with the Cloud Speech API."""
def simple_command(keyword, response):
actor.add_keyword(keyword, SpeakAction(say, response))
simple_command('alexa', _("We've been friends since we were both starter projects"))
simple_command(
'beatbox',
'pv zk pv pv zk pv zk kz zk pv pv pv zk pv zk zk pzk pzk pvzkpkzvpvzk kkkkkk bsch')
simple_command(_('clap'), _('clap clap'))
simple_command('google home', _('She taught me everything I know.'))
simple_command(_('hello'), _('hello to you too'))
simple_command(_('tell me a joke'),
_('What do you call an alligator in a vest? An investigator.'))
simple_command(_('three laws of robotics'),
_("""The laws of robotics are
0: A robot may not injure a human being or, through inaction, allow a human
being to come to harm.
1: A robot must obey orders given it by human beings except where such orders
would conflict with the First Law.
2: A robot must protect its own existence as long as such protection does not
conflict with the First or Second Law."""))
simple_command(_('where are you from'), _("A galaxy far, far, just kidding. I'm from Seattle."))
simple_command(_('your name'), _('A machine has no name'))
actor.add_keyword(_('time'), SpeakTime(say))
# =========================================
# Makers! Add commands to pause and resume your actions here
# =========================================
def pauseActors():
"""add your resume actions here"""
playRadio.pause()
def resumeActors():
"""add your pause actions here"""
playRadio.resume()
| apache-2.0 |
alvarolopez/nova | plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py | 54 | 7407 | #!/usr/bin/env python
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features
#
# XenAPI plugin for reading/writing information to xenstore
#
try:
import json
except ImportError:
import simplejson as json
import utils # noqa
import XenAPIPlugin # noqa
import pluginlib_nova as pluginlib # noqa
pluginlib.configure_logging("xenstore")
class XenstoreError(pluginlib.PluginError):
"""Errors that occur when calling xenstore-* through subprocesses."""
def __init__(self, cmd, return_code, stderr, stdout):
msg = "cmd: %s; returncode: %d; stderr: %s; stdout: %s"
msg = msg % (cmd, return_code, stderr, stdout)
self.cmd = cmd
self.return_code = return_code
self.stderr = stderr
self.stdout = stdout
pluginlib.PluginError.__init__(self, msg)
def jsonify(fnc):
def wrapper(*args, **kwargs):
ret = fnc(*args, **kwargs)
try:
json.loads(ret)
except ValueError:
# Value should already be JSON-encoded, but some operations
# may write raw string values; this will catch those and
# properly encode them.
ret = json.dumps(ret)
return ret
return wrapper
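# Editor's illustration (not in the original plugin): jsonify passes
# through values that are already valid JSON and re-encodes raw strings.
# A wrapped function returning '{"a": 1}' is returned unchanged, while
# one returning 'hello' fails json.loads() and is re-emitted as '"hello"'.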
def record_exists(arg_dict):
"""Returns whether or not the given record exists. The record path
is determined from the given path and dom_id in the arg_dict.
"""
cmd = ["xenstore-exists", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
_run_command(cmd)
return True
except XenstoreError, e: # noqa
if e.stderr == '':
# if stderr was empty, this just means the path did not exist
return False
# otherwise there was a real problem
raise
@jsonify
def read_record(self, arg_dict):
"""Returns the value stored at the given path for the given dom_id.
These must be encoded as key/value pairs in arg_dict. You can
optionally include a key 'ignore_missing_path'; if this is present
and boolean True, attempting to read a non-existent path will return
the string 'None' instead of raising an exception.
"""
cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
result = _run_command(cmd)
return result.strip()
except XenstoreError, e: # noqa
if not arg_dict.get("ignore_missing_path", False):
raise
if not record_exists(arg_dict):
return "None"
# Just try again in case the agent write won the race against
# the record_exists check. If this fails again, it will likely raise
# an equally meaningful XenstoreError as the one we just caught
result = _run_command(cmd)
return result.strip()
@jsonify
def write_record(self, arg_dict):
"""Writes to xenstore at the specified path. If there is information
already stored in that location, it is overwritten. As in read_record,
the dom_id and path must be specified in the arg_dict; additionally,
you must specify a 'value' key, whose value must be a string. Typically,
you can json-ify more complex values and store the json output.
"""
cmd = ["xenstore-write",
"/local/domain/%(dom_id)s/%(path)s" % arg_dict,
arg_dict["value"]]
_run_command(cmd)
return arg_dict["value"]
@jsonify
def list_records(self, arg_dict):
"""Returns all the stored data at or below the given path for the
given dom_id. The data is returned as a json-ified dict, with the
path as the key and the stored value as the value. If the path
doesn't exist, an empty dict is returned.
"""
dirpath = "/local/domain/%(dom_id)s/%(path)s" % arg_dict
cmd = ["xenstore-ls", dirpath.rstrip("/")]
try:
recs = _run_command(cmd)
except XenstoreError, e: # noqa
if not record_exists(arg_dict):
return {}
# Just try again in case the path was created in between
# the "ls" and the existence check. If this fails again, it will
# likely raise an equally meaningful XenstoreError
recs = _run_command(cmd)
base_path = arg_dict["path"]
paths = _paths_from_ls(recs)
ret = {}
for path in paths:
if base_path:
arg_dict["path"] = "%s/%s" % (base_path, path)
else:
arg_dict["path"] = path
rec = read_record(self, arg_dict)
try:
val = json.loads(rec)
except ValueError:
val = rec
ret[path] = val
return ret
@jsonify
def delete_record(self, arg_dict):
"""Just like it sounds: it removes the record for the specified
VM and the specified path from xenstore.
"""
cmd = ["xenstore-rm", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
return _run_command(cmd)
except XenstoreError, e: # noqa
if 'could not remove path' in e.stderr:
# Entry already gone. We're good to go.
return ''
raise
def _paths_from_ls(recs):
"""The xenstore-ls command returns a listing that isn't terribly
useful. This method cleans that up into a dict with each path
as the key, and the associated string as the value.
"""
last_nm = ""
level = 0
path = []
ret = []
for ln in recs.splitlines():
nm, val = ln.rstrip().split(" = ")
barename = nm.lstrip()
this_level = len(nm) - len(barename)
if this_level == 0:
ret.append(barename)
level = 0
path = []
elif this_level == level:
# child of same parent
ret.append("%s/%s" % ("/".join(path), barename))
elif this_level > level:
path.append(last_nm)
ret.append("%s/%s" % ("/".join(path), barename))
level = this_level
elif this_level < level:
path = path[:this_level]
ret.append("%s/%s" % ("/".join(path), barename))
level = this_level
last_nm = barename
return ret
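# Editor's worked example for _paths_from_ls (a hypothetical listing, but
# in the indented 'name = "value"' shape that xenstore-ls produces):
#
#     vm = ""
#      uuid = "abc"
#      name = "x"
#     data = ""
#
# yields ['vm', 'vm/uuid', 'vm/name', 'data'].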
def _run_command(cmd):
"""Wrap utils.run_command to raise XenstoreError on failure
"""
try:
return utils.run_command(cmd)
except utils.SubprocessException, e: # noqa
raise XenstoreError(e.cmdline, e.ret, e.err, e.out)
if __name__ == "__main__":
XenAPIPlugin.dispatch(
{"read_record": read_record,
"write_record": write_record,
"list_records": list_records,
"delete_record": delete_record})
| apache-2.0 |
ztianjin/jna | native/libffi/generate-osx-source-and-headers.py | 100 | 5200 | #!/usr/bin/env python
import subprocess
import re
import os
import errno
import collections
import sys
class Platform(object):
pass
sdk_re = re.compile(r'.*-sdk ([a-zA-Z0-9.]*)')
def sdkinfo(sdkname):
ret = {}
for line in subprocess.Popen(['xcodebuild', '-sdk', sdkname, '-version'], stdout=subprocess.PIPE).stdout:
kv = line.strip().split(': ', 1)
if len(kv) == 2:
k,v = kv
ret[k] = v
return ret
desktop_sdk_info = sdkinfo('macosx')
def latest_sdks():
latest_desktop = None
for line in subprocess.Popen(['xcodebuild', '-showsdks'], stdout=subprocess.PIPE).stdout:
match = sdk_re.match(line)
if match:
if 'OS X' in line:
latest_desktop = match.group(1)
return latest_desktop
desktop_sdk = latest_sdks()
class desktop_platform_32(Platform):
sdk='macosx'
arch = 'i386'
name = 'mac32'
triple = 'i386-apple-darwin10'
sdkroot = desktop_sdk_info['Path']
prefix = "#if defined(__i386__) && !defined(__x86_64__)\n\n"
suffix = "\n\n#endif"
class desktop_platform_64(Platform):
sdk='macosx'
arch = 'x86_64'
name = 'mac'
triple = 'x86_64-apple-darwin10'
sdkroot = desktop_sdk_info['Path']
prefix = "#if !defined(__i386__) && defined(__x86_64__)\n\n"
suffix = "\n\n#endif"
def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
out_filename = filename
if file_suffix:
split_name = os.path.splitext(filename)
out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])
with open(os.path.join(src_dir, filename)) as in_file:
with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
if prefix:
out_file.write(prefix)
out_file.write(in_file.read())
if suffix:
out_file.write(suffix)
headers_seen = collections.defaultdict(set)
def move_source_tree(src_dir, dest_dir, dest_include_dir, arch=None, prefix=None, suffix=None):
for root, dirs, files in os.walk(src_dir, followlinks=True):
relroot = os.path.relpath(root,src_dir)
def move_dir(arch, prefix='', suffix='', files=[]):
for file in files:
file_suffix = None
if file.endswith('.h'):
if dest_include_dir:
file_suffix = arch
if arch:
headers_seen[file].add(arch)
move_file(root, dest_include_dir, file, arch, prefix=prefix, suffix=suffix)
elif dest_dir:
outroot = os.path.join(dest_dir, relroot)
move_file(root, outroot, file, prefix=prefix, suffix=suffix)
if relroot == '.':
move_dir(arch=arch,
files=files,
prefix=prefix,
suffix=suffix)
elif relroot == 'x86':
move_dir(arch='i386',
prefix="#if defined(__i386__) && !defined(__x86_64__)\n\n",
suffix="\n\n#endif",
files=files)
move_dir(arch='x86_64',
prefix="#if !defined(__i386__) && defined(__x86_64__)\n\n",
suffix="\n\n#endif",
files=files)
def build_target(platform):
def xcrun_cmd(cmd):
return subprocess.check_output(['xcrun', '-sdk', platform.sdkroot, '-find', cmd]).strip()
build_dir = 'build_' + platform.name
if not os.path.exists(build_dir):
os.makedirs(build_dir)
env = dict(CC=xcrun_cmd('clang'),
LD=xcrun_cmd('ld'),
CFLAGS='-arch %s -isysroot %s -mmacosx-version-min=10.6' % (platform.arch, platform.sdkroot))
working_dir=os.getcwd()
try:
os.chdir(build_dir)
subprocess.check_call(['../configure', '-host', platform.triple], env=env)
move_source_tree('.', None, '../osx/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
move_source_tree('./include', None, '../osx/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
finally:
os.chdir(working_dir)
def main():
move_source_tree('src', 'osx/src', 'osx/include')
move_source_tree('include', None, 'osx/include')
build_target(desktop_platform_32)
build_target(desktop_platform_64)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
with open(os.path.join('osx/include', header_name), 'w') as header:
for arch in archs:
header.write('#include <%s_%s%s>\n' % (basename, arch, suffix))
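# Editor's note: for a header collected for both architectures, the loop
# above writes an umbrella header along these lines (the file name is
# illustrative):
#
#     #include <ffitarget_i386.h>
#     #include <ffitarget_x86_64.h>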
if __name__ == '__main__':
main()
| lgpl-2.1 |
coderabhishek/scrapy | scrapy/utils/log.py | 108 | 6012 | # -*- coding: utf-8 -*-
import sys
import logging
import warnings
from logging.config import dictConfig
from twisted.python.failure import Failure
from twisted.python import log as twisted_log
import scrapy
from scrapy.settings import overridden_settings, Settings
from scrapy.exceptions import ScrapyDeprecationWarning
logger = logging.getLogger(__name__)
def failure_to_exc_info(failure):
"""Extract exc_info from Failure instances"""
if isinstance(failure, Failure):
return (failure.type, failure.value, failure.getTracebackObject())
class TopLevelFormatter(logging.Filter):
"""Keep only top level loggers's name (direct children from root) from
records.
This filter will replace Scrapy loggers' names with 'scrapy'. This mimics
the old Scrapy log behaviour and helps shortening long names.
Since it can't be set for just one logger (it won't propagate for its
children), it's going to be set in the root handler, with a parametrized
`loggers` list where it should act.
"""
def __init__(self, loggers=None):
self.loggers = loggers or []
def filter(self, record):
if any(record.name.startswith(l + '.') for l in self.loggers):
record.name = record.name.split('.', 1)[0]
return True
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'scrapy': {
'level': 'DEBUG',
},
'twisted': {
'level': 'ERROR',
},
}
}
def configure_logging(settings=None, install_root_handler=True):
"""
Initialize logging defaults for Scrapy.
:param settings: settings used to create and configure a handler for the
root logger (default: None).
:type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
:param install_root_handler: whether to install root logging handler
(default: True)
:type install_root_handler: bool
This function does:
- Route warnings and twisted logging through Python standard logging
- Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
- Route stdout to log if LOG_STDOUT setting is True
When ``install_root_handler`` is True (default), this function also
creates a handler for the root logger according to given settings
(see :ref:`topics-logging-settings`). You can override default options
using ``settings`` argument. When ``settings`` is empty or None, defaults
are used.
"""
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
observer = twisted_log.PythonLoggingObserver('twisted')
observer.start()
dictConfig(DEFAULT_LOGGING)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
if settings.getbool('LOG_STDOUT'):
sys.stdout = StreamLogger(logging.getLogger('stdout'))
if install_root_handler:
logging.root.setLevel(logging.NOTSET)
handler = _get_handler(settings)
logging.root.addHandler(handler)
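# A minimal usage sketch (editor's addition): wiring logging up for a
# stand-alone script that runs a crawler without the scrapy CLI. LOG_FILE
# and LOG_LEVEL are standard Scrapy settings; the file name is an
# arbitrary example.
#
# configure_logging({'LOG_FILE': 'crawl.log', 'LOG_LEVEL': 'INFO'})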
def _get_handler(settings):
""" Return a log handler object according to settings """
filename = settings.get('LOG_FILE')
if filename:
encoding = settings.get('LOG_ENCODING')
handler = logging.FileHandler(filename, encoding=encoding)
elif settings.getbool('LOG_ENABLED'):
handler = logging.StreamHandler()
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
fmt=settings.get('LOG_FORMAT'),
datefmt=settings.get('LOG_DATEFORMAT')
)
handler.setFormatter(formatter)
handler.setLevel(settings.get('LOG_LEVEL'))
handler.addFilter(TopLevelFormatter(['scrapy']))
return handler
def log_scrapy_info(settings):
logger.info("Scrapy %(version)s started (bot: %(bot)s)",
{'version': scrapy.__version__, 'bot': settings['BOT_NAME']})
logger.info("Optional features available: %(features)s",
{'features': ", ".join(scrapy.optional_features)})
d = dict(overridden_settings(settings))
logger.info("Overridden settings: %(settings)r", {'settings': d})
class StreamLogger(object):
"""Fake file-like stream object that redirects writes to a logger instance
Taken from:
http://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
class LogCounterHandler(logging.Handler):
"""Record log levels count into a crawler stats"""
def __init__(self, crawler, *args, **kwargs):
super(LogCounterHandler, self).__init__(*args, **kwargs)
self.crawler = crawler
def emit(self, record):
sname = 'log_count/{}'.format(record.levelname)
self.crawler.stats.inc_value(sname)
def logformatter_adapter(logkws):
"""
Helper that takes the dictionary output from the methods in LogFormatter
and adapts it into a tuple of positional arguments for logger.log calls,
handling backward compatibility as well.
"""
if not {'level', 'msg', 'args'} <= set(logkws):
warnings.warn('Missing keys in LogFormatter method',
ScrapyDeprecationWarning)
if 'format' in logkws:
warnings.warn('`format` key in LogFormatter methods has been '
'deprecated, use `msg` instead',
ScrapyDeprecationWarning)
level = logkws.get('level', logging.INFO)
message = logkws.get('format', logkws.get('msg'))
# NOTE: This also handles 'args' being an empty dict, that case doesn't
# play well in logger.log calls
args = logkws if not logkws.get('args') else logkws['args']
return (level, message, args)
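# Editor's illustration: a LogFormatter method returning
#     {'level': logging.INFO, 'msg': 'Crawled %(url)s', 'args': {'url': url}}
# is adapted to (logging.INFO, 'Crawled %(url)s', {'url': url}), i.e. a
# tuple ready to be splatted into logger.log(*args).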
| bsd-3-clause |
sarvex/depot-tools | third_party/logilab/common/logging_ext.py | 93 | 6975 | # -*- coding: utf-8 -*-
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extends the logging module from the standard library."""
__docformat__ = "restructuredtext en"
import os
import sys
import logging
from six import string_types
from logilab.common.textutils import colorize_ansi
def set_log_methods(cls, logger):
"""bind standard logger's methods as methods on the class"""
cls.__logger = logger
for attr in ('debug', 'info', 'warning', 'error', 'critical', 'exception'):
setattr(cls, attr, getattr(logger, attr))
def xxx_cyan(record):
if 'XXX' in record.message:
return 'cyan'
class ColorFormatter(logging.Formatter):
"""
A color Formatter for the logging standard module.
By default, colorize CRITICAL and ERROR in red, WARNING in orange, INFO in
green and DEBUG in yellow.
self.colors is customizable via the 'color' constructor argument (dictionary).
self.colorfilters is a list of functions that get the LogRecord
and return a color name or None.
"""
def __init__(self, fmt=None, datefmt=None, colors=None):
logging.Formatter.__init__(self, fmt, datefmt)
self.colorfilters = []
self.colors = {'CRITICAL': 'red',
'ERROR': 'red',
'WARNING': 'magenta',
'INFO': 'green',
'DEBUG': 'yellow',
}
if colors is not None:
assert isinstance(colors, dict)
self.colors.update(colors)
def format(self, record):
msg = logging.Formatter.format(self, record)
if record.levelname in self.colors:
color = self.colors[record.levelname]
return colorize_ansi(msg, color)
else:
for cf in self.colorfilters:
color = cf(record)
if color:
return colorize_ansi(msg, color)
return msg
def set_color_formatter(logger=None, **kw):
"""
Install a color formatter on the 'logger'. If not given, it will
defaults to the default logger.
Any additional keyword will be passed as-is to the ColorFormatter
constructor.
"""
if logger is None:
logger = logging.getLogger()
if not logger.handlers:
logging.basicConfig()
format_msg = logger.handlers[0].formatter._fmt
fmt = ColorFormatter(format_msg, **kw)
fmt.colorfilters.append(xxx_cyan)
logger.handlers[0].setFormatter(fmt)
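# Editor's usage sketch: installing the color formatter on the root
# logger and overriding one level's color. Keys in 'colors' are logging
# level names; values must be color names that colorize_ansi understands.
#
# import logging
# set_color_formatter(logging.getLogger(), colors={'INFO': 'blue'})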
LOG_FORMAT = '%(asctime)s - (%(name)s) %(levelname)s: %(message)s'
LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def get_handler(debug=False, syslog=False, logfile=None, rotation_parameters=None):
"""get an apropriate handler according to given parameters"""
if os.environ.get('APYCOT_ROOT'):
handler = logging.StreamHandler(sys.stdout)
if debug:
handler = logging.StreamHandler()
elif logfile is None:
if syslog:
from logging import handlers
handler = handlers.SysLogHandler()
else:
handler = logging.StreamHandler()
else:
try:
if rotation_parameters is None:
if os.name == 'posix' and sys.version_info >= (2, 6):
from logging.handlers import WatchedFileHandler
handler = WatchedFileHandler(logfile)
else:
handler = logging.FileHandler(logfile)
else:
from logging.handlers import TimedRotatingFileHandler
handler = TimedRotatingFileHandler(
logfile, **rotation_parameters)
except IOError:
handler = logging.StreamHandler()
return handler
def get_threshold(debug=False, logthreshold=None):
if logthreshold is None:
if debug:
logthreshold = logging.DEBUG
else:
logthreshold = logging.ERROR
elif isinstance(logthreshold, string_types):
logthreshold = getattr(logging, THRESHOLD_MAP.get(logthreshold,
logthreshold))
return logthreshold
def _colorable_terminal():
isatty = hasattr(sys.__stdout__, 'isatty') and sys.__stdout__.isatty()
if not isatty:
return False
if os.name == 'nt':
try:
from colorama import init as init_win32_colors
except ImportError:
return False
init_win32_colors()
return True
def get_formatter(logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT):
if _colorable_terminal():
fmt = ColorFormatter(logformat, logdateformat)
def col_fact(record):
if 'XXX' in record.message:
return 'cyan'
if 'kick' in record.message:
return 'red'
fmt.colorfilters.append(col_fact)
else:
fmt = logging.Formatter(logformat, logdateformat)
return fmt
def init_log(debug=False, syslog=False, logthreshold=None, logfile=None,
logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT, fmt=None,
rotation_parameters=None, handler=None):
"""init the log service"""
logger = logging.getLogger()
if handler is None:
handler = get_handler(debug, syslog, logfile, rotation_parameters)
# only addHandler and removeHandler method while I would like a setHandler
# method, so do it this way :$
logger.handlers = [handler]
logthreshold = get_threshold(debug, logthreshold)
logger.setLevel(logthreshold)
if fmt is None:
if debug:
fmt = get_formatter(logformat=logformat, logdateformat=logdateformat)
else:
fmt = logging.Formatter(logformat, logdateformat)
handler.setFormatter(fmt)
return handler
# map logilab.common.logger thresholds to logging thresholds
THRESHOLD_MAP = {'LOG_DEBUG': 'DEBUG',
'LOG_INFO': 'INFO',
'LOG_NOTICE': 'INFO',
'LOG_WARN': 'WARNING',
'LOG_WARNING': 'WARNING',
'LOG_ERR': 'ERROR',
'LOG_ERROR': 'ERROR',
'LOG_CRIT': 'CRITICAL',
}
| bsd-3-clause |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/ThirdParty/Twisted/twisted/tap/ftp.py | 67 | 2002 | # -*- test-case-name: twisted.test.test_ftp_options -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I am the support module for making an FTP server with twistd.
"""
from twisted.application import internet
from twisted.cred import portal, checkers, strcred
from twisted.protocols import ftp
from twisted.python import usage, deprecate, versions
import warnings
class Options(usage.Options, strcred.AuthOptionMixin):
synopsis = """[options].
WARNING: This FTP server is probably INSECURE do not use it.
"""
optParameters = [
["port", "p", "2121", "set the port number"],
["root", "r", "/usr/local/ftp", "define the root of the ftp-site."],
["userAnonymous", "", "anonymous", "Name of the anonymous user."]
]
compData = usage.Completions(
optActions={"root": usage.CompleteDirs(descr="root of the ftp site")}
)
longdesc = ''
def __init__(self, *a, **kw):
usage.Options.__init__(self, *a, **kw)
self.addChecker(checkers.AllowAnonymousAccess())
def opt_password_file(self, filename):
"""
Specify a file containing username:password login info for
authenticated connections. (DEPRECATED; see --help-auth instead)
"""
self['password-file'] = filename
msg = deprecate.getDeprecationWarningString(
self.opt_password_file, versions.Version('Twisted', 11, 1, 0))
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
self.addChecker(checkers.FilePasswordDB(filename, cache=True))
def makeService(config):
f = ftp.FTPFactory()
r = ftp.FTPRealm(config['root'])
p = portal.Portal(r, config.get('credCheckers', []))
f.tld = config['root']
f.userAnonymous = config['userAnonymous']
f.portal = p
f.protocol = ftp.FTP
try:
portno = int(config['port'])
except KeyError:
portno = 2121
return internet.TCPServer(portno, f)
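# Editor's usage sketch (hypothetical paths): the plugin is normally
# driven by twistd ('twistd ftp --port 2121 --root /srv/ftp'), but the
# same service can be built directly:
#
# opts = Options()
# opts.parseOptions(['--port', '2121', '--root', '/srv/ftp'])
# service = makeService(opts)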
| gpl-3.0 |
rossjones/ScraperWikiX | web/codewiki/viewsrpc.py | 1 | 14719 | from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseNotFound, HttpResponseForbidden
from django.template.loader import render_to_string
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.core.mail import send_mail, mail_admins
from django.conf import settings
from codewiki.models.code import MAGIC_RUN_INTERVAL
from codewiki import runsockettotwister
from codewiki import models
import base64
import ConfigParser
import datetime
import logging
import re
import smtplib
import sys
logger = logging
try: import json
except ImportError: import simplejson as json
config = ConfigParser.ConfigParser()
config.readfp(open(settings.CONFIGFILE))
def scraperwikitag(scraper, html, panepresent):
mswpane = re.search('(?i)<div[^>]*?id="scraperwikipane"[^>/]*(?:/\s*>|>.*?</div>)', html)
if mswpane:
startend = (mswpane.start(0), mswpane.end(0))
mclass = re.search('class="([^"]*)"', mswpane.group(0))
if mclass:
paneversion = mclass.group(1)
else:
paneversion = "version-2"
if panepresent != None:
panepresent["scraperwikipane"].append(mswpane)
elif panepresent == None: # case where no div#scraperwikipane is found and it's all there (we're not streaming the html out using php)
# have to insert the pane -- favour doing it after the body tag if it exists
mbody = re.search("(?i)<body.*?>", html)
if mbody:
startend = (mbody.end(0), mbody.end(0))
else:
startend = (0, 0)  # no <body> tag found; insert the pane at the very top
paneversion = "version-2"
else:
if len(panepresent["firstfivelines"]) < 5 and re.search("\S", html):
panepresent["firstfivelines"].append(html)
return html
urlbase = settings.MAIN_URL
urlscraperoverview = urlbase + reverse('code_overview', args=[scraper.wiki_type, scraper.short_name])
urlscraperedit = urlbase + reverse('editor_edit', args=[scraper.wiki_type, scraper.short_name])
urlpoweredlogo = settings.MEDIA_URL + "images/powered.png";
swdivstyle = "border:thin #aaf solid; display:block; position:fixed; top:0px; right:0px; background:#eef; margin: 0em; padding: 6pt; font-size: 10pt; z-index: 8675309; "
swlinkstyle = "width:167px; height:17px; margin:0; padding: 0; border-style: none; "
if paneversion == "version-1":
swpane = [ '<div id="scraperwikipane" style="%s;">' % swdivstyle ]
swpane.append('<a href="%s" id="scraperwikipane" style="%s"><img style="border-style: none" src="%s" alt="Powered by ScraperWiki"></a>' % (urlbase, swlinkstyle, urlpoweredlogo))
swpane.append('<br><a href="%s" title="Go to overview page">%s</a>' % (urlscraperoverview, scraper.title))
swpane.append(' (<a href="%s" title="Edit source code for this view">edit</a>)' % (urlscraperedit))
swpane.append('</div>')
else:
swpane = [ '<div id="scraperwikipane" style="%s;">' % swdivstyle ]
swpane.append('<a href="%s" id="scraperwikipane" style="%s"><img style="border-style: none" src="%s" alt="Powered by ScraperWiki"></a>' % (urlscraperoverview, swlinkstyle, urlpoweredlogo))
swpane.append('</div>')
return "%s%s%s" % (html[:startend[0]], "".join(swpane), html[startend[1]:])
def rpcexecute(request, short_name, revision=None):
apikey = request.GET.get('apikey', None)
try:
scraper = models.Code.objects.get(short_name=short_name)
except models.Code.DoesNotExist:
return HttpResponseNotFound(render_to_string('404.html', {'heading':'Not found', 'body':"Sorry, this view does not exist"}, context_instance=RequestContext(request)))
if scraper.wiki_type == 'scraper':
if not scraper.actionauthorized(request.user, "rpcexecute"):
return HttpResponseForbidden(render_to_string('404.html', scraper.authorizationfailedmessage(request.user, "rpcexecute"), context_instance=RequestContext(request)))
if not scraper.api_actionauthorized(apikey):
return HttpResponseForbidden(render_to_string('404.html',
{'heading': 'Not authorized', 'body': 'API key required to access this view'}, context_instance=RequestContext(request)))
if revision:
try:
revision = int(revision)
except ValueError:
revision = None
# quick case where we have PHP with no PHP code in it (it's all pure HTML)
if scraper.language in ['html', 'php', 'javascript']:
code = scraper.saved_code(revision)
if scraper.language == 'php' and not re.search('<\?', code):
return HttpResponse(scraperwikitag(scraper, code, None))
if scraper.language == 'html':
return HttpResponse(scraperwikitag(scraper, code, None))
if scraper.language == 'javascript':
return HttpResponse(code, mimetype='application/javascript')
if revision == None:
revision = -1
# run it the socket method for staff members who can handle being broken
runnerstream = runsockettotwister.RunnerSocket()
runnerstream.runview(request.user, scraper, revision, request.META["QUERY_STRING"])
# We build the response on the fly in case we get an HTTP
# Content-Type header (or similar) before anything is streamed.
response = None
panepresent = {"scraperwikipane":[], "firstfivelines":[]}
for line in runnerstream:
if line == "":
continue
try:
message = json.loads(line)
except:
pass
# Need to log the message here in debug mode so we can track down the
# 'no output for some unknown reason' issue. Console messages appear to
# go missing from the lxc/uml sandbox, and this has been happening for a
# while.
if message['message_type'] == "console":
if not response:
response = HttpResponse()
if message.get('encoding') == 'base64':
response.write(base64.decodestring(message["content"]))
else:
response.write(scraperwikitag(scraper, message["content"], panepresent))
elif message['message_type'] == 'exception':
# :todo: can we use "import cgitb" here?
if not response:
response = HttpResponse()
response.write("<h3>%s</h3>\n" % str(message.get("exceptiondescription")).replace("<", "<"))
for stackentry in message["stackdump"]:
response.write("<h3>%s</h3>\n" % str(stackentry).replace("<", "<"))
# These messages are typically generated by
# scraperwiki.utils.httpresponseheader.
elif message['message_type'] == "httpresponseheader":
# Parameter values have been borrowed from
# http://php.net/manual/en/function.header.php (and hence more
# or less follow the HTTP spec).
if message['headerkey'] == 'Content-Type':
if not response:
response = HttpResponse(mimetype=message['headervalue'])
else:
response.write("<h3>Error: httpresponseheader('%s', '%s') called after start of stream</h3>" % (message['headerkey'], message['headervalue']))
elif message['headerkey'] == 'Content-Disposition':
if not response:
response = HttpResponse()
response['Content-Disposition'] = message['headervalue']
elif message['headerkey'] == 'Location':
if not response:
response = HttpResponseRedirect(message['headervalue'])
else:
response.write("<h3>Error: httpresponseheader('%s', '%s') called after start of stream</h3>" % (message['headerkey'], message['headervalue']))
else:
if not response:
response = HttpResponse()
response.write("<h3>Error: httpresponseheader(headerkey='%s', '%s'); headerkey can only have values 'Content-Type' or 'Content-Disposition'</h3>" % (message['headerkey'], message['headervalue']))
# These messages are typically generated by
# scraperwiki.utils.httpstatuscode.
elif message['message_type'] == 'httpstatuscode':
if not response:
response = HttpResponse(status=message['statuscode'])
else:
response.write(
"<h3>Error:"
" it's too late to try setting HTTP Status Code.</h3>")
if not response:
response = HttpResponse('No output received from view.')
# Now decide whether to insert the "powered by ScraperWiki" panel
# (avoid doing it on JSON). The default Content-Type is
# DEFAULT_CONTENT_TYPE, which comes out as 'text/html; charset=utf-8'.
if 'Content-Type' in response and 'text/html' in response['Content-Type']:
response.write(scraperwikitag(scraper, '<div id="scraperwikipane" class="version-2"/>', panepresent))
return response
# this form is protected by the django key known to twister, so does not need to be obstructed by the csrf machinery
@csrf_exempt
def twistermakesrunevent(request):
try:
return Dtwistermakesrunevent(request)
except Exception, e:
logger.error("twistermakesruneventerror: %s" % (str(e)))
mail_admins(subject="twistermakesruneventerror: %s" % (str(e)[:30]), message=(str(e)))
return HttpResponse("no done %s" % str(e))
def Dtwistermakesrunevent(request):
if request.POST.get("django_key") != config.get('twister', 'djangokey'):
logger.error("twister wrong djangokey")
return HttpResponse("no access")
run_id = request.POST.get("run_id")
revision = request.POST.get('revision')
if not run_id:
logger.error("twisterbad run_id")
return HttpResponse("bad run_id - %s" % (request.POST,) )
matchingevents = models.ScraperRunEvent.objects.filter(run_id=run_id)
if not matchingevents:
event = models.ScraperRunEvent()
event.scraper = models.Scraper.objects.get(short_name=request.POST.get("scrapername"))
# Would be used to kill it.
clientnumber = request.POST.get("clientnumber")
#event.pid = "client# "+ request.POST.get("clientnumber") # only applies when this runner is active
# only applies when this runner is active
event.pid = (100000000+int(clientnumber))
# Set by execution status.
event.run_id = run_id
# Reset by execution status.
event.run_started = datetime.datetime.now()
# Set the last_run field so we don't select this one again
# for the overdue scrapers.
# This field shouldn't exist because we should use the
# runobjects instead, where we can work from a far richer
# report on what has been happening.
event.scraper.last_run = datetime.datetime.now()
event.scraper.save()
else:
event = matchingevents[0]
# standard updates
event.output = request.POST.get("output")
event.records_produced = int(request.POST.get("records_produced"))
event.pages_scraped = int(request.POST.get("pages_scraped"))
event.first_url_scraped = request.POST.get("first_url_scraped", "")
event.exception_message = request.POST.get("exception_message", "")
event.run_ended = datetime.datetime.now() # last update time
# run finished case
if request.POST.get("exitstatus"):
event.pid = -1 # disable the running state of the event
if event.scraper.run_interval == MAGIC_RUN_INTERVAL:
event.scraper.run_interval = -1
event.scraper.status = request.POST.get("exitstatus") == "exceptionmessage" and "sick" or "ok"
event.scraper.last_run = datetime.datetime.now()
# Enable if views ever have metadata that needs updating
# each refresh.
event.scraper.update_meta()
event.scraper.save()
event.save()
# Event needs to be saved first as it is used in the following DomainScraper
if request.POST.get("exitstatus"):
# report the pages that were scraped
jdomainscrapes = request.POST.get("domainscrapes")
domainscrapes = json.loads(jdomainscrapes)
for netloc, vals in domainscrapes.items():
domainscrape = models.DomainScrape(scraper_run_event=event, domain=netloc)
domainscrape.pages_scraped = vals["pages_scraped"]
domainscrape.bytes_scraped = vals["bytes_scraped"]
domainscrape.save()
#####
# We should remove this block below and do alert emails a different way.
#####
# Send email if this is an email scraper
if request.POST.get("exitstatus"):
logger.info('Checking if this is an email scraper')
emailers = event.scraper.users.filter(usercoderole__role='email')
logger.info('There are %d email users' % emailers.count())
if emailers.count() > 0:
subject, message = getemailtext(event)
logger.info("Retrieved subject %s and message %s" % (subject,message,))
if event.scraper.status == 'ok':
logger.info("Status OK")
if message: # no email if blank
logger.info("Have message")
for user in emailers:
try:
send_mail(subject=subject, message=message, from_email=settings.EMAIL_FROM, recipient_list=[user.email], fail_silently=False)
except smtplib.SMTPException, e:
logger.error("emailer failed %s %s" % (str(user), str(e)))
mail_admins(subject="email failed to send: %s" % (str(user)), message=str(e))
else:
logger.info("No message")
else:
#mail_admins(subject="SICK EMAILER: %s" % subject, message=message)
logger.info('SICK EMAILER: %s' % subject)
else:
logger.info('Not a mail scraper ...')
return HttpResponse("done")
# maybe detect the subject title here
def getemailtext(event):
message = event.output
message = re.sub("(?:^|\n)EXECUTIONSTATUS:.*", "", message).strip()
msubject = re.search("(?:^|\n)EMAILSUBJECT:(.*)", message)
if msubject:
subject = msubject.group(1) # snip out the subject
message = "%s%s" % (message[:msubject.start(0)], message[msubject.end(0):])
else:
subject = 'Your ScraperWiki Email - %s' % event.scraper.short_name
return subject, message
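# Editor's worked example (hypothetical scraper output): for
#     event.output == 'EMAILSUBJECT: Weekly digest\nNew records: 12'
# the EMAILSUBJECT line is snipped out of the body, the subject captures
# ' Weekly digest' (note the untrimmed leading space), and the remaining
# message is '\nNew records: 12'.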
| agpl-3.0 |
LinkCareServices/cairotft | docs/conf.py | 1 | 8584 | # -*- coding: utf-8 -*-
#
# Fitch documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 10 18:37:45 2012.
#
# This file is execfile()d with the current directory set to its containing dir
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Config file for sphinx documentation."""
import sys
import os
sys.path.insert(1, os.path.abspath("../"))
from build_scripts.version import get_git_version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autosummary', 'sphinx.ext.autodoc',
'sphinx.ext.doctest', 'sphinx.ext.viewcode',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.graphviz', 'sphinx.ext.inheritance_diagram', ]
todo_include_todos = True
intersphinx_mapping = {
'python': ('http://docs.python.org/3.4', None),
}
if tags.has('maintenance'):
autodoc_default_flags = []
else:
autodoc_default_flags = ['members', 'undoc-members',
'private-members']
autoclass_content = 'both'
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cairotft'
copyright = u'2015 - Thomas Chiroux - Link Care Services'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_git_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'armstrong' # 'linfiniti-sphinx-theme' # 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
html_use_index = True
html_use_modindex = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cairotft'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index', 'cairotft.tex', u'cairotft Documentation',
u'', 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cairotft', u'cairotft Documentation',
[u''], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cairotft', u'cairotft Documentation',
u'', 'cairotft', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| bsd-3-clause |
KanoComputing/kano-profile | kano_profile/tracker/__init__.py | 1 | 9447 | #
# __init__.py
#
# Copyright (C) 2014 - 2018 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Kano-tracker module
#
# A small module for tracking various metrics the users do in Kano OS
#
__author__ = 'Kano Computing Ltd.'
__email__ = '[email protected]'
import time
import atexit
import datetime
import json
import os
import subprocess
import shlex
from uuid import uuid1, uuid5
from kano.utils.file_operations import read_file_contents, chown_path, \
ensure_dir
from kano.utils.hardware import get_cpu_id
from kano.utils.misc import is_number
from kano.logging import logger
from kano_profile.apps import get_app_state_file, load_app_state_variable, \
save_app_state_variable
from kano_profile.paths import tracker_dir, tracker_events_file, \
tracker_token_file
from kano_profile.tracker.tracker_token import TOKEN, generate_tracker_token, \
load_token
from kano_profile.tracker.tracking_utils import open_locked, \
get_nearest_previous_monday, get_utc_offset
from kano_profile.tracker.tracking_session import TrackingSession
from kano_profile.tracker.tracking_sessions import session_start, session_end, \
list_sessions, get_open_sessions, get_session_file_path, session_log, \
get_session_unique_id, get_session_event, CPU_ID, LANGUAGE, OS_VERSION
# Public imports
from kano_profile.tracker.tracker import Tracker
from kano_profile.tracker.tracking_sessions import session_start, session_end, \
pause_tracking_sessions, unpause_tracking_sessions
def track_data(name, data):
""" Track arbitrary data.
Calling this function will generate a data tracking event.
:param name: The identifier of the data.
:type name: str
:param data: Arbitrary data, must be compatible with JSON.
:type data: dict, list, str, int, float, None
"""
try:
af = open_locked(tracker_events_file, 'a')
except IOError as e:
logger.error("Error opening tracker events file {}".format(e))
else:
with af:
event = get_data_event(name, data)
af.write(json.dumps(event) + "\n")
if 'SUDO_USER' in os.environ:
chown_path(tracker_events_file)
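# Editor's sketch of a call site; the event name and payload here are
# made up for illustration:
#
# track_data('updater-duration', {'seconds': 42, 'stage': 'download'})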
def track_action(name):
""" Trigger an action tracking event.
:param name: The identifier of the action.
:type name: str
"""
try:
af = open_locked(tracker_events_file, 'a')
except IOError as e:
logger.error("Error opening tracker events file {}".format(e))
else:
with af:
event = get_action_event(name)
af.write(json.dumps(event) + "\n")
if 'SUDO_USER' in os.environ:
chown_path(tracker_events_file)
def track_subprocess(name, cmd):
""" Launch and track the session of a process.
:param name: Name of the session.
:type name: str
:param cmd: The command line (env vars are not supported).
:type cmd: str
"""
cmd_args = shlex.split(cmd)
p = subprocess.Popen(cmd_args)
pid = p.pid
session_start(name, pid)
p.wait()
session_end(get_session_file_path(name, pid))
def get_data_event(name, data):
"""TODO"""
return {
'type': 'data',
'time': int(time.time()),
'timezone_offset': get_utc_offset(),
'os_version': OS_VERSION,
'cpu_id': CPU_ID,
'token': TOKEN,
'language': LANGUAGE,
'name': str(name),
'data': data
}
def get_action_event(name):
"""TODO"""
return {
'type': 'action',
'time': int(time.time()),
'timezone_offset': get_utc_offset(),
'os_version': OS_VERSION,
'cpu_id': CPU_ID,
'token': TOKEN,
'language': LANGUAGE,
'name': name
}
def add_runtime_to_app(app, runtime):
""" Saves the tracking data for a given application.
Appends a time period to a given app's runtime stats and raises
starts by one. Apart from the total values, it also updates the
weekly stats.
This function uses advisory file locks (see flock(2)) to avoid
races between different applications saving their tracking data
at the same time.
:param app: The name of the application.
:type app: str
:param runtime: For how long was the app running.
:type runtime: number
"""
if not app or app == 'kano-tracker':
return
if not is_number(runtime):
return
runtime = float(runtime)
app = app.replace('.', '_')
# Make sure no one else is accessing this file
app_state_file = get_app_state_file('kano-tracker')
try:
tracker_store = open_locked(app_state_file, 'r')
except IOError as e:
logger.error("Error opening app state file {}".format(e))
else:
app_stats = load_app_state_variable('kano-tracker', 'app_stats')
if not app_stats:
app_stats = dict()
try:
app_stats[app]['starts'] += 1
app_stats[app]['runtime'] += runtime
except Exception:
app_stats[app] = {
'starts': 1,
'runtime': runtime,
}
# Record usage data on per-week basis
if 'weekly' not in app_stats[app]:
app_stats[app]['weekly'] = {}
week = str(get_nearest_previous_monday())
if week not in app_stats[app]['weekly']:
app_stats[app]['weekly'][week] = {
'starts': 0,
'runtime': 0
}
app_stats[app]['weekly'][week]['starts'] += 1
app_stats[app]['weekly'][week]['runtime'] += runtime
save_app_state_variable('kano-tracker', 'app_stats', app_stats)
# Close the lock
tracker_store.close()
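# Editor's note: a call such as add_runtime_to_app('make-art', 120.5)
# (hypothetical app name) bumps the app's total 'starts' by one, adds
# 120.5 to its total 'runtime', and mirrors both into the bucket for the
# current week's Monday.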
def save_hardware_info():
"""Saves hardware information related to the Raspberry Pi / Kano Kit"""
from kano.logging import logger
from kano.utils.hardware import get_cpu_id, get_mac_address, \
detect_kano_keyboard
logger.info('save_hardware_info')
state = {
'cpu_id': get_cpu_id(),
'mac_address': get_mac_address(),
'kano_keyboard': detect_kano_keyboard(),
}
save_app_state_variable('kano-tracker', 'hardware_info', state)
def save_kano_version():
"""Saves a dict of os-version: time values,
to keep track of the users update process"""
updates = load_app_state_variable('kano-tracker', 'versions')
if not updates:
updates = dict()
version_now = read_file_contents('/etc/kanux_version')
if not version_now:
return
version_now = version_now.replace('.', '_')
time_now = datetime.datetime.utcnow().isoformat()
updates[version_now] = time_now
save_app_state_variable('kano-tracker', 'versions', updates)
def get_tracker_events(old_only=False):
""" Read the events log and return a dictionary with all of them.
:param old_only: Don't return events from the current boot.
:type old_only: boolean
:returns: A dictionary suitable to be sent to the tracker endpoint.
:rtype: dict
"""
data = {'events': []}
try:
rf = open_locked(tracker_events_file, 'r')
except IOError as e:
logger.error("Error opening the tracker events file {}".format(e))
else:
with rf:
for event_line in rf.readlines():
try:
event = json.loads(event_line)
except:
logger.warn("Found a corrupted event, skipping.")
if _validate_event(event) and event['token'] != TOKEN:
data['events'].append(event)
return data
def _validate_event(event):
""" Check whether the event is correct so the API won't reject it.
:param event: The event data.
:type event: dict
:returns: True/False
:rtype: Boolean
"""
if 'type' not in event:
return False
if 'time' not in event or type(event['time']) != int:
return False
if 'timezone_offset' not in event or type(event['timezone_offset']) != int:
return False
if 'os_version' not in event:
return False
if 'cpu_id' not in event:
return False
if 'token' not in event:
return False
if event['timezone_offset'] < -24*60*60 or \
event['timezone_offset'] > 24*60*60:
return False
return True
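# Editor's illustration: the smallest event that passes the checks above
# looks roughly like this (field values are made up):
#
# {'type': 'action', 'time': 1400000000, 'timezone_offset': 0,
#  'os_version': '1.0', 'cpu_id': 'abc123', 'token': 'tok'}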
def clear_tracker_events(old_only=True):
""" Truncate the events file, removing all the cached data.
:param old_only: Don't remove data from the current boot.
:type old_only: boolean
"""
try:
rf = open_locked(tracker_events_file, 'r')
except IOError as e:
logger.error("Error opening tracking events file {}".format(e))
else:
with rf:
events = []
for event_line in rf.readlines():
try:
event = json.loads(event_line)
if 'token' in event and event['token'] == TOKEN:
events.append(event_line)
except:
logger.warn("Found a corrupted event, skipping.")
with open(tracker_events_file, 'w') as wf:
for event_line in events:
wf.write(event_line)
if 'SUDO_USER' in os.environ:
chown_path(tracker_events_file)
| gpl-2.0 |
miguelgrinberg/heat | heat/tests/api/openstack_v1/test_stacks.py | 2 | 93167 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from oslo_config import cfg
import six
import webob.exc
import heat.api.middleware.fault as fault
import heat.api.openstack.v1.stacks as stacks
from heat.common import exception as heat_exc
from heat.common import identifier
from heat.common import policy
from heat.common import template_format
from heat.common import urlfetch
from heat.rpc import api as rpc_api
from heat.rpc import client as rpc_client
from heat.tests.api.openstack_v1 import tools
from heat.tests import common
class InstantiationDataTest(common.HeatTestCase):
def test_parse_error_success(self):
with stacks.InstantiationData.parse_error_check('Garbage'):
pass
def test_parse_error(self):
def generate_error():
with stacks.InstantiationData.parse_error_check('Garbage'):
raise ValueError
self.assertRaises(webob.exc.HTTPBadRequest, generate_error)
def test_parse_error_message(self):
# make sure the parser error gets through to the caller.
bad_temp = '''
heat_template_version: '2013-05-23'
parameters:
KeyName:
type: string
description: bla
'''
def generate_error():
with stacks.InstantiationData.parse_error_check('foo'):
template_format.parse(bad_temp)
parse_ex = self.assertRaises(webob.exc.HTTPBadRequest, generate_error)
self.assertIn('foo', six.text_type(parse_ex))
def test_stack_name(self):
body = {'stack_name': 'wibble'}
data = stacks.InstantiationData(body)
self.assertEqual('wibble', data.stack_name())
def test_stack_name_missing(self):
body = {'not the stack_name': 'wibble'}
data = stacks.InstantiationData(body)
self.assertRaises(webob.exc.HTTPBadRequest, data.stack_name)
def test_template_inline(self):
template = {'foo': 'bar', 'blarg': 'wibble'}
body = {'template': template}
data = stacks.InstantiationData(body)
self.assertEqual(template, data.template())
def test_template_string_json(self):
template = ('{"heat_template_version": "2013-05-23",'
'"foo": "bar", "blarg": "wibble"}')
body = {'template': template}
data = stacks.InstantiationData(body)
self.assertEqual(json.loads(template), data.template())
def test_template_string_yaml(self):
template = '''HeatTemplateFormatVersion: 2012-12-12
foo: bar
blarg: wibble
'''
parsed = {u'HeatTemplateFormatVersion': u'2012-12-12',
u'blarg': u'wibble',
u'foo': u'bar'}
body = {'template': template}
data = stacks.InstantiationData(body)
self.assertEqual(parsed, data.template())
def test_template_url(self):
template = {'heat_template_version': '2013-05-23',
'foo': 'bar',
'blarg': 'wibble'}
url = 'http://example.com/template'
body = {'template_url': url}
data = stacks.InstantiationData(body)
self.m.StubOutWithMock(urlfetch, 'get')
urlfetch.get(url).AndReturn(json.dumps(template))
self.m.ReplayAll()
self.assertEqual(template, data.template())
self.m.VerifyAll()
def test_template_priority(self):
template = {'foo': 'bar', 'blarg': 'wibble'}
url = 'http://example.com/template'
body = {'template': template, 'template_url': url}
data = stacks.InstantiationData(body)
self.m.StubOutWithMock(urlfetch, 'get')
self.m.ReplayAll()
self.assertEqual(template, data.template())
self.m.VerifyAll()
def test_template_missing(self):
template = {'foo': 'bar', 'blarg': 'wibble'}
body = {'not the template': template}
data = stacks.InstantiationData(body)
self.assertRaises(webob.exc.HTTPBadRequest, data.template)
def test_parameters(self):
params = {'foo': 'bar', 'blarg': 'wibble'}
body = {'parameters': params,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}}
data = stacks.InstantiationData(body)
self.assertEqual(body, data.environment())
def test_environment_only_params(self):
env = {'parameters': {'foo': 'bar', 'blarg': 'wibble'}}
body = {'environment': env}
data = stacks.InstantiationData(body)
self.assertEqual(env, data.environment())
def test_environment_and_parameters(self):
body = {'parameters': {'foo': 'bar'},
'environment': {'parameters': {'blarg': 'wibble'}}}
expect = {'parameters': {'blarg': 'wibble',
'foo': 'bar'},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}}
data = stacks.InstantiationData(body)
self.assertEqual(expect, data.environment())
def test_parameters_override_environment(self):
# This tests that the cli parameters will override
# any parameters in the environment.
body = {'parameters': {'foo': 'bar',
'tester': 'Yes'},
'environment': {'parameters': {'blarg': 'wibble',
'tester': 'fail'}}}
expect = {'parameters': {'blarg': 'wibble',
'foo': 'bar',
'tester': 'Yes'},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}}
data = stacks.InstantiationData(body)
self.assertEqual(expect, data.environment())
def test_environment_bad_format(self):
env = {'somethingnotsupported': {'blarg': 'wibble'}}
body = {'environment': json.dumps(env)}
data = stacks.InstantiationData(body)
self.assertRaises(webob.exc.HTTPBadRequest, data.environment)
def test_environment_missing(self):
env = {'foo': 'bar', 'blarg': 'wibble'}
body = {'not the environment': env}
data = stacks.InstantiationData(body)
self.assertEqual({'parameters': {}, 'encrypted_param_names': [],
'parameter_defaults': {}, 'resource_registry': {}},
data.environment())
def test_args(self):
body = {
'parameters': {},
'environment': {},
'stack_name': 'foo',
'template': {},
'template_url': 'http://example.com/',
'timeout_mins': 60,
}
data = stacks.InstantiationData(body)
self.assertEqual({'timeout_mins': 60}, data.args())
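    def test_args_whitelist_sketch(self):
        # Illustrative addition, not part of the original suite: per the
        # behaviour exercised in test_args above, args() keeps only
        # engine-facing arguments and drops body keys handled elsewhere.
        body = {'stack_name': 'demo',
                'template': {'Foo': 'bar'},
                'timeout_mins': 10}
        data = stacks.InstantiationData(body)
        self.assertEqual('demo', data.stack_name())
        self.assertEqual({'Foo': 'bar'}, data.template())
        self.assertEqual({'timeout_mins': 10}, data.args())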
@mock.patch.object(policy.Enforcer, 'enforce')
class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
    '''
    Tests the API class that acts as the WSGI controller:
    the endpoint that processes API requests after they are routed.
    '''
def setUp(self):
super(StackControllerTest, self).setUp()
# Create WSGI controller instance
class DummyConfig(object):
bind_port = 8004
cfgopts = DummyConfig()
self.controller = stacks.StackController(options=cfgopts)
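    # Note on mocking styles (a reading aid, not original to this file):
    # these tests mix mox (self.m.StubOutWithMock / ReplayAll / VerifyAll,
    # provided by the HeatTestCase base class) with mock (the
    # mock.patch.object decorators and self.patchobject). Both styles stub
    # rpc_client.EngineClient.call, the single RPC entry point to the
    # engine.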
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'parameters': {},
u'outputs': [],
u'notification_topics': [],
u'capabilities': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
]
mock_call.return_value = engine_resp
result = self.controller.index(req, tenant_id=identity.tenant)
expected = {
'stacks': [
{
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '1',
u'updated_time': u'2012-07-09T09:13:11Z',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': u'wordpress',
u'stack_status': u'CREATE_COMPLETE'
}
]
}
self.assertEqual(expected, result)
default_args = {'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': None, 'tenant_safe': True,
'show_deleted': False, 'show_nested': False,
'show_hidden': False, 'tags': None,
'tags_any': None, 'not_tags': None,
'not_tags_any': None}
mock_call.assert_called_once_with(
req.context, ('list_stacks', default_args), version='1.8')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_whitelists_pagination_params(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'limit': 10,
'sort_keys': 'fake sort keys',
'marker': 'fake marker',
'sort_dir': 'fake sort dir',
'balrog': 'you shall not pass!'
}
req = self._get('/stacks', params=params)
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertEqual(13, len(engine_args))
self.assertIn('limit', engine_args)
self.assertIn('sort_keys', engine_args)
self.assertIn('marker', engine_args)
self.assertIn('sort_dir', engine_args)
self.assertIn('filters', engine_args)
self.assertIn('tenant_safe', engine_args)
self.assertNotIn('balrog', engine_args)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_limit_not_int(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'limit': 'not-an-int'}
req = self._get('/stacks', params=params)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req,
tenant_id=self.tenant)
self.assertEqual("Only integer is acceptable by 'limit'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_whitelist_filter_params(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'id': 'fake id',
'status': 'fake status',
'name': 'fake name',
'action': 'fake action',
'username': 'fake username',
'tenant': 'fake tenant',
'owner_id': 'fake owner-id',
'balrog': 'you shall not pass!'
}
req = self._get('/stacks', params=params)
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertIn('filters', engine_args)
filters = engine_args['filters']
self.assertEqual(7, len(filters))
self.assertIn('id', filters)
self.assertIn('status', filters)
self.assertIn('name', filters)
self.assertIn('action', filters)
self.assertIn('username', filters)
self.assertIn('tenant', filters)
self.assertIn('owner_id', filters)
self.assertNotIn('balrog', filters)
def test_index_returns_stack_count_if_with_count_is_true(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'True'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
engine.count_stacks = mock.Mock(return_value=0)
result = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(0, result['count'])
def test_index_doesnt_return_stack_count_if_with_count_is_false(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'false'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
engine.count_stacks = mock.Mock()
result = self.controller.index(req, tenant_id=self.tenant)
self.assertNotIn('count', result)
assert not engine.count_stacks.called
def test_index_with_count_is_invalid(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'invalid_value'}
req = self._get('/stacks', params=params)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req, tenant_id=self.tenant)
        expected = ('Unrecognized value "invalid_value" for "with_count", '
                    'acceptable values are: true, false')
        self.assertIn(expected, six.text_type(exc))
@mock.patch.object(rpc_client.EngineClient, 'count_stacks')
def test_index_doesnt_break_with_old_engine(self, mock_count_stacks,
mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'True'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
mock_count_stacks.side_effect = AttributeError("Should not exist")
result = self.controller.index(req, tenant_id=self.tenant)
self.assertNotIn('count', result)
def test_index_enforces_global_index_if_global_tenant(self, mock_enforce):
params = {'global_tenant': 'True'}
req = self._get('/stacks', params=params)
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
self.controller.index(req, tenant_id=self.tenant)
mock_enforce.assert_called_with(action='global_index',
scope=self.controller.REQUEST_SCOPE,
context=self.context)
def test_global_index_sets_tenant_safe_to_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'global_tenant': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=False)
def test_global_index_show_deleted_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'False'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=False)
def test_global_index_show_deleted_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True)
def test_global_index_show_nested_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_nested': 'False'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_nested=False)
def test_global_index_show_nested_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_nested': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_nested=True)
def test_index_show_deleted_True_with_count_True(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock(return_value=0)
params = {'show_deleted': 'True',
'with_count': 'True'}
req = self._get('/stacks', params=params)
result = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(0, result['count'])
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True)
rpc_client.count_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True,
show_nested=False,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_detail(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'detail', True)
req = self._get('/stacks/detail')
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'parameters': {'foo': 'bar'},
u'outputs': ['key', 'value'],
u'notification_topics': [],
u'capabilities': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
]
mock_call.return_value = engine_resp
result = self.controller.detail(req, tenant_id=identity.tenant)
expected = {
'stacks': [
{
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '1',
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_status': u'CREATE_COMPLETE',
u'parameters': {'foo': 'bar'},
u'outputs': ['key', 'value'],
u'notification_topics': [],
u'capabilities': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
]
}
self.assertEqual(expected, result)
default_args = {'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': None, 'tenant_safe': True,
'show_deleted': False, 'show_nested': False,
'show_hidden': False, 'tags': None,
'tags_any': None, 'not_tags': None,
'not_tags_any': None}
mock_call.assert_called_once_with(
req.context, ('list_stacks', default_args), version='1.8')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_rmt_aterr(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
mock_call.side_effect = tools.to_remote_error(AttributeError())
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(400, resp.json['code'])
self.assertEqual('AttributeError', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context, ('list_stacks', mock.ANY), version='1.8')
def test_index_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', False)
req = self._get('/stacks')
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_rmt_interr(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
mock_call.side_effect = tools.to_remote_error(Exception())
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(500, resp.json['code'])
self.assertEqual('Exception', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context, ('list_stacks', mock.ANY), version='1.8')
def test_create(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': identity.stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
response = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, response)
self.m.VerifyAll()
def test_create_with_tags(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': identity.stack_name,
'parameters': parameters,
'tags': 'tag1,tag2',
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
response = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, response)
self.m.VerifyAll()
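    # Observation from the test above: the API accepts 'tags' as a
    # comma-separated string in the request body, and the controller
    # splits it into a list ('tag1,tag2' -> ['tag1', 'tag2']) before
    # making the RPC call.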
def test_adopt(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {
"heat_template_version": "2013-05-23",
"parameters": {"app_dbx": {"type": "string"}},
"resources": {"res1": {"type": "GenericResourceType"}}}
parameters = {"app_dbx": "test"}
adopt_data = {
"status": "COMPLETE",
"name": "rtrove1",
"parameters": parameters,
"template": template,
"action": "CREATE",
"id": "8532f0d3-ea84-444e-b2bb-2543bb1496a4",
"resources": {"res1": {
"status": "COMPLETE",
"name": "database_password",
"resource_id": "yBpuUROjfGQ2gKOD",
"action": "CREATE",
"type": "GenericResourceType",
"metadata": {}}}}
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': parameters,
'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
response = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, response)
self.m.VerifyAll()
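    # Observation from the test above: when 'adopt_stack_data' is
    # supplied, the body's 'template' may be None; the controller recovers
    # the template and parameters from the adopt data itself and passes
    # the raw adopt payload through in 'args'.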
def test_adopt_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': {},
'timeout_mins': 'not-an-int',
'adopt_stack_data': 'does not matter'}
req = self._post('/stacks', json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req,
tenant_id=self.tenant, body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_adopt_error(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
parameters = {"app_dbx": "test"}
adopt_data = ["Test"]
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': parameters,
'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)}
req = self._post('/stacks', json.dumps(body))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.status_code)
self.assertEqual('400 Bad Request', resp.status)
self.assertIn('Invalid adopt data', resp.text)
self.m.VerifyAll()
def test_create_with_files(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': identity.stack_name,
'parameters': parameters,
'files': {'my.yaml': 'This is the file contents.'},
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {'my.yaml': 'This is the file contents.'},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
result = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, result)
self.m.VerifyAll()
def test_create_err_rpcerr(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True, 3)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
unknown_parameter = heat_exc.UnknownUserParameter(key='a')
missing_parameter = heat_exc.UserParameterMissing(key='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndRaise(tools.to_remote_error(AttributeError()))
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndRaise(tools.to_remote_error(unknown_parameter))
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndRaise(tools.to_remote_error(missing_parameter))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.json['code'])
self.assertEqual('AttributeError', resp.json['error']['type'])
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.json['code'])
self.assertEqual('UnknownUserParameter', resp.json['error']['type'])
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.json['code'])
self.assertEqual('UserParameterMissing', resp.json['error']['type'])
self.m.VerifyAll()
def test_create_err_existing(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
error = heat_exc.StackExists(stack_name='s')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(409, resp.json['code'])
self.assertEqual('StackExists', resp.json['error']['type'])
self.m.VerifyAll()
def test_create_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 'not-an-int'}
req = self._post('/stacks', json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req,
tenant_id=self.tenant, body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_create_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', False)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_create_err_engine(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
error = heat_exc.StackValidationFailed(message='')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.json['code'])
self.assertEqual('StackValidationFailed', resp.json['error']['type'])
self.m.VerifyAll()
    def test_create_err_stack_bad_request(self, mock_enforce):
cfg.CONF.set_override('debug', True)
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
error = heat_exc.HTTPExceptionDisguise(webob.exc.HTTPBadRequest())
self.controller.create = mock.MagicMock(side_effect=error)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create, req, body)
# When HTTP disguised exceptions reach the fault app, they are
# converted into regular responses, just like non-HTTP exceptions
self.assertEqual(400, resp.json['code'])
self.assertEqual('HTTPBadRequest', resp.json['error']['type'])
self.assertIsNotNone(resp.json['error']['traceback'])
@mock.patch.object(rpc_client.EngineClient, 'call')
@mock.patch.object(stacks.stacks_view, 'format_stack')
def test_preview_stack(self, mock_format, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'preview', True)
body = {'stack_name': 'foo', 'template': {}}
req = self._get('/stacks/preview', params={})
mock_call.return_value = {}
mock_format.return_value = 'formatted_stack'
result = self.controller.preview(req, tenant_id=self.tenant, body=body)
self.assertEqual({'stack': 'formatted_stack'}, result)
def test_lookup(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
req = self._get('/stacks/%(stack_name)s' % identity)
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('identify_stack', {'stack_name': identity.stack_name})
).AndReturn(identity)
self.m.ReplayAll()
found = self.assertRaises(
webob.exc.HTTPFound, self.controller.lookup, req,
tenant_id=identity.tenant, stack_name=identity.stack_name)
self.assertEqual(self._url(identity), found.location)
self.m.VerifyAll()
def test_lookup_arn(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
req = self._get('/stacks%s' % identity.arn_url_path())
self.m.ReplayAll()
found = self.assertRaises(
webob.exc.HTTPFound, self.controller.lookup,
req, tenant_id=identity.tenant, stack_name=identity.arn())
self.assertEqual(self._url(identity), found.location)
self.m.VerifyAll()
def test_lookup_nonexistent(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s' % {
'stack_name': stack_name})
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('identify_stack', {'stack_name': stack_name})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name)
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_lookup_err_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', False)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s' % {
'stack_name': stack_name})
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_lookup_resource(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
req = self._get('/stacks/%(stack_name)s/resources' % identity)
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('identify_stack', {'stack_name': identity.stack_name})
).AndReturn(identity)
self.m.ReplayAll()
found = self.assertRaises(
webob.exc.HTTPFound, self.controller.lookup, req,
tenant_id=identity.tenant, stack_name=identity.stack_name,
path='resources')
self.assertEqual(self._url(identity) + '/resources',
found.location)
self.m.VerifyAll()
def test_lookup_resource_nonexistent(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s/resources' % {
'stack_name': stack_name})
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('identify_stack', {'stack_name': stack_name})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name,
path='resources')
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_lookup_resource_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', False)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s/resources' % {
'stack_name': stack_name})
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name,
path='resources')
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_show(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
parameters = {u'DBUsername': u'admin',
u'LinuxDistribution': u'F17',
u'InstanceType': u'm1.large',
u'DBRootPassword': u'admin',
u'DBPassword': u'admin',
u'DBName': u'wordpress'}
outputs = [{u'output_key': u'WebsiteURL',
u'description': u'URL for Wordpress wiki',
u'output_value': u'http://10.0.0.8/wordpress'}]
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'parameters': parameters,
u'outputs': outputs,
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'notification_topics': [],
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'description': u'blah',
u'disable_rollback': True,
                u'timeout_mins': 60,
u'capabilities': [],
}
]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('show_stack', {'stack_identity': dict(identity)})
).AndReturn(engine_resp)
self.m.ReplayAll()
response = self.controller.show(req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
expected = {
'stack': {
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '6',
u'updated_time': u'2012-07-09T09:13:11Z',
u'parameters': parameters,
u'outputs': outputs,
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_status': u'CREATE_COMPLETE',
u'capabilities': [],
u'notification_topics': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
}
self.assertEqual(expected, response)
self.m.VerifyAll()
def test_show_notfound(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('show_stack', {'stack_identity': dict(identity)})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.show,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_show_invalidtenant(self, mock_enforce):
identity = identifier.HeatIdentifier('wibble', 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.show,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
self.m.VerifyAll()
def test_show_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.show,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_get_template(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'template', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
template = {u'Foo': u'bar'}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('get_template', {'stack_identity': dict(identity)})
).AndReturn(template)
self.m.ReplayAll()
response = self.controller.template(req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(template, response)
self.m.VerifyAll()
def test_get_template_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'template', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s/template'
% identity)
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.template,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
self.m.VerifyAll()
def test_get_template_err_notfound(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'template', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('get_template', {'stack_identity': dict(identity)})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.template,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_update(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 30}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_with_tags(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'tags': 'tag1,tag2',
'timeout_mins': 30}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_bad_name(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 30}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {u'parameters': parameters,
u'encrypted_param_names': [],
u'parameter_defaults': {},
u'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30}})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.update,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_update_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 'not-int'}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_update_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', False)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 30}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.update,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_update_with_existing_parameters(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
body = {'template': template,
'parameters': {},
'files': {},
'timeout_mins': 30}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': {},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update_patch,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
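    # Observation from the PATCH tests in this group: update_patch sets
    # rpc_api.PARAM_EXISTING in 'args', which tells the engine to reuse
    # the stack's existing parameter values for anything not supplied in
    # the request.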
def test_update_with_existing_parameters_with_tags(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
body = {'template': template,
'parameters': {},
'files': {},
'tags': 'tag1,tag2',
'timeout_mins': 30}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': {},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30,
'tags': ['tag1', 'tag2']}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update_patch,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_with_patched_existing_parameters(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 30}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update_patch,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_with_patch_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 'not-int'}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_patch, req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_update_with_existing_and_default_parameters(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
clear_params = [u'DBUsername', u'DBPassword', u'LinuxDistribution']
body = {'template': template,
'parameters': {},
'clear_parameters': clear_params,
'files': {},
'timeout_mins': 30}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': {},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {rpc_api.PARAM_EXISTING: True,
'clear_parameters': clear_params,
'timeout_mins': 30}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update_patch,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_update_with_patched_and_default_parameters(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
clear_params = [u'DBUsername', u'DBPassword', u'LinuxDistribution']
body = {'template': template,
'parameters': parameters,
'clear_parameters': clear_params,
'files': {},
'timeout_mins': 30}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('update_stack',
{'stack_identity': dict(identity),
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {rpc_api.PARAM_EXISTING: True,
'clear_parameters': clear_params,
'timeout_mins': 30}})
).AndReturn(dict(identity))
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPAccepted,
self.controller.update_patch,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.m.VerifyAll()
def test_delete(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
# Engine returns None when delete successful
rpc_client.EngineClient.call(
req.context,
('delete_stack', {'stack_identity': dict(identity)})
).AndReturn(None)
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPNoContent,
self.controller.delete,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.m.VerifyAll()
def test_delete_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.delete,
req, tenant_id=self.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_abandon(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'abandon', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._abandon('/stacks/%(stack_name)s/%(stack_id)s' % identity)
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
# Engine returns json data on abandon completion
expected = {"name": "test", "id": "123"}
rpc_client.EngineClient.call(
req.context,
('abandon_stack', {'stack_identity': dict(identity)})
).AndReturn(expected)
self.m.ReplayAll()
ret = self.controller.abandon(req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(expected, ret)
self.m.VerifyAll()
def test_abandon_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'abandon', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._abandon('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.abandon,
req, tenant_id=self.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_delete_bad_name(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', True)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
error = heat_exc.StackNotFound(stack_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
# Engine returns None when delete successful
rpc_client.EngineClient.call(
req.context,
('delete_stack', {'stack_identity': dict(identity)})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.delete,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(404, resp.json['code'])
self.assertEqual('StackNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_validate_template(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'validate_template', True)
template = {u'Foo': u'bar'}
body = {'template': template}
req = self._post('/validate', json.dumps(body))
engine_response = {
u'Description': u'blah',
u'Parameters': [
{
u'NoEcho': u'false',
u'ParameterKey': u'InstanceType',
u'Description': u'Instance type'
}
]
}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('validate_template',
{'template': template,
'params': {'parameters': {},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}}})
).AndReturn(engine_response)
self.m.ReplayAll()
response = self.controller.validate_template(req,
tenant_id=self.tenant,
body=body)
self.assertEqual(engine_response, response)
self.m.VerifyAll()
def test_validate_template_error(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'validate_template', True)
template = {u'Foo': u'bar'}
body = {'template': template}
req = self._post('/validate', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('validate_template',
{'template': template,
'params': {'parameters': {},
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}}})
).AndReturn({'Error': 'fubar'})
self.m.ReplayAll()
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_template,
req, tenant_id=self.tenant, body=body)
self.m.VerifyAll()
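    # Observation from the test above: the engine signals a validation
    # failure by returning a dict with an 'Error' key rather than raising;
    # the controller translates that into an HTTP 400 response.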
def test_validate_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'validate_template', False)
template = {u'Foo': u'bar'}
body = {'template': template}
req = self._post('/validate', json.dumps(body))
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.validate_template,
req, tenant_id=self.tenant, body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_list_resource_types(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
req = self._get('/resource_types')
engine_response = ['AWS::EC2::Instance',
'AWS::EC2::EIP',
'AWS::EC2::EIPAssociation']
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context, ('list_resource_types', {'support_status': None}),
version="1.1"
).AndReturn(engine_response)
self.m.ReplayAll()
response = self.controller.list_resource_types(req,
tenant_id=self.tenant)
self.assertEqual({'resource_types': engine_response}, response)
self.m.VerifyAll()
def test_list_resource_types_error(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
req = self._get('/resource_types')
error = heat_exc.ResourceTypeNotFound(type_name='')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('list_resource_types',
{'support_status': None},
), version="1.1"
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.list_resource_types,
req, tenant_id=self.tenant)
self.assertEqual(404, resp.json['code'])
self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_list_resource_types_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_resource_types', False)
req = self._get('/resource_types')
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.list_resource_types,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_list_template_versions(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_template_versions', True)
req = self._get('/template_versions')
engine_response = [
{'version': 'heat_template_version.2013-05-23', 'type': 'hot'},
{'version': 'AWSTemplateFormatVersion.2010-09-09', 'type': 'cfn'}]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context, ('list_template_versions', {}),
version="1.11"
).AndReturn(engine_response)
self.m.ReplayAll()
response = self.controller.list_template_versions(
req, tenant_id=self.tenant)
self.assertEqual({'template_versions': engine_response}, response)
self.m.VerifyAll()
def test_list_template_functions(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_template_functions', True)
req = self._get('/template_versions/t1/functions')
engine_response = [
{'functions': 'func1', 'description': 'desc1'},
]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context, (
'list_template_functions', {'template_version': 't1'}),
version="1.13"
).AndReturn(engine_response)
self.m.ReplayAll()
response = self.controller.list_template_functions(
req, tenant_id=self.tenant, template_version='t1')
self.assertEqual({'template_functions': engine_response}, response)
self.m.VerifyAll()
def test_resource_schema(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
req = self._get('/resource_types/ResourceWithProps')
type_name = 'ResourceWithProps'
engine_response = {
'resource_type': type_name,
'properties': {
'Foo': {'type': 'string', 'required': False},
},
'attributes': {
'foo': {'description': 'A generic attribute'},
'Foo': {'description': 'Another generic attribute'},
},
'support_status': {
'status': 'SUPPORTED',
'version': None,
'message': None,
},
}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('resource_schema', {'type_name': type_name})
).AndReturn(engine_response)
self.m.ReplayAll()
response = self.controller.resource_schema(req,
tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(engine_response, response)
self.m.VerifyAll()
def test_resource_schema_nonexist(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
req = self._get('/resource_types/BogusResourceType')
type_name = 'BogusResourceType'
error = heat_exc.ResourceTypeNotFound(type_name='BogusResourceType')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('resource_schema', {'type_name': type_name})
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.resource_schema,
req, tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(404, resp.json['code'])
self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_resource_schema_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'resource_schema', False)
req = self._get('/resource_types/BogusResourceType')
type_name = 'BogusResourceType'
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.resource_schema,
req, tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_generate_template(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', True)
req = self._get('/resource_types/TEST_TYPE/template')
engine_response = {'Type': 'TEST_TYPE'}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('generate_template', {'type_name': 'TEST_TYPE',
'template_type': 'cfn'}),
version='1.9'
).AndReturn(engine_response)
self.m.ReplayAll()
self.controller.generate_template(req, tenant_id=self.tenant,
type_name='TEST_TYPE')
self.m.VerifyAll()
def test_generate_template_invalid_template_type(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', True)
params = {'template_type': 'invalid'}
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
req = self._get('/resource_types/TEST_TYPE/template',
params=params)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.generate_template,
req, tenant_id=self.tenant,
type_name='TEST_TYPE')
self.assertIn('Template type is not supported: Invalid template '
'type "invalid", valid types are: cfn, hot.',
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_generate_template_not_found(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', True)
req = self._get('/resource_types/NOT_FOUND/template')
error = heat_exc.ResourceTypeNotFound(type_name='a')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('generate_template', {'type_name': 'NOT_FOUND',
'template_type': 'cfn'}),
version='1.9'
).AndRaise(tools.to_remote_error(error))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.generate_template,
req, tenant_id=self.tenant,
type_name='NOT_FOUND')
self.assertEqual(404, resp.json['code'])
self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
self.m.VerifyAll()
def test_generate_template_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', False)
req = self._get('/resource_types/NOT_FOUND/template')
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.generate_template,
req, tenant_id=self.tenant,
type_name='blah')
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
class StackSerializerTest(common.HeatTestCase):
def setUp(self):
super(StackSerializerTest, self).setUp()
self.serializer = stacks.StackSerializer()
def test_serialize_create(self):
result = {'stack':
{'id': '1',
'links': [{'href': 'location', "rel": "self"}]}}
response = webob.Response()
response = self.serializer.create(response, result)
self.assertEqual(201, response.status_int)
self.assertEqual('location', response.headers['Location'])
self.assertEqual('application/json', response.headers['Content-Type'])
| apache-2.0 |
thequbit/yellr-server | yellr-serv/yellrserv/ep_client_assignment.py | 1 | 2188 | from pyramid.view import view_config
import client_utils
import utils
@view_config(route_name='get_assignments.json')
def get_assignments(request):
result = {'success': False}
status_code = 200
#try:
if True:
success, error_text, language_code, lat, lng, \
client = client_utils.register_client(request)
if success == False:
raise Exception(error_text)
assignments = client_utils.get_assignments(
client_id = client.client_id,
language_code = language_code,
lat = lat,
lng = lng,
)
result['assignments'] = assignments
result['success'] = True
#except Exception, e:
# status_code = 400
# result['error_text'] = str(e)
client_utils.log_client_action(
client = client,
url = 'get_assignments.json',
lat = lat,
lng = lng,
request = request,
result = result,
success = success,
)
return utils.make_response(result, status_code)
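# Response shape, as inferred from the code above (illustrative):
#   {"success": true, "assignments": [...]}
# serialized by utils.make_response together with the HTTP status code.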
'''
@view_config(route_name='get_poll_results.json')
def get_poll_results(request):
result = {'success': False}
status_code = 200
try:
success, error_text, language_code, lat, lng, \
client = client_utils.register_client(request)
if success == False:
raise Exception(error_text)
#assignments = client_utils.get_assignments(
# client_id = client.client_id,
# language_code = language_code,
# lat = lat,
# lng = lng,
#)
assignment_id = request.GET['assignment_id']
results = client_utils.get_poll_results(
assignment_id = assignment_id,
)
result['results'] = results
result['success'] = True
except Exception, e:
status_code = 400
result['error_text'] = str(e)
client_utils.log_client_action(
client = client,
url = 'get_poll_results.json',
lat = lat,
lng = lng,
request = request,
result = result,
success = success,
)
return utils.make_response(result, status_code)
'''
| agpl-3.0 |
jhsenjaliya/incubator-airflow | airflow/migrations/versions/64de9cddf6c9_add_task_fails_journal_table.py | 59 | 1452 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add task fails journal table
Revision ID: 64de9cddf6c9
Revises: 211e584da130
Create Date: 2016-08-03 14:02:59.203021
"""
# revision identifiers, used by Alembic.
revision = '64de9cddf6c9'
down_revision = '211e584da130'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'task_fail',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('task_id', sa.String(length=250), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('execution_date', sa.DateTime(), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=True),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('task_fail')
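# Usage sketch (illustrative): in an Airflow deployment this revision is
# applied with `airflow upgradedb` (alembic's `upgrade` run inside Airflow's
# migration environment); downgrading to revision 211e584da130 runs
# downgrade() above and drops the table again.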
| apache-2.0 |
GeoffreyFrogeye/syncthing-gtk | syncthing_gtk/timermanager.py | 2 | 1640 | #!/usr/bin/env python2
"""
Syncthing-GTK - Timer manager
Simple abstract class for named, cancelable timers
"""
from __future__ import unicode_literals
from gi.repository import GLib
class TimerManager(object):
def __init__(self):
self._timers = {}
def timer(self, name, delay, callback, *data, **kwdata):
"""
        Runs callback after the specified number of seconds. Uses
        GLib.timeout_add_seconds (or GLib.timeout_add for sub-second delays)
        with a small wrapper so that a named timer can be canceled with
        cancel_timer() or replaced by scheduling the same name again.
"""
method = GLib.timeout_add_seconds
if delay < 1 and delay > 0:
method = GLib.timeout_add
delay = delay * 1000.0
if name is None:
# No wrapping is needed, call GLib directly
method(delay, callback, *data, **kwdata)
else:
if name in self._timers:
# Cancel old timer
GLib.source_remove(self._timers[name])
# Create new one
self._timers[name] = method(delay, self._callback, name, callback, *data, **kwdata)
def timer_active(self, name):
""" Returns True if named timer is active """
return (name in self._timers)
def cancel_timer(self, name):
"""
Cancels named timer. Returns True on success, False if there is no such timer.
"""
if name in self._timers:
GLib.source_remove(self._timers[name])
del self._timers[name]
return True
return False
def cancel_all(self):
""" Cancels all active timers """
for x in self._timers:
GLib.source_remove(self._timers[x])
self._timers = {}
def _callback(self, name, callback, *data, **kwdata):
"""
Removes name from list of active timers and calls real callback.
"""
del self._timers[name]
callback(*data, **kwdata)
return False
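# Minimal usage sketch (illustrative, not part of the original module; it
# assumes a running GLib main loop, without which no callback ever fires):
#
#   class Poller(TimerManager):
#       def schedule(self):
#           # named timer: scheduling "poll" again before it fires replaces it
#           self.timer("poll", 5, self.on_poll)
#       def on_poll(self):
#           print("polling...")
#           self.schedule()  # re-arm; timers here are one-shot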
| gpl-2.0 |
scigghia/account-payment | __unported__/purchase_payment/__openerp__.py | 4 | 2019 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010 Pexego S.L. (http://www.pexego.es) All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Purchase Payment Type and Terms",
"version" : "1.0",
"author" : "Pexego",
"website": "www.pexego.es",
"license" : "GPL-3",
"category" : 'Generic Modules/Sales & Purchases',
"description": """Adds payment info to the purchase process.
Adds payment type, terms, and bank account to the purchase orders.
Allows to set different default payment terms for purchases (the partners
will have payment terms and supplier payment terms).
The payment terms, payment type and bank account default values for the
purchase will be taken from the partner.
Invoices created from purchase orders, or from pickings related to purchase
orders, will inherit this payment info from the payment order.
""",
"depends" : [
"account_payment",
"account_payment_extension",
"purchase",
"stock",
],
"init_xml" : [],
"demo_xml" : [],
"update_xml" : [
"purchase_payment_view.xml",
],
"active": False,
"installable": False,
}
| agpl-3.0 |
raboof/supybot | src/utils/iter.py | 14 | 5180 | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from __future__ import division
import sys
import new
import random
from itertools import *
def len(iterable):
"""Returns the length of an iterator."""
i = 0
for _ in iterable:
i += 1
return i
def trueCycle(iterable):
while 1:
yielded = False
for x in iterable:
yield x
yielded = True
if not yielded:
raise StopIteration
if sys.version_info < (2, 4, 0):
def groupby(key, iterable):
if key is None:
key = lambda x: x
it = iter(iterable)
value = it.next() # If there are no items, this takes an early exit
oldkey = key(value)
group = [value]
for value in it:
newkey = key(value)
if newkey != oldkey:
yield group
group = []
oldkey = newkey
group.append(value)
yield group
def partition(p, iterable):
"""Partitions an iterable based on a predicate p.
Returns a (yes,no) tuple"""
no = []
yes = []
for elt in iterable:
if p(elt):
yes.append(elt)
else:
no.append(elt)
return (yes, no)
def any(p, iterable):
"""Returns true if any element in iterable satisfies predicate p."""
for elt in ifilter(p, iterable):
return True
else:
return False
def all(p, iterable):
"""Returns true if all elements in iterable satisfy predicate p."""
for elt in ifilterfalse(p, iterable):
return False
else:
return True
def choice(iterable):
if isinstance(iterable, (list, tuple)):
return random.choice(iterable)
else:
n = 1
m = new.module('') # Guaranteed unique value.
ret = m
for x in iterable:
if random.random() < 1/n:
ret = x
n += 1
if ret is m:
raise IndexError
return ret
def flatten(iterable, strings=False):
"""Flattens a list of lists into a single list. See the test for examples.
"""
for elt in iterable:
if not strings and isinstance(elt, basestring):
yield elt
else:
try:
for x in flatten(elt):
yield x
except TypeError:
yield elt
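# Examples (illustrative; the project's test suite is the authority):
#   list(flatten([1, [2, [3, 4]], 5]))  ->  [1, 2, 3, 4, 5]
#   list(flatten(['ab', ['cd']]))       ->  ['ab', 'cd']   (strings kept whole
#                                            unless strings=True)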
def split(isSeparator, iterable, maxsplit=-1, yieldEmpty=False):
"""split(isSeparator, iterable, maxsplit=-1, yieldEmpty=False)
Splits an iterator based on a predicate isSeparator."""
if isinstance(isSeparator, basestring):
f = lambda s: s == isSeparator
else:
f = isSeparator
acc = []
for element in iterable:
if maxsplit == 0 or not f(element):
acc.append(element)
else:
maxsplit -= 1
if acc or yieldEmpty:
yield acc
acc = []
if acc or yieldEmpty:
yield acc
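# Example (illustrative):
#   list(split('|', ['a', '|', 'b', 'c', '|']))  ->  [['a'], ['b', 'c']]
# With yieldEmpty=True the trailing separator also yields a final [].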
def ilen(iterable):
i = 0
for _ in iterable:
i += 1
return i
def startswith(long, short):
longI = iter(long)
shortI = iter(short)
try:
while True:
if shortI.next() != longI.next():
return False
except StopIteration:
return True
def limited(iterable, limit):
i = limit
iterable = iter(iterable)
try:
while i:
yield iterable.next()
i -= 1
except StopIteration:
raise ValueError, 'Expected %s elements in iterable (%r), got %s.' % \
(limit, iterable, limit-i)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
boedy1996/SPARC | geonode/maps/urls.py | 1 | 4070 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
js_info_dict = {
'packages': ('geonode.maps',),
}
urlpatterns = patterns('geonode.maps.views',
url(r'^$',
TemplateView.as_view(
template_name='maps/map_list.html'),
name='maps_browse'),
url(r'^new$', 'new_map', name="new_map"),
url(r'^new/data$', 'new_map_json', name='new_map_json'),
url(r'^checkurl/?$', 'ajax_url_lookup'),
url(r'^snapshot/create/?$', 'snapshot_create'),
url(r'^(?P<mapid>[^/]+)$',
'map_detail',
name='map_detail'),
url(r'^(?P<mapid>[^/]+)/view$',
'map_view',
name='map_view'),
url(r'^(?P<mapid>[^/]+)/data$',
'map_json',
name='map_json'),
url(r'^(?P<mapid>[^/]+)/download$',
'map_download',
name='map_download'),
url(r'^(?P<mapid>[^/]+)/wmc$',
'map_wmc',
name='map_wmc'),
url(r'^(?P<mapid>[^/]+)/wms$',
'map_wms',
name='map_wms'),
url(r'^(?P<mapid>[^/]+)/remove$',
'map_remove',
name='map_remove'),
url(r'^(?P<mapid>[^/]+)/metadata$',
'map_metadata',
name='map_metadata'),
url(r'^(?P<mapid>[^/]+)/embed$',
'map_embed',
name='map_embed'),
url(r'^(?P<mapid>[^/]+)/history$',
'ajax_snapshot_history'),
url(
r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/view$',
'map_view'),
url(
r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/info$',
'map_detail'),
url(
r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/embed/?$',
'map_embed'),
url(
r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/data$',
'map_json',
name='map_json'),
url(r'^check/$',
'map_download_check',
name='map_download_check'),
url(r'^embed/$', 'map_embed', name='map_embed'),
url(r'^(?P<layername>[^/]*)/attributes',
'maplayer_attributes',
name='maplayer_attributes'),
# url(r'^change-poc/(?P<ids>\w+)$', 'change_poc', name='maps_change_poc'),
)
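# Example (illustrative): with this urlconf included under /maps/, as geonode's
# root urlconf is assumed to do, reverse('map_detail', kwargs={'mapid': '42'})
# resolves to '/maps/42'.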
| gpl-3.0 |
Endika/odoomrp-wip | quality_control_sale_stock/__openerp__.py | 19 | 1562 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c)
# 2015 Serv. Tec. Avanzados - Pedro M. Baeza (http://www.serviciosbaeza.com)
# 2015 AvanzOsc (http://www.avanzosc.es)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Quality control - Sale stock",
"version": "8.0.1.0.0",
"author": "OdooMRP team, "
"AvanzOSC, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
"website": "http://www.odoomrp.com",
"contributors": [
"Pedro M. Baeza <[email protected]",
],
"category": "Quality control",
"depends": [
'quality_control_stock',
'sale_stock',
],
"data": [
'security/ir.model.access.csv',
],
"installable": True,
"auto_install": True,
}
| agpl-3.0 |
PeterWangPo/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/feeders.py | 121 | 4477 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.common.config.committervalidator import CommitterValidator
from webkitpy.tool.grammar import pluralize
_log = logging.getLogger(__name__)
class AbstractFeeder(object):
def __init__(self, tool):
self._tool = tool
def feed(self):
raise NotImplementedError("subclasses must implement")
class CommitQueueFeeder(AbstractFeeder):
queue_name = "commit-queue"
def __init__(self, tool):
AbstractFeeder.__init__(self, tool)
self.committer_validator = CommitterValidator(self._tool)
def _update_work_items(self, item_ids):
# FIXME: This is the last use of update_work_items, the commit-queue
# should move to feeding patches one at a time like the EWS does.
self._tool.status_server.update_work_items(self.queue_name, item_ids)
_log.info("Feeding %s items %s" % (self.queue_name, item_ids))
def feed(self):
patches = self._validate_patches()
patches = self._patches_with_acceptable_review_flag(patches)
patches = sorted(patches, self._patch_cmp)
patch_ids = [patch.id() for patch in patches]
self._update_work_items(patch_ids)
def _patches_for_bug(self, bug_id):
return self._tool.bugs.fetch_bug(bug_id).commit_queued_patches(include_invalid=True)
# Filters out patches with r? or r-, only r+ or no review are OK to land.
def _patches_with_acceptable_review_flag(self, patches):
return [patch for patch in patches if patch.review() in [None, '+']]
def _validate_patches(self):
# Not using BugzillaQueries.fetch_patches_from_commit_queue() so we can reject patches with invalid committers/reviewers.
bug_ids = self._tool.bugs.queries.fetch_bug_ids_from_commit_queue()
all_patches = sum([self._patches_for_bug(bug_id) for bug_id in bug_ids], [])
return self.committer_validator.patches_after_rejecting_invalid_commiters_and_reviewers(all_patches)
def _patch_cmp(self, a, b):
# Sort first by is_rollout, then by attach_date.
# Reversing the order so that is_rollout is first.
rollout_cmp = cmp(b.is_rollout(), a.is_rollout())
if rollout_cmp != 0:
return rollout_cmp
return cmp(a.attach_date(), b.attach_date())
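    # Net ordering (descriptive note): rollout patches sort ahead of everything
    # else, and within each group older attachments land before newer ones.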
class EWSFeeder(AbstractFeeder):
def __init__(self, tool):
self._ids_sent_to_server = set()
AbstractFeeder.__init__(self, tool)
def feed(self):
ids_needing_review = set(self._tool.bugs.queries.fetch_attachment_ids_from_review_queue())
new_ids = ids_needing_review.difference(self._ids_sent_to_server)
_log.info("Feeding EWS (%s, %s new)" % (pluralize("r? patch", len(ids_needing_review)), len(new_ids)))
for attachment_id in new_ids: # Order doesn't really matter for the EWS.
self._tool.status_server.submit_to_ews(attachment_id)
self._ids_sent_to_server.add(attachment_id)
| bsd-3-clause |
wisdark/Empire | lib/stagers/osx/dylib.py | 11 | 3528 | from lib.common import helpers
import os
class Stager:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'dylib',
'Author': ['@xorrior'],
'Description': ('Generates a dylib.'),
'Comments': [
''
]
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener' : {
'Description' : 'Listener to generate stager for.',
'Required' : True,
'Value' : ''
},
'Language' : {
'Description' : 'Language of the stager to generate.',
'Required' : True,
'Value' : 'python'
},
'Architecture' : {
'Description' : 'Architecture: x86/x64',
'Required' : True,
'Value' : 'x86'
},
'SafeChecks' : {
                'Description' : 'Switch. Checks for Little Snitch or a sandbox and exits the staging process if either is detected. Defaults to True.',
'Required' : True,
'Value' : 'True'
},
'Hijacker' : {
'Description' : 'Generate dylib to be used in a Dylib Hijack. This provides a dylib with the LC_REEXPORT_DYLIB load command. The path will serve as a placeholder.',
'Required' : True,
'Value' : 'False'
},
'OutFile' : {
'Description' : 'File to write the dylib.',
'Required' : True,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
language = self.options['Language']['Value']
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
arch = self.options['Architecture']['Value']
hijacker = self.options['Hijacker']['Value']
safeChecks = self.options['SafeChecks']['Value']
if arch == "":
print helpers.color("[!] Please select a valid architecture")
return ""
# generate the launcher code
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language=language, userAgent=userAgent, safeChecks=safeChecks)
if launcher == "":
print helpers.color("[!] Error in launcher command generation.")
return ""
else:
launcher = launcher.strip('echo').strip(' | python &').strip("\"")
dylib = self.mainMenu.stagers.generate_dylib(launcherCode=launcher, arch=arch, hijacker=hijacker)
return dylib
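# Note (descriptive, assumed from how other Empire stagers behave): generate()
# returns the raw dylib bytes; writing them to OutFile is left to the
# surrounding framework.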
| bsd-3-clause |
apple/llvm-project | lldb/test/API/lang/c/set_values/TestSetValues.py | 8 | 5017 | """Test settings and readings of program variables."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SetValuesTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers to break inside main().
self.line1 = line_number('main.c', '// Set break point #1.')
self.line2 = line_number('main.c', '// Set break point #2.')
self.line3 = line_number('main.c', '// Set break point #3.')
self.line4 = line_number('main.c', '// Set break point #4.')
self.line5 = line_number('main.c', '// Set break point #5.')
def test(self):
"""Test settings and readings of program variables."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Set breakpoints on several places to set program variables.
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line1, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line2, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line3, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line4, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line5, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# main.c:15
# Check that 'frame variable --show-types' displays the correct data
# type and value.
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(char) i = 'a'")
# Now set variable 'i' and check that it is correctly displayed.
self.runCmd("expression i = 'b'")
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(char) i = 'b'")
self.runCmd("continue")
# main.c:36
# Check that 'frame variable --show-types' displays the correct data
# type and value.
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
patterns=["\((short unsigned int|unsigned short)\) i = 33"])
# Now set variable 'i' and check that it is correctly displayed.
self.runCmd("expression i = 333")
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
patterns=["\((short unsigned int|unsigned short)\) i = 333"])
self.runCmd("continue")
# main.c:57
# Check that 'frame variable --show-types' displays the correct data
# type and value.
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(long) i = 33")
# Now set variable 'i' and check that it is correctly displayed.
self.runCmd("expression i = 33333")
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(long) i = 33333")
self.runCmd("continue")
# main.c:78
# Check that 'frame variable --show-types' displays the correct data
# type and value.
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(double) i = 2.25")
# Now set variable 'i' and check that it is correctly displayed.
self.runCmd("expression i = 1.5")
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(double) i = 1.5")
self.runCmd("continue")
# main.c:85
# Check that 'frame variable --show-types' displays the correct data
# type and value.
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(long double) i = 2.25")
# Now set variable 'i' and check that it is correctly displayed.
self.runCmd("expression i = 1.5")
self.expect(
"frame variable --show-types",
VARIABLES_DISPLAYED_CORRECTLY,
startstr="(long double) i = 1.5")
| apache-2.0 |
duramato/CouchPotatoServer | libs/dateutil/parser.py | 103 | 33736 | # -*- coding:iso-8859-1 -*-
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard Python
datetime module.
"""
from __future__ import unicode_literals
__license__ = "Simplified BSD"
import datetime
import string
import time
import collections
try:
from io import StringIO
except ImportError:
    from StringIO import StringIO  # Python 2 fallback
from six import text_type, binary_type, integer_types
from . import relativedelta
from . import tz
__all__ = ["parse", "parserinfo"]
# Some pointers:
#
# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html
# http://www.w3.org/TR/NOTE-datetime
# http://ringmaster.arc.nasa.gov/tools/time_formats.html
# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm
# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html
class _timelex(object):
def __init__(self, instream):
if isinstance(instream, text_type):
instream = StringIO(instream)
self.instream = instream
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'
'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.numchars = '0123456789'
self.whitespace = ' \t\r\n'
self.charstack = []
self.tokenstack = []
self.eof = False
def get_token(self):
if self.tokenstack:
return self.tokenstack.pop(0)
seenletters = False
token = None
state = None
wordchars = self.wordchars
numchars = self.numchars
whitespace = self.whitespace
while not self.eof:
if self.charstack:
nextchar = self.charstack.pop(0)
else:
nextchar = self.instream.read(1)
while nextchar == '\x00':
nextchar = self.instream.read(1)
if not nextchar:
self.eof = True
break
elif not state:
token = nextchar
if nextchar in wordchars:
state = 'a'
elif nextchar in numchars:
state = '0'
elif nextchar in whitespace:
token = ' '
break # emit token
else:
break # emit token
elif state == 'a':
seenletters = True
if nextchar in wordchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0':
if nextchar in numchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == 'a.':
seenletters = True
if nextchar == '.' or nextchar in wordchars:
token += nextchar
elif nextchar in numchars and token[-1] == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0.':
if nextchar == '.' or nextchar in numchars:
token += nextchar
elif nextchar in wordchars and token[-1] == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
if (state in ('a.', '0.') and
(seenletters or token.count('.') > 1 or token[-1] == '.')):
l = token.split('.')
token = l[0]
for tok in l[1:]:
self.tokenstack.append('.')
if tok:
self.tokenstack.append(tok)
return token
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def next(self):
return self.__next__() # Python 2.x support
def split(cls, s):
return list(cls(s))
split = classmethod(split)
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"),
("Wed", "Wednesday"),
("Thu", "Thursday"),
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"),
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "Sept", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z"]
PERTAIN = ["of"]
TZOFFSET = {}
def __init__(self, dayfirst = False, yearfirst = False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year // 100 * 100
def _convert(self, lst):
dct = {}
for i in range(len(lst)):
v = lst[i]
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
if len(name) >= 3:
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
if len(name) >= 3:
try:
return self._months[name.lower()] + 1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year):
if year < 100:
year += self._century
if abs(year - self._year) >= 50:
if year < self._year:
year += 100
else:
year -= 100
return year
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year)
if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
class parser(object):
def __init__(self, info = None):
self.info = info or parserinfo()
def parse(self, timestr, default = None,
ignoretz = False, tzinfos = None,
**kwargs):
if not default:
default = datetime.datetime.now().replace(hour = 0, minute = 0,
second = 0, microsecond = 0)
res = self._parse(timestr, **kwargs)
if res is None:
raise ValueError("unknown string format")
repl = {}
for attr in ["year", "month", "day", "hour",
"minute", "second", "microsecond"]:
value = getattr(res, attr)
if value is not None:
repl[attr] = value
ret = default.replace(**repl)
if res.weekday is not None and not res.day:
ret = ret + relativedelta.relativedelta(weekday = res.weekday)
if not ignoretz:
if isinstance(tzinfos, collections.Callable) or tzinfos and res.tzname in tzinfos:
if isinstance(tzinfos, collections.Callable):
tzdata = tzinfos(res.tzname, res.tzoffset)
else:
tzdata = tzinfos.get(res.tzname)
if isinstance(tzdata, datetime.tzinfo):
tzinfo = tzdata
elif isinstance(tzdata, text_type):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, integer_types):
tzinfo = tz.tzoffset(res.tzname, tzdata)
else:
raise ValueError("offset must be tzinfo subclass, " \
"tz string, or int offset")
ret = ret.replace(tzinfo = tzinfo)
elif res.tzname and res.tzname in time.tzname:
ret = ret.replace(tzinfo = tz.tzlocal())
elif res.tzoffset == 0:
ret = ret.replace(tzinfo = tz.tzutc())
elif res.tzoffset:
ret = ret.replace(tzinfo = tz.tzoffset(res.tzname, res.tzoffset))
return ret
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset"]
def _parse(self, timestr, dayfirst = None, yearfirst = None, fuzzy = False):
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr)
try:
# year/month/day list
ymd = []
# Index of the month string in ymd
mstridx = -1
len_l = len(l)
i = 0
while i < len_l:
# Check if it's a number
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
value = None
if value is not None:
# Token is a number
len_li = len(l[i])
i += 1
if (len(ymd) == 3 and len_li in (2, 4)
and (i >= len_l or (l[i] != ':' and
info.hms(l[i]) is None))):
# 19990101T23[59]
s = l[i - 1]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and l[i - 1].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = l[i - 1]
if not ymd and l[i - 1].find('.') == -1:
ymd.append(info.convertyear(int(s[:2])))
ymd.append(int(s[2:4]))
ymd.append(int(s[4:]))
else:
# 19990101T235959[.59]
res.hour = int(s[:2])
res.minute = int(s[2:4])
res.second, res.microsecond = _parsems(s[4:])
elif len_li == 8:
# YYYYMMDD
s = l[i - 1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:]))
elif len_li in (12, 14):
# YYYYMMDDhhmm[ss]
s = l[i - 1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:8]))
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li == 14:
res.second = int(s[12:])
elif ((i < len_l and info.hms(l[i]) is not None) or
(i + 1 < len_l and l[i] == ' ' and
info.hms(l[i + 1]) is not None)):
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
if l[i] == ' ':
i += 1
idx = info.hms(l[i])
while True:
if idx == 0:
res.hour = int(value)
if value % 1:
res.minute = int(60 * (value % 1))
elif idx == 1:
res.minute = int(value)
if value % 1:
res.second = int(60 * (value % 1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
if i >= len_l or idx == 2:
break
# 12h00
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
break
else:
i += 1
idx += 1
if i < len_l:
newidx = info.hms(l[i])
if newidx is not None:
idx = newidx
elif i == len_l and l[i - 2] == ' ' and info.hms(l[i - 3]) is not None:
# X h MM or X m SS
idx = info.hms(l[i - 3]) + 1
if idx == 1:
res.minute = int(value)
if value % 1:
res.second = int(60 * (value % 1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
elif i + 1 < len_l and l[i] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
i += 1
value = float(l[i])
res.minute = int(value)
if value % 1:
res.second = int(60 * (value % 1))
i += 1
if i < len_l and l[i] == ':':
res.second, res.microsecond = _parsems(l[i + 1])
i += 2
elif i < len_l and l[i] in ('-', '/', '.'):
sep = l[i]
ymd.append(int(value))
i += 1
if i < len_l and not info.jump(l[i]):
try:
# 01-01[-01]
ymd.append(int(l[i]))
except ValueError:
# 01-Jan[-01]
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd) - 1
else:
return None
i += 1
if i < len_l and l[i] == sep:
# We have three members
i += 1
value = info.month(l[i])
                                    if value is not None:
                                        ymd.append(value)
                                        assert mstridx == -1
                                        mstridx = len(ymd) - 1
else:
ymd.append(int(l[i]))
i += 1
elif i >= len_l or info.jump(l[i]):
if i + 1 < len_l and info.ampm(l[i + 1]) is not None:
# 12 am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i + 1]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i + 1]) == 0:
res.hour = 0
i += 1
else:
# Year, month or day
ymd.append(int(value))
i += 1
elif info.ampm(l[i]) is not None:
# 12am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i]) == 0:
res.hour = 0
i += 1
elif not fuzzy:
return None
else:
i += 1
continue
# Check weekday
value = info.weekday(l[i])
if value is not None:
res.weekday = value
i += 1
continue
# Check month name
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd) - 1
i += 1
if i < len_l:
if l[i] in ('-', '/'):
# Jan-01[-99]
sep = l[i]
i += 1
ymd.append(int(l[i]))
i += 1
if i < len_l and l[i] == sep:
# Jan-01-99
i += 1
ymd.append(int(l[i]))
i += 1
elif (i + 3 < len_l and l[i] == l[i + 2] == ' '
and info.pertain(l[i + 1])):
# Jan of 01
# In this case, 01 is clearly year
try:
value = int(l[i + 3])
except ValueError:
# Wrong guess
pass
else:
# Convert it here to become unambiguous
ymd.append(info.convertyear(value))
i += 4
continue
# Check am/pm
value = info.ampm(l[i])
if value is not None:
if value == 1 and res.hour < 12:
res.hour += 12
elif value == 0 and res.hour == 12:
res.hour = 0
i += 1
continue
# Check for a timezone name
if (res.hour is not None and len(l[i]) <= 5 and
res.tzname is None and res.tzoffset is None and
not [x for x in l[i] if x not in string.ascii_uppercase]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
i += 1
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i < len_l and l[i] in ('+', '-'):
l[i] = ('+', '-')[l[i] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
continue
# Check for a numbered timezone
if res.hour is not None and l[i] in ('+', '-'):
signal = (-1, 1)[l[i] == '+']
i += 1
len_li = len(l[i])
if len_li == 4:
# -0300
res.tzoffset = int(l[i][:2]) * 3600 + int(l[i][2:]) * 60
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
res.tzoffset = int(l[i]) * 3600 + int(l[i + 2]) * 60
i += 2
elif len_li <= 2:
# -[0]3
res.tzoffset = int(l[i][:2]) * 3600
else:
return None
i += 1
res.tzoffset *= signal
# Look for a timezone name between parenthesis
if (i + 3 < len_l and
info.jump(l[i]) and l[i + 1] == '(' and l[i + 3] == ')' and
3 <= len(l[i + 2]) <= 5 and
not [x for x in l[i + 2]
if x not in string.ascii_uppercase]):
# -0300 (BRST)
res.tzname = l[i + 2]
i += 4
continue
# Check jumps
if not (info.jump(l[i]) or fuzzy):
return None
i += 1
# Process year/month/day
len_ymd = len(ymd)
if len_ymd > 3:
# More than three members!?
return None
elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
# One member, or two members with a month string
if mstridx != -1:
res.month = ymd[mstridx]
del ymd[mstridx]
if len_ymd > 1 or mstridx == -1:
if ymd[0] > 31:
res.year = ymd[0]
else:
res.day = ymd[0]
elif len_ymd == 2:
# Two members with numbers
if ymd[0] > 31:
# 99-01
res.year, res.month = ymd
elif ymd[1] > 31:
# 01-99
res.month, res.year = ymd
elif dayfirst and ymd[1] <= 12:
# 13-01
res.day, res.month = ymd
else:
# 01-13
res.month, res.day = ymd
if len_ymd == 3:
# Three members
if mstridx == 0:
res.month, res.day, res.year = ymd
elif mstridx == 1:
if ymd[0] > 31 or (yearfirst and ymd[2] <= 31):
# 99-Jan-01
res.year, res.month, res.day = ymd
else:
# 01-Jan-01
                    # Give precedence to day-first, since
                    # two-digit years are usually hand-written.
res.day, res.month, res.year = ymd
elif mstridx == 2:
# WTF!?
if ymd[1] > 31:
# 01-99-Jan
res.day, res.year, res.month = ymd
else:
# 99-01-Jan
res.year, res.day, res.month = ymd
else:
if ymd[0] > 31 or \
(yearfirst and ymd[1] <= 12 and ymd[2] <= 31):
# 99-01-01
res.year, res.month, res.day = ymd
elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12):
# 13-01-01
res.day, res.month, res.year = ymd
else:
# 01-13-01
res.month, res.day, res.year = ymd
except (IndexError, ValueError, AssertionError):
return None
if not info.validate(res):
return None
return res
DEFAULTPARSER = parser()
def parse(timestr, parserinfo = None, **kwargs):
    # Python 2.x support: datetimes return their string representation as
# bytes in 2.x and unicode in 3.x, so it's reasonable to expect that
# the parser will get both kinds. Internally we use unicode only.
if isinstance(timestr, binary_type):
timestr = timestr.decode()
if parserinfo:
return parser(parserinfo).parse(timestr, **kwargs)
else:
return DEFAULTPARSER.parse(timestr, **kwargs)
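# Usage sketch (illustrative; the exact tzinfo repr depends on the tz module):
#   >>> parse("2003-09-25T10:49:41.5-03:00")
#   datetime.datetime(2003, 9, 25, 10, 49, 41, 500000, tzinfo=tzoffset(None, -10800))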
class _tzparser(object):
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
def parse(self, tzstr):
res = self._result()
l = _timelex.split(tzstr)
try:
len_l = len(l)
i = 0
while i < len_l:
# BRST+3[BRDT[+2]]
j = i
while j < len_l and not [x for x in l[j]
if x in "0123456789:,-+"]:
j += 1
if j != i:
if not res.stdabbr:
offattr = "stdoffset"
res.stdabbr = "".join(l[i:j])
else:
offattr = "dstoffset"
res.dstabbr = "".join(l[i:j])
i = j
if (i < len_l and
(l[i] in ('+', '-') or l[i][0] in "0123456789")):
if l[i] in ('+', '-'):
# Yes, that's right. See the TZ variable
# documentation.
signal = (1, -1)[l[i] == '+']
i += 1
else:
signal = -1
len_li = len(l[i])
if len_li == 4:
# -0300
setattr(res, offattr,
(int(l[i][:2]) * 3600 + int(l[i][2:]) * 60) * signal)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i]) * 3600 + int(l[i + 2]) * 60) * signal)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2]) * 3600 * signal)
else:
return None
i += 1
if res.dstabbr:
break
else:
break
if i < len_l:
for j in range(i, len_l):
if l[j] == ';': l[j] = ','
assert l[i] == ','
i += 1
if i >= len_l:
pass
elif (8 <= l.count(',') <= 9 and
not [y for x in l[i:] if x != ','
for y in x if y not in "0123456789"]):
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
for x in (res.start, res.end):
x.month = int(l[i])
i += 2
if l[i] == '-':
value = int(l[i + 1]) * -1
i += 1
else:
value = int(l[i])
i += 2
if value:
x.week = value
x.weekday = (int(l[i]) - 1) % 7
else:
x.day = int(l[i])
i += 2
x.time = int(l[i])
i += 2
if i < len_l:
if l[i] in ('-', '+'):
signal = (-1, 1)[l[i] == "+"]
i += 1
else:
signal = 1
res.dstoffset = (res.stdoffset + int(l[i])) * signal
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
'.', '-', ':')
for y in x if y not in "0123456789"]):
for x in (res.start, res.end):
if l[i] == 'J':
# non-leap year day (1 based)
i += 1
x.jyday = int(l[i])
elif l[i] == 'M':
# month[-.]week[-.]weekday
i += 1
x.month = int(l[i])
i += 1
assert l[i] in ('-', '.')
i += 1
x.week = int(l[i])
if x.week == 5:
x.week = -1
i += 1
assert l[i] in ('-', '.')
i += 1
x.weekday = (int(l[i]) - 1) % 7
else:
# year day (zero based)
x.yday = int(l[i]) + 1
i += 1
if i < len_l and l[i] == '/':
i += 1
# start time
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2]) * 3600 + int(l[i][2:]) * 60)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
i += 2
if i + 1 < len_l and l[i + 1] == ':':
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2]) * 3600)
else:
return None
i += 1
assert i == len_l or l[i] == ','
i += 1
assert i >= len_l
except (IndexError, ValueError, AssertionError):
return None
return res
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
return DEFAULTTZPARSER.parse(tzstr)
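# Example (illustrative): _parsetz("EST5EDT") returns a result with
# stdabbr='EST', stdoffset=-18000 and dstabbr='EDT'; tz.tzstr is the consumer
# that turns such strings into usable tzinfo objects.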
def _parsems(value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
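# Examples: _parsems("41") -> (41, 0); _parsems("41.5") -> (41, 500000),
# because the fractional part is right-padded to six microsecond digits.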
# vim:ts=4:sw=4:et
| gpl-3.0 |
rishigb/bro | remindMe/venv/lib/python2.7/site-packages/wheel/bdist_wheel.py | 232 | 17441 | """
Create a wheel (.whl) distribution.
A wheel is a built archive format.
"""
import csv
import hashlib
import os
import subprocess
import warnings
import shutil
import json
import wheel
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import pkg_resources
safe_name = pkg_resources.safe_name
safe_version = pkg_resources.safe_version
from shutil import rmtree
from email.generator import Generator
from distutils.util import get_platform
from distutils.core import Command
from distutils.sysconfig import get_python_version
from distutils import log as logger
from .pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
from .util import native, open_for_csv
from .archive import archive_wheelfile
from .pkginfo import read_pkg_info, write_pkg_info
from .metadata import pkginfo_to_dict
from . import pep425tags, metadata
def safer_name(name):
return safe_name(name).replace('-', '_')
def safer_version(version):
return safe_version(version).replace('-', '_')
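# Example: safer_name("my-package") -> "my_package". Dashes must be rewritten
# because '-' separates the name/version/tag fields of a wheel filename
# (name-version-pytag-abitag-plattag.whl).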
class bdist_wheel(Command):
description = 'create a wheel distribution'
user_options = [('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
('universal', None,
"make a universal wheel"
" (default: false)"),
('python-tag=', None,
"Python implementation compatibility tag"
" (default: py%s)" % get_impl_ver()[0]),
]
boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
def initialize_options(self):
self.bdist_dir = None
self.data_dir = None
self.plat_name = None
self.plat_tag = None
self.format = 'zip'
self.keep_temp = False
self.dist_dir = None
self.distinfo_dir = None
self.egginfo_dir = None
self.root_is_pure = None
self.skip_build = None
self.relative = False
self.owner = None
self.group = None
self.universal = False
self.python_tag = 'py' + get_impl_ver()[0]
self.plat_name_supplied = False
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wheel')
self.data_dir = self.wheel_dist_name + '.data'
self.plat_name_supplied = self.plat_name is not None
need_options = ('dist_dir', 'plat_name', 'skip_build')
self.set_undefined_options('bdist',
*zip(need_options, need_options))
self.root_is_pure = not (self.distribution.has_ext_modules()
or self.distribution.has_c_libraries())
# Support legacy [wheel] section for setting universal
wheel = self.distribution.get_option_dict('wheel')
if 'universal' in wheel:
# please don't define this in your global configs
val = wheel['universal'][1].strip()
if val.lower() in ('1', 'true', 'yes'):
self.universal = True
@property
def wheel_dist_name(self):
"""Return distribution full name with - replaced with _"""
return '-'.join((safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version())))
def get_tag(self):
# bdist sets self.plat_name if unset, we should only use it for purepy
# wheels if the user supplied it.
if self.plat_name_supplied:
plat_name = self.plat_name
elif self.root_is_pure:
plat_name = 'any'
else:
plat_name = self.plat_name or get_platform()
plat_name = plat_name.replace('-', '_').replace('.', '_')
if self.root_is_pure:
if self.universal:
impl = 'py2.py3'
else:
impl = self.python_tag
tag = (impl, 'none', plat_name)
else:
impl_name = get_abbr_impl()
impl_ver = get_impl_ver()
# PEP 3149
abi_tag = str(get_abi_tag()).lower()
tag = (impl_name + impl_ver, abi_tag, plat_name)
supported_tags = pep425tags.get_supported(
supplied_platform=plat_name if self.plat_name_supplied else None)
# XXX switch to this alternate implementation for non-pure:
assert tag == supported_tags[0]
return tag
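        # Illustrative outcomes (platform-dependent; shown for an assumed
        # CPython 2.7 UCS-4 build on 64-bit Linux): a project with C
        # extensions yields ('cp27', 'cp27mu', 'linux_x86_64'), while a pure
        # --universal build yields ('py2.py3', 'none', 'any').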
def get_archive_basename(self):
"""Return archive name without extension"""
impl_tag, abi_tag, plat_tag = self.get_tag()
archive_basename = "%s-%s-%s-%s" % (
self.wheel_dist_name,
impl_tag,
abi_tag,
plat_tag)
return archive_basename
def run(self):
build_scripts = self.reinitialize_command('build_scripts')
build_scripts.executable = 'python'
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install',
reinit_subcommands=True)
install.root = self.bdist_dir
install.compile = False
install.skip_build = self.skip_build
install.warn_dir = False
# A wheel without setuptools scripts is more cross-platform.
# Use the (undocumented) `no_ep` option to setuptools'
# install_scripts command to avoid creating entry point scripts.
install_scripts = self.reinitialize_command('install_scripts')
install_scripts.no_ep = True
# Use a custom scheme for the archive, because we have to decide
# at installation time which scheme to use.
for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
setattr(install,
'install_' + key,
os.path.join(self.data_dir, key))
basedir_observed = ''
if os.name == 'nt':
# win32 barfs if any of these are ''; could be '.'?
# (distutils.command.install:change_roots bug)
basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
self.install_libbase = self.install_lib = basedir_observed
setattr(install,
'install_purelib' if self.root_is_pure else 'install_platlib',
basedir_observed)
logger.info("installing to %s", self.bdist_dir)
self.run_command('install')
archive_basename = self.get_archive_basename()
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
archive_root = os.path.join(
self.bdist_dir,
self._ensure_relative(install.install_base))
self.set_undefined_options(
'install_egg_info', ('target', 'egginfo_dir'))
self.distinfo_dir = os.path.join(self.bdist_dir,
'%s.dist-info' % self.wheel_dist_name)
self.egg2dist(self.egginfo_dir,
self.distinfo_dir)
self.write_wheelfile(self.distinfo_dir)
self.write_record(self.bdist_dir, self.distinfo_dir)
# Make the archive
if not os.path.exists(self.dist_dir):
os.makedirs(self.dist_dir)
wheel_name = archive_wheelfile(pseudoinstall_root, archive_root)
# Sign the archive
if 'WHEEL_TOOL' in os.environ:
subprocess.call([os.environ['WHEEL_TOOL'], 'sign', wheel_name])
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_wheel', get_python_version(), wheel_name))
if not self.keep_temp:
if self.dry_run:
logger.info('removing %s', self.bdist_dir)
else:
rmtree(self.bdist_dir)
def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel.__version__ + ')'):
from email.message import Message
msg = Message()
msg['Wheel-Version'] = '1.0' # of the spec
msg['Generator'] = generator
msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
# Doesn't work for bdist_wininst
impl_tag, abi_tag, plat_tag = self.get_tag()
for impl in impl_tag.split('.'):
for abi in abi_tag.split('.'):
for plat in plat_tag.split('.'):
msg['Tag'] = '-'.join((impl, abi, plat))
wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
logger.info('creating %s', wheelfile_path)
with open(wheelfile_path, 'w') as f:
Generator(f, maxheaderlen=0).flatten(msg)
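    # Sketch of a resulting WHEEL file for a universal wheel (generator
    # version is illustrative); the nested loops above expand the
    # compressed tag set into one Tag line per combination:
    #
    #   Wheel-Version: 1.0
    #   Generator: bdist_wheel (0.24.0)
    #   Root-Is-Purelib: true
    #   Tag: py2-none-any
    #   Tag: py3-none-any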
def _ensure_relative(self, path):
# copied from dir_util, deleted
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
def _pkginfo_to_metadata(self, egg_info_path, pkginfo_path):
return metadata.pkginfo_to_metadata(egg_info_path, pkginfo_path)
def license_file(self):
"""Return license filename from a license-file key in setup.cfg, or None."""
metadata = self.distribution.get_option_dict('metadata')
        if 'license_file' not in metadata:
return None
return metadata['license_file'][1]
def setupcfg_requirements(self):
"""Generate requirements from setup.cfg as
('Requires-Dist', 'requirement; qualifier') tuples. From a metadata
section in setup.cfg:
[metadata]
provides-extra = extra1
extra2
requires-dist = requirement; qualifier
another; qualifier2
unqualified
Yields
('Provides-Extra', 'extra1'),
('Provides-Extra', 'extra2'),
('Requires-Dist', 'requirement; qualifier'),
('Requires-Dist', 'another; qualifier2'),
('Requires-Dist', 'unqualified')
"""
metadata = self.distribution.get_option_dict('metadata')
# our .ini parser folds - to _ in key names:
for key, title in (('provides_extra', 'Provides-Extra'),
('requires_dist', 'Requires-Dist')):
            if key not in metadata:
continue
field = metadata[key]
for line in field[1].splitlines():
line = line.strip()
if not line:
continue
yield (title, line)
def add_requirements(self, metadata_path):
"""Add additional requirements from setup.cfg to file metadata_path"""
additional = list(self.setupcfg_requirements())
        if not additional:
            return
pkg_info = read_pkg_info(metadata_path)
if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info:
warnings.warn('setup.cfg requirements overwrite values from setup.py')
del pkg_info['Provides-Extra']
del pkg_info['Requires-Dist']
for k, v in additional:
pkg_info[k] = v
write_pkg_info(metadata_path, pkg_info)
def egg2dist(self, egginfo_path, distinfo_path):
"""Convert an .egg-info directory into a .dist-info directory"""
def adios(p):
"""Appropriately delete directory, file or link."""
if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
shutil.rmtree(p)
elif os.path.exists(p):
os.unlink(p)
adios(distinfo_path)
if not os.path.exists(egginfo_path):
# There is no egg-info. This is probably because the egg-info
# file/directory is not named matching the distribution name used
# to name the archive file. Check for this case and report
# accordingly.
import glob
pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
possible = glob.glob(pat)
err = "Egg metadata expected at %s but not found" % (egginfo_path,)
if possible:
alt = os.path.basename(possible[0])
err += " (%s found - possible misnamed archive file?)" % (alt,)
raise ValueError(err)
if os.path.isfile(egginfo_path):
# .egg-info is a single file
pkginfo_path = egginfo_path
pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path)
os.mkdir(distinfo_path)
else:
# .egg-info is a directory
pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path)
# ignore common egg metadata that is useless to wheel
shutil.copytree(egginfo_path, distinfo_path,
ignore=lambda x, y: set(('PKG-INFO',
'requires.txt',
'SOURCES.txt',
'not-zip-safe',)))
# delete dependency_links if it is only whitespace
dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
with open(dependency_links_path, 'r') as dependency_links_file:
dependency_links = dependency_links_file.read().strip()
if not dependency_links:
adios(dependency_links_path)
write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
# XXX deprecated. Still useful for current distribute/setuptools.
metadata_path = os.path.join(distinfo_path, 'METADATA')
self.add_requirements(metadata_path)
# XXX intentionally a different path than the PEP.
metadata_json_path = os.path.join(distinfo_path, 'metadata.json')
pymeta = pkginfo_to_dict(metadata_path,
distribution=self.distribution)
if 'description' in pymeta:
description_filename = 'DESCRIPTION.rst'
description_text = pymeta.pop('description')
description_path = os.path.join(distinfo_path,
description_filename)
with open(description_path, "wb") as description_file:
description_file.write(description_text.encode('utf-8'))
pymeta['extensions']['python.details']['document_names']['description'] = description_filename
# XXX heuristically copy any LICENSE/LICENSE.txt?
license = self.license_file()
if license:
license_filename = 'LICENSE.txt'
shutil.copy(license, os.path.join(self.distinfo_dir, license_filename))
pymeta['extensions']['python.details']['document_names']['license'] = license_filename
with open(metadata_json_path, "w") as metadata_json:
json.dump(pymeta, metadata_json, sort_keys=True)
adios(egginfo_path)
def write_record(self, bdist_dir, distinfo_dir):
from wheel.util import urlsafe_b64encode
record_path = os.path.join(distinfo_dir, 'RECORD')
record_relpath = os.path.relpath(record_path, bdist_dir)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
dirs.sort()
for f in sorted(files):
yield os.path.join(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relpath = os.path.relpath(path, bdist_dir)
if skip(relpath):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
record_path = os.path.relpath(
path, bdist_dir).replace(os.path.sep, '/')
writer.writerow((record_path, hash, size))
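    # Each resulting RECORD row is "path,hash,size" (illustrative values):
    #
    #   mypkg/__init__.py,sha256=AVCjWjiQyxAK...,1042
    #   mypkg-1.0.dist-info/RECORD,,
    #
    # The RECORD file itself is skip()-ed above, so it is listed with
    # empty hash and size fields.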
| mit |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/test/test_copy_reg.py | 129 | 4256 | import copy_reg
import unittest
from test import test_support
from test.pickletester import ExtensionSaver
class C:
pass
class WithoutSlots(object):
pass
class WithWeakref(object):
__slots__ = ('__weakref__',)
class WithPrivate(object):
__slots__ = ('__spam',)
class WithSingleString(object):
__slots__ = 'spam'
class WithInherited(WithSingleString):
__slots__ = ('eggs',)
class CopyRegTestCase(unittest.TestCase):
def test_class(self):
self.assertRaises(TypeError, copy_reg.pickle,
C, None, None)
def test_noncallable_reduce(self):
self.assertRaises(TypeError, copy_reg.pickle,
type(1), "not a callable")
def test_noncallable_constructor(self):
self.assertRaises(TypeError, copy_reg.pickle,
type(1), int, "not a callable")
def test_bool(self):
import copy
self.assertEqual(True, copy.copy(True))
def test_extension_registry(self):
mod, func, code = 'junk1 ', ' junk2', 0xabcd
e = ExtensionSaver(code)
try:
# Shouldn't be in registry now.
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func, code)
copy_reg.add_extension(mod, func, code)
# Should be in the registry.
self.assertTrue(copy_reg._extension_registry[mod, func] == code)
self.assertTrue(copy_reg._inverted_registry[code] == (mod, func))
# Shouldn't be in the cache.
self.assertNotIn(code, copy_reg._extension_cache)
# Redundant registration should be OK.
copy_reg.add_extension(mod, func, code) # shouldn't blow up
# Conflicting code.
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code + 1)
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func, code + 1)
# Conflicting module name.
self.assertRaises(ValueError, copy_reg.add_extension,
mod[1:], func, code )
self.assertRaises(ValueError, copy_reg.remove_extension,
mod[1:], func, code )
# Conflicting function name.
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func[1:], code)
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func[1:], code)
# Can't remove one that isn't registered at all.
if code + 1 not in copy_reg._inverted_registry:
self.assertRaises(ValueError, copy_reg.remove_extension,
mod[1:], func[1:], code + 1)
finally:
e.restore()
# Shouldn't be there anymore.
self.assertNotIn((mod, func), copy_reg._extension_registry)
# The code *may* be in copy_reg._extension_registry, though, if
# we happened to pick on a registered code. So don't check for
# that.
# Check valid codes at the limits.
for code in 1, 0x7fffffff:
e = ExtensionSaver(code)
try:
copy_reg.add_extension(mod, func, code)
copy_reg.remove_extension(mod, func, code)
finally:
e.restore()
# Ensure invalid codes blow up.
for code in -1, 0, 0x80000000L:
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code)
def test_slotnames(self):
self.assertEqual(copy_reg._slotnames(WithoutSlots), [])
self.assertEqual(copy_reg._slotnames(WithWeakref), [])
expected = ['_WithPrivate__spam']
self.assertEqual(copy_reg._slotnames(WithPrivate), expected)
self.assertEqual(copy_reg._slotnames(WithSingleString), ['spam'])
expected = ['eggs', 'spam']
expected.sort()
result = copy_reg._slotnames(WithInherited)
result.sort()
self.assertEqual(result, expected)
def test_main():
test_support.run_unittest(CopyRegTestCase)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/doc/words/examples/xmpp_client.py | 1 | 2125 | # Copyright (c) 2001-2006 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
from twisted.internet import reactor
from twisted.names.srvconnect import SRVConnector
from twisted.words.xish import domish
from twisted.words.protocols.jabber import xmlstream, client, jid
class XMPPClientConnector(SRVConnector):
def __init__(self, reactor, domain, factory):
SRVConnector.__init__(self, reactor, 'xmpp-client', domain, factory)
def pickServer(self):
host, port = SRVConnector.pickServer(self)
if not self.servers and not self.orderedServers:
            # no SRV record; fall back to the default XMPP client port
port = 5222
return host, port
class Client(object):
def __init__(self, client_jid, secret):
f = client.XMPPClientFactory(client_jid, secret)
f.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, self.connected)
f.addBootstrap(xmlstream.STREAM_END_EVENT, self.disconnected)
f.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.authenticated)
f.addBootstrap(xmlstream.INIT_FAILED_EVENT, self.init_failed)
connector = XMPPClientConnector(reactor, client_jid.host, f)
connector.connect()
def rawDataIn(self, buf):
print "RECV: %s" % unicode(buf, 'utf-8').encode('ascii', 'replace')
def rawDataOut(self, buf):
print "SEND: %s" % unicode(buf, 'utf-8').encode('ascii', 'replace')
def connected(self, xs):
print 'Connected.'
self.xmlstream = xs
# Log all traffic
xs.rawDataInFn = self.rawDataIn
xs.rawDataOutFn = self.rawDataOut
def disconnected(self, xs):
print 'Disconnected.'
reactor.stop()
def authenticated(self, xs):
print "Authenticated."
presence = domish.Element((None, 'presence'))
xs.send(presence)
reactor.callLater(5, xs.sendFooter)
def init_failed(self, failure):
print "Initialization failed."
print failure
self.xmlstream.sendFooter()
client_jid = jid.JID(sys.argv[1])
secret = sys.argv[2]
c = Client(client_jid, secret)
reactor.run()
| apache-2.0 |
mattclay/ansible | test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py | 13 | 14512 | """Sanity test to check integration test aliases."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import textwrap
import os
from ... import types as t
from . import (
SanityVersionNeutral,
SanityMessage,
SanityFailure,
SanitySuccess,
SanityTargets,
SANITY_ROOT,
)
from ...config import (
SanityConfig,
)
from ...target import (
filter_targets,
walk_posix_integration_targets,
walk_windows_integration_targets,
walk_integration_targets,
walk_module_targets,
)
from ..integration.cloud import (
get_cloud_platforms,
)
from ...io import (
read_text_file,
)
from ...util import (
display,
find_python,
raw_command,
)
from ...util_common import (
write_json_test_results,
ResultType,
)
class IntegrationAliasesTest(SanityVersionNeutral):
"""Sanity test to evaluate integration test aliases."""
CI_YML = '.azure-pipelines/azure-pipelines.yml'
TEST_ALIAS_PREFIX = 'shippable' # this will be changed at some point in the future
DISABLED = 'disabled/'
UNSTABLE = 'unstable/'
UNSUPPORTED = 'unsupported/'
EXPLAIN_URL = 'https://docs.ansible.com/ansible/devel/dev_guide/testing/sanity/integration-aliases.html'
TEMPLATE_DISABLED = """
The following integration tests are **disabled** [[explain]({explain_url}#disabled)]:
{tests}
Consider fixing the integration tests before or alongside changes.
"""
TEMPLATE_UNSTABLE = """
The following integration tests are **unstable** [[explain]({explain_url}#unstable)]:
{tests}
Tests may need to be restarted due to failures unrelated to changes.
"""
TEMPLATE_UNSUPPORTED = """
The following integration tests are **unsupported** [[explain]({explain_url}#unsupported)]:
{tests}
Consider running the tests manually or extending test infrastructure to add support.
"""
TEMPLATE_UNTESTED = """
The following modules have **no integration tests** [[explain]({explain_url}#untested)]:
{tests}
Consider adding integration tests before or alongside changes.
"""
ansible_only = True
def __init__(self):
super(IntegrationAliasesTest, self).__init__()
self._ci_config = {} # type: t.Dict[str, t.Any]
self._ci_test_groups = {} # type: t.Dict[str, t.List[int]]
@property
def can_ignore(self): # type: () -> bool
"""True if the test supports ignore entries."""
return False
@property
def no_targets(self): # type: () -> bool
"""True if the test does not use test targets. Mutually exclusive with all_targets."""
return True
def load_ci_config(self, args): # type: (SanityConfig) -> t.Dict[str, t.Any]
"""Load and return the CI YAML configuration."""
if not self._ci_config:
self._ci_config = self.load_yaml(args, self.CI_YML)
return self._ci_config
@property
def ci_test_groups(self): # type: () -> t.Dict[str, t.List[int]]
"""Return a dictionary of CI test names and their group(s)."""
if not self._ci_test_groups:
test_groups = {}
for stage in self._ci_config['stages']:
for job in stage['jobs']:
if job.get('template') != 'templates/matrix.yml':
continue
parameters = job['parameters']
groups = parameters.get('groups', [])
test_format = parameters.get('testFormat', '{0}')
test_group_format = parameters.get('groupFormat', '{0}/{{1}}')
for target in parameters['targets']:
test = target.get('test') or target.get('name')
if groups:
tests_formatted = [test_group_format.format(test_format).format(test, group) for group in groups]
else:
tests_formatted = [test_format.format(test)]
for test_formatted in tests_formatted:
parts = test_formatted.split('/')
key = parts[0]
if key in ('sanity', 'units'):
continue
try:
group = int(parts[-1])
except ValueError:
continue
if group < 1 or group > 99:
continue
group_set = test_groups.setdefault(key, set())
group_set.add(group)
self._ci_test_groups = dict((key, sorted(value)) for key, value in test_groups.items())
return self._ci_test_groups
def format_test_group_alias(self, name, fallback=''):
"""
:type name: str
:type fallback: str
:rtype: str
"""
group_numbers = self.ci_test_groups.get(name, None)
if group_numbers:
if min(group_numbers) != 1:
display.warning('Min test group "%s" in %s is %d instead of 1.' % (name, self.CI_YML, min(group_numbers)), unique=True)
if max(group_numbers) != len(group_numbers):
display.warning('Max test group "%s" in %s is %d instead of %d.' % (name, self.CI_YML, max(group_numbers), len(group_numbers)), unique=True)
if max(group_numbers) > 9:
alias = '%s/%s/group(%s)/' % (self.TEST_ALIAS_PREFIX, name, '|'.join(str(i) for i in range(min(group_numbers), max(group_numbers) + 1)))
elif len(group_numbers) > 1:
alias = '%s/%s/group[%d-%d]/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers), max(group_numbers))
else:
alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers))
elif fallback:
alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, fallback, 1)
else:
raise Exception('cannot find test group "%s" in %s' % (name, self.CI_YML))
return alias
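    # Illustrative return values (group numbers are examples): groups
    # [1, 2, 3] for "linux" give "shippable/linux/group[1-3]/", a single
    # group gives "shippable/linux/group1/", and more than nine groups
    # fall back to the "group(1|2|...)/" regex form built above, with
    # every group number listed.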
def load_yaml(self, args, path): # type: (SanityConfig, str) -> t.Dict[str, t.Any]
"""Load the specified YAML file and return the contents."""
yaml_to_json_path = os.path.join(SANITY_ROOT, self.name, 'yaml_to_json.py')
python = find_python(args.python_version)
return json.loads(raw_command([python, yaml_to_json_path], data=read_text_file(path), capture=True)[0])
def test(self, args, targets):
"""
:type args: SanityConfig
:type targets: SanityTargets
:rtype: TestResult
"""
if args.explain:
return SanitySuccess(self.name)
if not os.path.isfile(self.CI_YML):
return SanityFailure(self.name, messages=[SanityMessage(
message='file missing',
path=self.CI_YML,
)])
results = dict(
comments=[],
labels={},
)
self.load_ci_config(args)
self.check_changes(args, results)
write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results)
messages = []
messages += self.check_posix_targets(args)
messages += self.check_windows_targets()
if messages:
return SanityFailure(self.name, messages=messages)
return SanitySuccess(self.name)
def check_posix_targets(self, args):
"""
:type args: SanityConfig
:rtype: list[SanityMessage]
"""
posix_targets = tuple(walk_posix_integration_targets())
clouds = get_cloud_platforms(args, posix_targets)
cloud_targets = ['cloud/%s/' % cloud for cloud in clouds]
all_cloud_targets = tuple(filter_targets(posix_targets, ['cloud/'], include=True, directories=False, errors=False))
invalid_cloud_targets = tuple(filter_targets(all_cloud_targets, cloud_targets, include=False, directories=False, errors=False))
messages = []
for target in invalid_cloud_targets:
for alias in target.aliases:
if alias.startswith('cloud/') and alias != 'cloud/':
if any(alias.startswith(cloud_target) for cloud_target in cloud_targets):
continue
messages.append(SanityMessage('invalid alias `%s`' % alias, '%s/aliases' % target.path))
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/', '%s/generic/' % self.TEST_ALIAS_PREFIX], include=False,
directories=False, errors=False)),
find=self.format_test_group_alias('linux').replace('linux', 'posix'),
find_incidental=['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX],
)
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['%s/generic/' % self.TEST_ALIAS_PREFIX], include=True, directories=False,
errors=False)),
find=self.format_test_group_alias('generic'),
)
for cloud in clouds:
if cloud == 'httptester':
find = self.format_test_group_alias('linux').replace('linux', 'posix')
find_incidental = ['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX]
else:
find = self.format_test_group_alias(cloud, 'generic')
find_incidental = ['%s/%s/incidental/' % (self.TEST_ALIAS_PREFIX, cloud), '%s/cloud/incidental/' % self.TEST_ALIAS_PREFIX]
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/%s/' % cloud], include=True, directories=False, errors=False)),
find=find,
find_incidental=find_incidental,
)
return messages
def check_windows_targets(self):
"""
:rtype: list[SanityMessage]
"""
windows_targets = tuple(walk_windows_integration_targets())
messages = []
messages += self.check_ci_group(
targets=windows_targets,
find=self.format_test_group_alias('windows'),
find_incidental=['%s/windows/incidental/' % self.TEST_ALIAS_PREFIX],
)
return messages
def check_ci_group(self, targets, find, find_incidental=None):
"""
:type targets: tuple[CompletionTarget]
:type find: str
:type find_incidental: list[str] | None
:rtype: list[SanityMessage]
"""
all_paths = set(target.path for target in targets)
supported_paths = set(target.path for target in filter_targets(targets, [find], include=True, directories=False, errors=False))
unsupported_paths = set(target.path for target in filter_targets(targets, [self.UNSUPPORTED], include=True, directories=False, errors=False))
if find_incidental:
incidental_paths = set(target.path for target in filter_targets(targets, find_incidental, include=True, directories=False, errors=False))
else:
incidental_paths = set()
unassigned_paths = all_paths - supported_paths - unsupported_paths - incidental_paths
conflicting_paths = supported_paths & unsupported_paths
unassigned_message = 'missing alias `%s` or `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
conflicting_message = 'conflicting alias `%s` and `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
messages = []
for path in unassigned_paths:
messages.append(SanityMessage(unassigned_message, '%s/aliases' % path))
for path in conflicting_paths:
messages.append(SanityMessage(conflicting_message, '%s/aliases' % path))
return messages
def check_changes(self, args, results):
"""
:type args: SanityConfig
:type results: dict[str, any]
"""
integration_targets = list(walk_integration_targets())
module_targets = list(walk_module_targets())
integration_targets_by_name = dict((target.name, target) for target in integration_targets)
module_names_by_path = dict((target.path, target.module) for target in module_targets)
disabled_targets = []
unstable_targets = []
unsupported_targets = []
for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:
for target in args.metadata.change_description.focused_command_targets[command]:
if self.DISABLED in integration_targets_by_name[target].aliases:
disabled_targets.append(target)
elif self.UNSTABLE in integration_targets_by_name[target].aliases:
unstable_targets.append(target)
elif self.UNSUPPORTED in integration_targets_by_name[target].aliases:
unsupported_targets.append(target)
untested_modules = []
for path in args.metadata.change_description.no_integration_paths:
module = module_names_by_path.get(path)
if module:
untested_modules.append(module)
comments = [
self.format_comment(self.TEMPLATE_DISABLED, disabled_targets),
self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets),
self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets),
self.format_comment(self.TEMPLATE_UNTESTED, untested_modules),
]
comments = [comment for comment in comments if comment]
labels = dict(
needs_tests=bool(untested_modules),
disabled_tests=bool(disabled_targets),
unstable_tests=bool(unstable_targets),
unsupported_tests=bool(unsupported_targets),
)
results['comments'] += comments
results['labels'].update(labels)
def format_comment(self, template, targets):
"""
:type template: str
:type targets: list[str]
:rtype: str | None
"""
if not targets:
return None
tests = '\n'.join('- %s' % target for target in targets)
data = dict(
explain_url=self.EXPLAIN_URL,
tests=tests,
)
message = textwrap.dedent(template).strip().format(**data)
return message
| gpl-3.0 |
nevillehay/gbi-nevillehay | src/error.py | 2 | 1775 | #!/usr/local/bin/python
# Copyright (c) 2011 GhostBSD
#
# See COPYING for licence terms.
import gtk
from subprocess import Popen
lyrics = """
Please report to
, and be sure
to provide /tmp/.pc-sysinstall/pc-sysinstall.log.
"""
class PyApp:
def on_reboot(self, widget):
Popen('sudo reboot', shell=True)
gtk.main_quit()
def on_close(self, widget):
gtk.main_quit()
def __init__(self):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_position(gtk.WIN_POS_CENTER)
window.set_border_width(8)
window.connect("destroy", gtk.main_quit)
window.set_title("Installation Error")
# window.set_icon_from_file("/usr/local/lib/gbi/logo.png")
box1 = gtk.VBox(False, 0)
window.add(box1)
box1.show()
box2 = gtk.VBox(False, 10)
box2.set_border_width(10)
box1.pack_start(box2, True, True, 0)
box2.show()
title = gtk.Label()
title.set_use_markup(True)
title.set_markup('<b><span size="larger">Installation has failed!</span></b>')
label = gtk.Label(lyrics)
label.set_use_markup(True)
label.set_markup("Please report the issue to <a href='http://issues.ghostbsd.org/my_view_page.php'>GhostBSD issue system</a>,\nand be sure to provide tmp/.pc-sysinstall/pc-sysinstall.log.")
box2.pack_start(title)
box2.pack_start(label)
box2 = gtk.HBox(False, 10)
box2.set_border_width(5)
box1.pack_start(box2, False, True, 0)
box2.show()
table = gtk.Table(1, 2, True)
ok = gtk.Button("Ok")
ok.connect("clicked", self.on_close)
table.attach(ok, 0, 2, 0, 1)
box2.pack_start(table)
window.show_all()
PyApp()
gtk.main()
| bsd-3-clause |
DirtyPiece/dancestudio | Build/Tools/Python27/Lib/sgmllib.py | 306 | 17884 | """A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
from warnings import warnpy3k
warnpy3k("the sgmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
"""Exception raised for all parse errors."""
pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
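# A minimal derived parser following that convention might look like this
# (illustrative sketch, not part of the module):
#
#   class TitleParser(SGMLParser):
#       def start_title(self, attrs):   # called for <title ...>
#           self.handle_data('[title starts]')
#       def end_title(self):            # called for </title>
#           self.handle_data('[title ends]')
#
#   p = TitleParser(); p.feed('<title>x</title>'); p.close()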
class SGMLParser(markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
def __init__(self, verbose=0):
"""Initialize and reset this instance."""
self.verbose = verbose
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
def setliteral(self, *args):
"""Enter literal mode (CDATA).
Intended for derived classes only.
"""
self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle the remaining data."""
self.goahead(1)
def error(self, message):
raise SGMLParseError(message)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j:
self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if rawdata.startswith("</", i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if self.literal:
if n > (i + 1):
self.handle_data("<")
i = i+1
else:
# incomplete
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
continue
if rawdata.startswith("<?", i):
k = self.parse_pi(i)
if k < 0: break
i = i+k
continue
if rawdata.startswith("<!", i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
k = self.parse_declaration(i)
if k < 0: break
i = k
continue
elif rawdata[i] == '&':
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
self.error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
# Internal -- parse processing instr, return length or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<?':
self.error('unexpected call to parse_pi()')
match = piclose.search(rawdata, i+2)
if not match:
return -1
j = match.start(0)
self.handle_pi(rawdata[i+2: j])
j = match.end(0)
return j-i
def get_starttag_text(self):
return self.__starttag_text
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
start_pos = i
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
self.__starttag_text = '<%s/' % tag
tag = tag.lower()
k = match.end(0)
self.finish_shorttag(tag, data)
self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
return k
# XXX The following should skip matching quotes (' or ")
# As a shortcut way to exit, this isn't so bad, but shouldn't
# be used to locate the actual end of the start tag since the
# < or > characters may be embedded in an attribute value.
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
self.error('unexpected call to parse_starttag')
k = match.end(0)
tag = rawdata[i+1:k].lower()
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
else:
if (attrvalue[:1] == "'" == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
# strip quotes
attrvalue = attrvalue[1:-1]
attrvalue = self.entity_or_charref.sub(
self._convert_ref, attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.__starttag_text = rawdata[start_pos:j]
self.finish_starttag(tag, attrs)
return j
# Internal -- convert entity or character reference
def _convert_ref(self, match):
if match.group(2):
return self.convert_charref(match.group(2)) or \
'&#%s%s' % match.groups()[1:]
elif match.group(3):
return self.convert_entityref(match.group(1)) or \
'&%s;' % match.group(1)
else:
return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = rawdata[i+2:j].strip().lower()
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
else:
self.report_unbalanced(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
def convert_charref(self, name):
"""Convert character reference, may be overridden."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return self.convert_codepoint(n)
def convert_codepoint(self, codepoint):
return chr(codepoint)
def handle_charref(self, name):
"""Handle character reference, no need to override."""
replacement = self.convert_charref(name)
if replacement is None:
self.unknown_charref(name)
else:
self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
def convert_entityref(self, name):
"""Convert entity references.
        As an alternative to overriding this method, one can tailor the
results by setting up the self.entitydefs mapping appropriately.
"""
table = self.entitydefs
if name in table:
return table[name]
else:
return
def handle_entityref(self, name):
"""Handle entity references, no need to override."""
replacement = self.convert_entityref(name)
if replacement is None:
self.unknown_entityref(name)
else:
self.handle_data(replacement)
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def unknown_decl(self, data):
self.flush()
print '*** unknown decl: [' + data + ']'
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if args is None:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
| mit |
pgum/emi1 | lib/python3.5/site-packages/pip/_vendor/progress/counter.py | 510 | 1502 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from . import Infinite, Progress
from .helpers import WriteMixin
class Counter(WriteMixin, Infinite):
message = ''
hide_cursor = True
def update(self):
self.write(str(self.index))
class Countdown(WriteMixin, Progress):
hide_cursor = True
def update(self):
self.write(str(self.remaining))
class Stack(WriteMixin, Progress):
phases = (u' ', u'▁', u'▂', u'▃', u'▄', u'▅', u'▆', u'▇', u'█')
hide_cursor = True
def update(self):
nphases = len(self.phases)
i = min(nphases - 1, int(self.progress * nphases))
self.write(self.phases[i])
class Pie(Stack):
phases = (u'○', u'◔', u'◑', u'◕', u'●')
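# Usage sketch (illustrative; next() comes from the Infinite/Progress base
# classes in the parent module and triggers update() on each step):
#
#   counter = Counter(message='Loaded ')
#   for _ in range(100):
#       counter.next()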
| gpl-3.0 |
SickGear/SickGear | lib/js2py/constructors/jsfunction.py | 9 | 1399 | from ..base import *
try:
from ..translators.translator import translate_js
except:
pass
@Js
def Function():
# convert arguments to python list of strings
a = [e.to_string().value for e in arguments.to_list()]
body = ';'
args = ()
if len(a):
body = '%s;' % a[-1]
args = a[:-1]
# translate this function to js inline function
js_func = '(function (%s) {%s})' % (','.join(args), body)
# now translate js inline to python function
py_func = translate_js(js_func, '')
    # add the function scope to the global scope
    # a bit messy solution, but it works :)
globals()['var'] = PyJs.GlobalObject
# define py function and return it
temp = executor(py_func, globals())
temp.source = '{%s}' % body
temp.func_name = 'anonymous'
return temp
def executor(f, glob):
exec (f, globals())
return globals()['PyJs_anonymous_0_']
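# Illustrative trace of the pipeline above: for the argument strings
# ('a', 'b', 'return a+b') the Function body builds
#   js_func = '(function (a,b) {return a+b;})'
# translate_js() compiles that into Python source defining
# PyJs_anonymous_0_, which executor() retrieves from globals().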
#new statement simply calls Function
Function.create = Function
#set constructor property inside FunctionPrototype
fill_in_props(FunctionPrototype, {'constructor': Function}, default_attrs)
#attach prototype to Function constructor
Function.define_own_property(
'prototype', {
'value': FunctionPrototype,
'enumerable': False,
'writable': False,
'configurable': False
})
# Fix Function length (it's 0 and should be 1)
Function.own['length']['value'] = Js(1)
| gpl-3.0 |
agry/NGECore2 | scripts/mobiles/dynamicgroups/dantooine_kunga.py | 2 | 1024 | # Spawn Group file created with PSWG Planetary Spawn Tool
import sys
from java.util import Vector
from services.spawn import DynamicSpawnGroup
from services.spawn import MobileTemplate
def addDynamicGroup(core):
dynamicGroup = DynamicSpawnGroup()
mobileTemplates = Vector()
mobileTemplates.add('kunga_clan_leader')
mobileTemplates.add('kunga_clan_primalist')
mobileTemplates.add('kunga_harvester')
mobileTemplates.add('kunga_herbalist')
mobileTemplates.add('kunga_hunter')
mobileTemplates.add('kunga_loreweaver')
mobileTemplates.add('kunga_rockshaper')
mobileTemplates.add('kunga_scout')
mobileTemplates.add('kunga_shaman')
mobileTemplates.add('kunga_soothsayer')
mobileTemplates.add('kunga_tribesman')
mobileTemplates.add('kunga_warrior')
dynamicGroup.setMobiles(mobileTemplates)
dynamicGroup.setGroupMembersNumber(-3)
dynamicGroup.setName('dantooine_kunga')
dynamicGroup.setMaxSpawns(-1)
dynamicGroup.setMinSpawnDistance(150)
core.spawnService.addDynamicGroup('dantooine_kunga', dynamicGroup)
return
| lgpl-3.0 |
phobson/pycvc | setup.py | 2 | 1510 | # Setup script for the pycvc package
#
# Usage: python setup.py install
#
import os
from setuptools import setup, find_packages
DESCRIPTION = "pycvc: Analyze CVC stormwater data"
LONG_DESCRIPTION = DESCRIPTION
NAME = "pycvc"
VERSION = "0.3.0"
AUTHOR = "Paul Hobson (Geosyntec Consultants)"
AUTHOR_EMAIL = "[email protected]"
URL = ""
DOWNLOAD_URL = ""
LICENSE = "BSD 3-clause"
PACKAGES = find_packages(exclude=[])
PLATFORMS = "Python 3.4 and later."
CLASSIFIERS = [
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Intended Audience :: Science/Research",
"Topic :: Formats and Protocols :: Data Formats",
"Topic :: Scientific/Engineering :: Earth Sciences",
"Topic :: Software Development :: Libraries :: Python Modules",
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
]
INSTALL_REQUIRES = ['wqio', 'pybmpdb', 'pynsqd']
PACKAGE_DATA = {
'pycvc.tex': ['*.tex'],
'pycvc.tests.testdata': ['*.csv', '*.accdb'],
'pycvc.tests.baseline_images.viz_tests': ['*.png'],
}
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=PACKAGES,
package_data=PACKAGE_DATA,
platforms=PLATFORMS,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
zip_safe=False
)
| bsd-3-clause |
xen0l/ansible | lib/ansible/modules/network/f5/bigip_hostname.py | 14 | 7419 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_hostname
short_description: Manage the hostname of a BIG-IP
description:
- Manage the hostname of a BIG-IP.
version_added: 2.3
options:
hostname:
description:
- Hostname of the BIG-IP host.
required: True
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Matthew Lam (@mryanlam)
'''
EXAMPLES = r'''
- name: Set the hostname of the BIG-IP
bigip_hostname:
hostname: bigip.localhost.localdomain
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
hostname:
description: The new hostname of the device
returned: changed
type: string
sample: big-ip01.internal
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_attributes = ['hostname']
updatables = ['hostname']
returnables = ['hostname']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
@property
def hostname(self):
if self._values['hostname'] is None:
return None
return str(self._values['hostname'])
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
pass
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = ApiParameters()
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
result = dict()
try:
changed = self.update()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def read_current_from_device(self):
resource = self.client.api.tm.sys.global_settings.load()
result = resource.attrs
collection = self.client.api.tm.cm.devices.get_collection()
self_device = next((x.name for x in collection if x.selfDevice == "true"), None)
result['self_device'] = self_device
return ApiParameters(params=result)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.sys.global_settings.load()
resource.modify(**params)
if self.have.self_device:
self.client.api.tm.cm.devices.exec_cmd(
'mv', name=self.have.self_device, target=self.want.hostname
)
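    # Note (illustrative): besides updating the global-settings hostname,
    # the 'mv' command above renames the CM device object for the current
    # unit so its name matches the new hostname.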
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
hostname=dict(
required=True
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as e:
cleanup_tokens(client)
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
Dioptas/Dioptas | dioptas/controller/integration/ImageController.py | 1 | 49794 | # -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher ([email protected])
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from functools import partial
import numpy as np
from PIL import Image
from qtpy import QtWidgets, QtCore
from ...widgets.UtilityWidgets import open_file_dialog, open_files_dialog, save_file_dialog
# imports for type hinting in PyCharm -- DO NOT DELETE
from ...widgets.integration import IntegrationWidget
from ...model.DioptasModel import DioptasModel
from ...model.util.Pattern import Pattern
from ...model.util.HelperModule import get_partial_index, get_partial_value
from .EpicsController import EpicsController
class ImageController(object):
"""
The ImageController manages the Image actions in the Integration Window. It connects the file actions, as
well as interaction with the image_view.
"""
def __init__(self, widget, dioptas_model):
"""
:param widget: Reference to IntegrationView
:param dioptas_model: Reference to DioptasModel object
:type widget: IntegrationWidget
:type dioptas_model: DioptasModel
"""
self.widget = widget
self.model = dioptas_model
self.epics_controller = EpicsController(self.widget, self.model)
self.img_docked = True
self.view_mode = 'normal' # modes available: normal, alternative
self.roi_active = False
self.clicked_tth = None
self.clicked_azi = None
self.vertical_splitter_alternative_state = None
self.vertical_splitter_normal_state = None
self.horizontal_splitter_alternative_state = None
self.horizontal_splitter_normal_state = None
self.initialize()
self.create_signals()
self.create_mouse_behavior()
def initialize(self):
self.update_img_control_widget()
self.plot_img()
self.plot_mask()
self.widget.img_widget.auto_level()
def plot_img(self, auto_scale=None):
"""
Plots the current image loaded in self.img_data.
:param auto_scale:
            Determines if intensities should be auto-scaled. If value is None, it will use the parameter saved in
            the object (self._auto_scale).
"""
if auto_scale is None:
auto_scale = self.widget.img_autoscale_btn.isChecked()
if self.widget.integration_image_widget.show_background_subtracted_img_btn.isChecked():
self.widget.img_widget.plot_image(self.model.img_model.img_data, False)
else:
self.widget.img_widget.plot_image(self.model.img_model.raw_img_data, False)
if auto_scale:
self.widget.img_widget.auto_level()
def plot_cake(self, auto_scale=None):
"""
Plots the cake saved in the calibration data
:param auto_scale:
Determines if the intensity should be auto-scaled. If value is None it will use the parameter saved in the
object (self._auto_scale)
"""
if auto_scale is None:
auto_scale = self.widget.img_autoscale_btn.isChecked()
shift_amount = self.widget.cake_shift_azimuth_sl.value()
self.widget.cake_widget.plot_image(np.roll(self.model.cake_data, shift_amount, axis=0))
self.plot_cake_integral()
self.update_cake_axes_range()
if auto_scale:
self.widget.cake_widget.auto_level()
def plot_cake_integral(self, tth=None):
if not self.widget.cake_widget.cake_integral_plot.isVisible() or self.clicked_tth is None:
return
if tth is None:
tth = self.clicked_tth
x, y = self.model.calibration_model.cake_integral(
tth,
self.widget.integration_control_widget.integration_options_widget.cake_integral_width_sb.value()
)
shift_amount = self.widget.cake_shift_azimuth_sl.value()
self.widget.cake_widget.plot_cake_integral(x, np.roll(y, shift_amount))
def save_cake_integral(self):
img_filename, _ = os.path.splitext(os.path.basename(self.model.img_model.filename))
filename = save_file_dialog(
self.widget, "Save Cake Integral Data.",
os.path.join(self.model.working_directories['pattern'],
img_filename + '.xy'))
if filename != '':
integral_pattern = Pattern(*self.widget.cake_widget.cake_integral_item.getData())
integral_pattern.save(filename)
def plot_mask(self):
"""
Plots the mask data.
"""
if self.model.use_mask and self.widget.img_mode == 'Image':
self.widget.img_widget.plot_mask(self.model.mask_model.get_img())
self.widget.img_mask_btn.setChecked(True)
else:
self.widget.img_widget.plot_mask(np.zeros(self.model.mask_model.get_img().shape))
self.widget.img_mask_btn.setChecked(False)
def update_mask_transparency(self):
"""
        Changes the colormap of the mask according to the transparency option selected in the GUI. The resulting
        mask will be either semi-transparent or solid.
"""
self.model.transparent_mask = self.widget.mask_transparent_cb.isChecked()
if self.model.transparent_mask:
self.widget.img_widget.set_mask_color([255, 0, 0, 100])
else:
self.widget.img_widget.set_mask_color([255, 0, 0, 255])
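        # The two RGBA values above differ only in alpha: 100 gives a
        # semi-transparent red overlay, 255 a solid one.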
def create_signals(self):
        """
        Creates all the connections of the GUI elements.
        """
        self.model.configuration_selected.connect(self.update_gui_from_configuration)
        self.model.img_changed.connect(self.update_img_control_widget)
        self.model.img_changed.connect(self.plot_img)
        self.model.img_changed.connect(self.plot_mask)
self.widget.img_step_file_widget.next_btn.clicked.connect(self.load_next_img)
self.widget.img_step_file_widget.previous_btn.clicked.connect(self.load_previous_img)
self.widget.load_img_btn.clicked.connect(self.load_file)
self.widget.img_filename_txt.editingFinished.connect(self.filename_txt_changed)
self.widget.img_directory_txt.editingFinished.connect(self.directory_txt_changed)
self.widget.img_directory_btn.clicked.connect(self.img_directory_btn_click)
self.widget.img_step_series_widget.next_btn.clicked.connect(self.load_next_series_img)
self.widget.img_step_series_widget.previous_btn.clicked.connect(self.load_prev_series_img)
self.widget.img_step_series_widget.pos_txt.editingFinished.connect(self.load_series_img)
self.widget.file_info_btn.clicked.connect(self.show_file_info)
self.widget.integration_control_widget.img_control_widget.batch_btn.clicked.connect(self.show_batch_frame)
self.widget.img_step_file_widget.browse_by_name_rb.clicked.connect(self.set_iteration_mode_number)
self.widget.img_step_file_widget.browse_by_time_rb.clicked.connect(self.set_iteration_mode_time)
self.widget.image_control_widget.sources_cb.currentTextChanged.connect(self.select_source)
###
# Image widget image specific controls
self.widget.img_roi_btn.clicked.connect(self.click_roi_btn)
self.widget.img_mask_btn.clicked.connect(self.change_mask_mode)
self.widget.mask_transparent_cb.clicked.connect(self.update_mask_transparency)
###
# Image Widget cake specific controls
self.widget.img_phases_btn.clicked.connect(self.toggle_show_phases)
self.widget.cake_shift_azimuth_sl.valueChanged.connect(partial(self.plot_cake, None))
self.widget.cake_shift_azimuth_sl.valueChanged.connect(self._update_cake_mouse_click_pos)
self.widget.cake_shift_azimuth_sl.valueChanged.connect(self.update_cake_azimuth_axis)
self.widget.cake_shift_azimuth_sl.valueChanged.connect(partial(self.plot_cake_integral, None))
self.widget.integration_image_widget.cake_view.img_view_box.sigRangeChanged.connect(self.update_cake_axes_range)
self.widget.pattern_q_btn.clicked.connect(partial(self.set_cake_axis_unit, 'q_A^-1'))
self.widget.pattern_tth_btn.clicked.connect(partial(self.set_cake_axis_unit, '2th_deg'))
self.widget.integration_control_widget.integration_options_widget.cake_integral_width_sb.valueChanged. \
connect(partial(self.plot_cake_integral, None))
self.widget.integration_control_widget.integration_options_widget.cake_save_integral_btn.clicked. \
connect(self.save_cake_integral)
###
# General Image Widget controls
self.widget.img_dock_btn.clicked.connect(self.img_dock_btn_clicked)
self.widget.img_autoscale_btn.clicked.connect(self.img_autoscale_btn_clicked)
self.widget.img_mode_btn.clicked.connect(self.change_view_mode)
self.widget.integration_image_widget.show_background_subtracted_img_btn.clicked.connect(
self.show_background_subtracted_img_btn_clicked)
self.widget.qa_save_img_btn.clicked.connect(self.save_img)
self.widget.load_calibration_btn.clicked.connect(self.load_calibration)
# signals
self.widget.change_view_btn.clicked.connect(self.change_view_btn_clicked)
self.widget.autoprocess_cb.toggled.connect(self.auto_process_cb_click)
def create_mouse_behavior(self):
"""
Creates the signal connections of mouse interactions
"""
self.widget.img_widget.mouse_left_clicked.connect(self.img_mouse_click)
self.widget.img_widget.mouse_moved.connect(self.show_img_mouse_position)
self.widget.cake_widget.mouse_left_clicked.connect(self.img_mouse_click)
self.widget.cake_widget.mouse_moved.connect(self.show_img_mouse_position)
self.widget.pattern_widget.mouse_left_clicked.connect(self.pattern_mouse_click)
def load_file(self, *args, **kwargs):
filename = kwargs.get('filename', None)
if filename is None:
filenames = open_files_dialog(self.widget, "Load image data file(s)",
self.model.working_directories['image'])
else:
filenames = [filename]
if filenames is not None and len(filenames) != 0:
self.model.working_directories['image'] = os.path.dirname(str(filenames[0]))
if len(filenames) == 1:
self.model.img_model.load(str(filenames[0]))
else:
if self.widget.img_batch_mode_add_rb.isChecked():
self.model.img_model.blockSignals(True)
self.model.img_model.load(str(filenames[0]))
for ind in range(1, len(filenames)):
self.model.img_model.add(filenames[ind])
self.model.img_model.blockSignals(False)
self.model.img_model.img_changed.emit()
elif self.widget.img_batch_mode_integrate_rb.isChecked():
self._load_multiple_files(filenames)
elif self.widget.img_batch_mode_image_save_rb.isChecked():
self._save_multiple_image_files(filenames)
def _load_multiple_files(self, filenames):
if not self.model.calibration_model.is_calibrated:
self.widget.show_error_msg("Can not integrate multiple images without calibration.")
return
working_directory = self._get_pattern_working_directory()
if working_directory == '':
return # abort file processing if no directory was selected
progress_dialog = self.widget.get_progress_dialog("Integrating multiple files.", "Abort Integration",
len(filenames))
self._set_up_batch_processing()
for ind in range(len(filenames)):
filename = str(filenames[ind])
base_filename = os.path.basename(filename)
progress_dialog.setValue(ind)
progress_dialog.setLabelText("Integrating: " + base_filename)
self.model.img_model.blockSignals(True)
self.model.img_model.load(filename)
self.model.img_model.blockSignals(False)
x, y = self.integrate_pattern()
self._save_pattern(base_filename, working_directory, x, y)
QtWidgets.QApplication.processEvents()
if progress_dialog.wasCanceled():
break
progress_dialog.close()
self._tear_down_batch_processing()
def _get_pattern_working_directory(self):
if self.widget.pattern_autocreate_cb.isChecked():
working_directory = self.model.working_directories['pattern']
else:
            # if auto-creation is disabled, a file dialog opens up to choose the output directory
working_directory = str(QtWidgets.QFileDialog.getExistingDirectory(
self.widget, "Please choose the output directory for the integrated Patterns.",
self.model.working_directories['pattern']))
return working_directory
def _set_up_batch_processing(self):
self.model.blockSignals(True)
def _tear_down_batch_processing(self):
self.model.blockSignals(False)
self.model.img_changed.emit()
self.model.pattern_changed.emit()
def _save_multiple_image_files(self, filenames):
working_directory = str(QtWidgets.QFileDialog.getExistingDirectory(
self.widget, "Please choose the output directory for the Images.",
self.model.working_directories['image']))
if working_directory == '':
return
self._set_up_batch_processing()
progress_dialog = self.widget.get_progress_dialog("Saving multiple image files.", "Abort",
len(filenames))
QtWidgets.QApplication.processEvents()
self.model.current_configuration.auto_integrate_pattern = False
for ind, filename in enumerate(filenames):
base_filename = os.path.basename(filename)
progress_dialog.setValue(ind)
progress_dialog.setLabelText("Saving: " + base_filename)
self.model.img_model.load(str(filename))
self.save_img(os.path.join(working_directory, 'batch_' + base_filename))
QtWidgets.QApplication.processEvents()
if progress_dialog.wasCanceled():
break
self.model.current_configuration.auto_integrate_pattern = True
progress_dialog.close()
self._tear_down_batch_processing()
def _save_pattern(self, base_filename, working_directory, x, y):
file_endings = self._get_pattern_file_endings()
for file_ending in file_endings:
filename = os.path.join(working_directory, os.path.splitext(base_filename)[0] + file_ending)
self.model.pattern_model.set_pattern(x, y, filename, unit=self.get_integration_unit())
if file_ending == '.xy':
self.model.pattern_model.save_pattern(filename, header=self._create_pattern_header())
else:
self.model.pattern_model.save_pattern(filename)
# save the background subtracted filename
if self.model.pattern.has_background():
directory = os.path.join(working_directory, 'bkg_subtracted')
if not os.path.exists(directory):
os.mkdir(directory)
filename = os.path.join(directory, self.model.pattern.name + file_ending)
if file_ending == '.xy':
self.model.pattern_model.save_pattern(filename, header=self._create_pattern_header(),
subtract_background=True)
else:
self.model.pattern_model.save_pattern(filename, subtract_background=True)
def _create_pattern_header(self):
header = self.model.calibration_model.create_file_header()
header = header.replace('\r\n', '\n')
header += '\n#\n# ' + self.model.pattern_model.unit + '\t I'
return header
def _get_pattern_file_endings(self):
res = []
if self.widget.pattern_header_xy_cb.isChecked():
res.append('.xy')
if self.widget.pattern_header_chi_cb.isChecked():
res.append('.chi')
if self.widget.pattern_header_dat_cb.isChecked():
res.append('.dat')
return res
def show_batch_frame(self):
self.widget.batch_widget.raise_widget()
def show_file_info(self):
self.widget.file_info_widget.raise_widget()
def get_integration_unit(self):
if self.widget.pattern_tth_btn.isChecked():
return '2th_deg'
elif self.widget.pattern_q_btn.isChecked():
return 'q_A^-1'
elif self.widget.pattern_d_btn.isChecked():
return 'd_A'
def integrate_pattern(self):
if self.widget.img_mask_btn.isChecked():
mask = self.model.mask_model.get_mask()
else:
mask = None
if self.widget.img_roi_btn.isChecked():
roi_mask = self.widget.img_widget.roi.getRoiMask(self.model.img_data.shape)
else:
roi_mask = None
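        # combine the detector mask and the ROI mask: a pixel flagged in
        # either one stays flagged (logical OR) and is left out of the
        # integration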
        if roi_mask is not None and mask is not None:
            mask = np.logical_or(mask, roi_mask)
        elif roi_mask is not None:
            mask = roi_mask
if self.widget.pattern_tth_btn.isChecked():
integration_unit = '2th_deg'
elif self.widget.pattern_q_btn.isChecked():
integration_unit = 'q_A^-1'
elif self.widget.pattern_d_btn.isChecked():
integration_unit = 'd_A'
else:
            # should never happen - no integration unit radio button is selected
            print('No valid integration unit selected')
return
if not self.widget.automatic_binning_cb.isChecked():
num_points = int(str(self.widget.bin_count_txt.text()))
else:
num_points = None
return self.model.calibration_model.integrate_1d(mask=mask, unit=integration_unit, num_points=num_points)
def change_mask_mode(self):
self.model.use_mask = self.widget.integration_image_widget.mask_btn.isChecked()
self.widget.mask_transparent_cb.setVisible(self.model.use_mask)
self.plot_mask()
self.model.img_model.img_changed.emit()
def update_mask_mode(self):
self.widget.integration_image_widget.mask_btn.setChecked(bool(self.model.use_mask))
self.widget.mask_transparent_cb.setVisible(bool(self.model.use_mask))
self.widget.mask_transparent_cb.setChecked(bool(self.model.transparent_mask))
def update_img_mode(self):
self.widget.img_mode_btn.click()
def load_series_img(self):
pos = int(str(self.widget.img_step_series_widget.pos_txt.text()))
self.model.img_model.load_series_img(pos)
def load_prev_series_img(self):
step = int(str(self.widget.img_step_series_widget.step_txt.text()))
pos = int(str(self.widget.img_step_series_widget.pos_txt.text()))
self.model.img_model.load_series_img(pos - step)
def load_next_series_img(self):
step = int(str(self.widget.img_step_series_widget.step_txt.text()))
pos = int(str(self.widget.img_step_series_widget.pos_txt.text()))
self.model.img_model.load_series_img(pos + step)
def load_next_img(self):
step = int(str(self.widget.img_step_file_widget.step_txt.text()))
self.model.img_model.load_next_file(step=step)
def load_previous_img(self):
step = int(str(self.widget.img_step_file_widget.step_txt.text()))
self.model.img_model.load_previous_file(step=step)
def filename_txt_changed(self):
current_filename = os.path.basename(self.model.img_model.filename)
current_directory = str(self.widget.img_directory_txt.text())
new_filename = str(self.widget.img_filename_txt.text())
if os.path.exists(os.path.join(current_directory, new_filename)):
try:
self.load_file(filename=os.path.join(current_directory, new_filename))
except TypeError:
self.widget.img_filename_txt.setText(current_filename)
else:
self.widget.img_filename_txt.setText(current_filename)
def directory_txt_changed(self):
new_directory = str(self.widget.img_directory_txt.text())
if os.path.exists(new_directory) and new_directory != self.model.working_directories['image']:
if self.model.img_model.autoprocess:
self._files_now = dict([(f, None) for f in os.listdir(self.model.working_directories['image'])])
self.model.working_directories['image'] = os.path.abspath(new_directory)
old_filename = str(self.widget.img_filename_txt.text())
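            # the '*' suffix presumably marks that the displayed filename has
            # not been reloaded from the newly selected directory yet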
self.widget.img_filename_txt.setText(old_filename + '*')
else:
self.widget.img_directory_txt.setText(self.model.working_directories['image'])
def img_directory_btn_click(self):
directory = str(QtWidgets.QFileDialog.getExistingDirectory(
self.widget,
"Please choose the image working directory.",
self.model.working_directories['image']))
if directory != '':
if self.model.img_model.autoprocess:
self._files_now = dict([(f, None) for f in os.listdir(self.model.working_directories['image'])])
self.model.working_directories['image'] = directory
self.widget.img_directory_txt.setText(directory)
def update_img_control_widget(self):
self.widget.img_step_series_widget.setVisible(int(self.model.img_model.series_max > 1))
self.widget.img_step_series_widget.pos_validator.setTop(self.model.img_model.series_max)
self.widget.img_step_series_widget.pos_txt.setText(str(self.model.img_model.series_pos))
self.widget.file_info_btn.setVisible(self.model.img_model.file_info != "")
self.widget.move_btn.setVisible(len(self.model.img_model.motors_info) > 0)
self.widget.img_filename_txt.setText(os.path.basename(self.model.img_model.filename))
self.widget.img_directory_txt.setText(os.path.dirname(self.model.img_model.filename))
self.widget.file_info_widget.text_lbl.setText(self.model.img_model.file_info)
self.widget.image_control_widget.sources_widget.setVisible(not (self.model.img_model.sources is None))
if self.model.img_model.sources is not None:
sources_cb = self.widget.image_control_widget.sources_cb
sources_cb.blockSignals(True)
# remove all previous items:
for _ in range(sources_cb.count()):
sources_cb.removeItem(0)
sources_cb.addItems(self.model.img_model.sources)
sources_cb.setCurrentText(self.model.img_model.selected_source)
sources_cb.blockSignals(False)
self.widget.cbn_plot_btn.setText('Plot')
self.widget.oiadac_plot_btn.setText('Plot')
# update the window due to some errors on mac when using macports
self._get_master_parent().update()
def _get_master_parent(self):
master_widget_parent = self.widget
while master_widget_parent.parent():
master_widget_parent = master_widget_parent.parent()
return master_widget_parent
def click_roi_btn(self):
if self.model.current_configuration.roi is None:
self.model.current_configuration.roi = self.widget.img_widget.roi.getRoiLimits()
else:
self.model.current_configuration.roi = None
self.update_roi_in_gui()
def update_roi_in_gui(self):
roi = self.model.mask_model.roi
if roi is None:
self.widget.img_widget.deactivate_roi()
self.widget.img_roi_btn.setChecked(False)
if self.roi_active:
self.widget.img_widget.roi.sigRegionChangeFinished.disconnect(self.update_roi_in_model)
self.roi_active = False
return
if not self.model.current_configuration.auto_integrate_cake:
self.widget.img_roi_btn.setChecked(True)
self.widget.img_widget.activate_roi()
self.widget.img_widget.update_roi_shade_limits(self.model.img_data.shape)
pos = QtCore.QPoint(int(roi[2]), int(roi[0]))
size = QtCore.QPoint(int(roi[3] - roi[2]), int(roi[1] - roi[0]))
self.widget.img_widget.roi.setRoiLimits(pos, size)
if not self.roi_active:
self.widget.img_widget.roi.sigRegionChangeFinished.connect(self.update_roi_in_model)
self.roi_active = True
def update_roi_in_model(self):
self.model.current_configuration.roi = self.widget.img_widget.roi.getRoiLimits()
def change_view_mode(self):
if str(self.widget.img_mode_btn.text()) == 'Cake':
self.activate_cake_mode()
elif str(self.widget.img_mode_btn.text()) == 'Image':
self.activate_image_mode()
def toggle_show_phases(self):
if str(self.widget.img_phases_btn.text()) == 'Show Phases':
self.widget.integration_image_widget.cake_view.show_all_visible_cake_phases(
self.widget.phase_widget.phase_show_cbs)
self.widget.img_phases_btn.setText('Hide Phases')
self.model.enabled_phases_in_cake.emit()
elif str(self.widget.img_phases_btn.text()) == 'Hide Phases':
self.widget.integration_image_widget.cake_view.hide_all_cake_phases()
self.widget.img_phases_btn.setText('Show Phases')
def activate_cake_mode(self):
if not self.model.current_configuration.auto_integrate_cake:
self.model.current_configuration.auto_integrate_cake = True
self.model.current_configuration.integrate_image_2d()
self._update_cake_line_pos()
self._update_cake_mouse_click_pos()
self.widget.img_mode_btn.setText('Image')
self.widget.img_mode = str("Cake")
self.model.img_changed.disconnect(self.plot_img)
self.model.img_changed.disconnect(self.plot_mask)
self.model.cake_changed.connect(self.plot_cake)
self.plot_cake()
self.widget.cake_shift_azimuth_sl.setVisible(True)
self.widget.cake_shift_azimuth_sl.setMinimum(int(-len(self.model.cake_azi) / 2))
self.widget.cake_shift_azimuth_sl.setMaximum(int(len(self.model.cake_azi) / 2))
self.widget.cake_shift_azimuth_sl.setSingleStep(1)
self.widget.img_phases_btn.setVisible(True)
self.widget.integration_image_widget.img_pg_layout.hide()
self.widget.integration_image_widget.cake_pg_layout.show()
def activate_image_mode(self):
if self.model.current_configuration.auto_integrate_cake:
self.model.current_configuration.auto_integrate_cake = False
self.widget.cake_shift_azimuth_sl.setVisible(False)
self.widget.img_phases_btn.setVisible(False)
self._update_image_line_pos()
self._update_image_mouse_click_pos()
self.widget.img_mode_btn.setText('Cake')
self.widget.img_mode = str("Image")
self.model.img_changed.connect(self.plot_img)
self.model.img_changed.connect(self.plot_mask)
self.model.cake_changed.disconnect(self.plot_cake)
self.plot_img()
self.plot_mask()
self.widget.integration_image_widget.img_pg_layout.show()
self.widget.integration_image_widget.cake_pg_layout.hide()
def img_autoscale_btn_clicked(self):
if self.widget.img_autoscale_btn.isChecked():
self.widget.img_widget.auto_level()
def img_dock_btn_clicked(self):
self.img_docked = not self.img_docked
self.widget.dock_img(self.img_docked)
def show_background_subtracted_img_btn_clicked(self):
if self.widget.img_mode_btn.text() == 'Cake':
self.plot_img()
else:
self.widget.integration_image_widget.show_background_subtracted_img_btn.setChecked(False)
def _update_cake_line_pos(self):
cur_tth = self.get_current_pattern_tth()
if self.model.cake_tth is None or cur_tth < np.min(self.model.cake_tth) or cur_tth > np.max(
self.model.cake_tth):
self.widget.cake_widget.deactivate_vertical_line()
else:
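            # get_partial_index returns a fractional index into cake_tth;
            # the +0.5 offset centers the line on the corresponding pixel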
new_pos = get_partial_index(self.model.cake_tth, cur_tth) + 0.5
self.widget.cake_widget.set_vertical_line_pos(new_pos, 0)
self.widget.cake_widget.activate_vertical_line()
def _update_cake_mouse_click_pos(self):
if self.clicked_tth is None or not self.model.calibration_model.is_calibrated:
return
tth = self.clicked_tth
azi = self.clicked_azi
cake_tth = self.model.cake_tth
x_pos = get_partial_index(cake_tth, tth) + 0.5
shift_amount = self.widget.cake_shift_azimuth_sl.value()
y_pos = (get_partial_index(self.model.cake_azi, azi) + 0.5 + shift_amount) % len(self.model.cake_azi)
self.widget.cake_widget.set_mouse_click_position(x_pos, y_pos)
def _update_image_line_pos(self):
if not self.model.calibration_model.is_calibrated:
return
cur_tth = self.get_current_pattern_tth()
self.widget.img_widget.set_circle_line(
self.model.calibration_model.get_two_theta_array(), cur_tth / 180 * np.pi)
def _update_image_mouse_click_pos(self):
if self.clicked_tth is None or not self.model.calibration_model.is_calibrated:
return
tth = np.deg2rad(self.clicked_tth)
azi = np.deg2rad(self.clicked_azi)
new_pos = self.model.calibration_model.get_pixel_ind(tth, azi)
if len(new_pos) == 0:
self.widget.img_widget.mouse_click_item.hide()
else:
x_ind, y_ind = new_pos
self.widget.img_widget.set_mouse_click_position(y_ind + 0.5, x_ind + 0.5)
self.widget.img_widget.mouse_click_item.show()
def get_current_pattern_tth(self):
cur_pos = self.widget.pattern_widget.pos_line.getPos()[0]
if self.widget.pattern_q_btn.isChecked():
cur_tth = self.convert_x_value(cur_pos, 'q_A^-1', '2th_deg')
elif self.widget.pattern_tth_btn.isChecked():
cur_tth = cur_pos
elif self.widget.pattern_d_btn.isChecked():
cur_tth = self.convert_x_value(cur_pos, 'd_A', '2th_deg')
else:
cur_tth = None
return cur_tth
def update_cake_axes_range(self):
if self.model.current_configuration.auto_integrate_cake:
self.update_cake_azimuth_axis()
self.update_cake_x_axis()
def update_cake_azimuth_axis(self):
data_img_item = self.widget.integration_image_widget.cake_view.data_img_item
shift_amount = self.widget.cake_shift_azimuth_sl.value()
cake_azi = self.model.cake_azi - shift_amount * np.mean(np.diff(self.model.cake_azi))
height = data_img_item.viewRect().height()
bottom = data_img_item.viewRect().top()
v_scale = (cake_azi[-1] - cake_azi[0]) / data_img_item.boundingRect().height()
v_shift = np.min(cake_azi[0])
min_azi = v_scale * bottom + v_shift
max_azi = v_scale * (bottom + height) + v_shift
self.widget.integration_image_widget.cake_view.left_axis_cake.setRange(min_azi, max_azi)
def update_cake_x_axis(self):
if self.model.cake_tth is None:
return
data_img_item = self.widget.integration_image_widget.cake_view.data_img_item
cake_tth = self.model.cake_tth
width = data_img_item.viewRect().width()
left = data_img_item.viewRect().left()
h_scale = (np.max(cake_tth) - np.min(cake_tth)) / data_img_item.boundingRect().width()
h_shift = np.min(cake_tth)
min_tth = h_scale * left + h_shift
max_tth = h_scale * (left + width) + h_shift
if self.model.current_configuration.integration_unit == '2th_deg':
self.widget.integration_image_widget.cake_view.bottom_axis_cake.setRange(min_tth, max_tth)
elif self.model.current_configuration.integration_unit == 'q_A^-1':
self.widget.integration_image_widget.cake_view.bottom_axis_cake.setRange(
self.convert_x_value(min_tth, '2th_deg', 'q_A^-1'),
self.convert_x_value(max_tth, '2th_deg', 'q_A^-1'))
def set_cake_axis_unit(self, unit='2th_deg'):
if unit == '2th_deg':
self.widget.integration_image_widget.cake_view.bottom_axis_cake.setLabel(u'2θ', u'°')
elif unit == 'q_A^-1':
self.widget.integration_image_widget.cake_view.bottom_axis_cake.setLabel('Q', 'A<sup>-1</sup>')
self.update_cake_x_axis()
def show_img_mouse_position(self, x, y):
if self.widget.img_mode == 'Cake':
img_data = self.widget.cake_widget.img_data
else:
img_data = self.widget.img_widget.img_data
img_shape = img_data.shape
if 0 < x < img_shape[1] - 1 and 0 < y < img_shape[0] - 1:
self.update_mouse_position_labels(x, y, img_data[int(np.floor(y)), int(np.floor(x))])
if self.model.calibration_model.is_calibrated:
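                # img data arrays are indexed [row, column] = [y, x], so the
                # coordinates are swapped before the calibration lookup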
x_temp = x
x = np.array([y])
y = np.array([x_temp])
if self.widget.img_mode == 'Cake':
tth = get_partial_value(self.model.cake_tth, y - 0.5)
shift_amount = self.widget.cake_shift_azimuth_sl.value()
cake_azi = self.model.cake_azi - shift_amount * np.mean(np.diff(self.model.cake_azi))
azi = get_partial_value(cake_azi, x - 0.5)
q_value = self.convert_x_value(tth, '2th_deg', 'q_A^-1')
else:
tth = self.model.calibration_model.get_two_theta_img(x, y)
tth = tth / np.pi * 180.0
q_value = self.convert_x_value(tth, '2th_deg', 'q_A^-1')
azi = self.model.calibration_model.get_azi_img(x, y) / np.pi * 180
d = self.convert_x_value(tth, '2th_deg', 'd_A')
tth_str = u"2θ:%9.3f " % tth
self.widget.mouse_tth_lbl.setText(tth_str)
self.widget.mouse_d_lbl.setText('d:%9.3f ' % d)
self.widget.mouse_q_lbl.setText('Q:%9.3f ' % q_value)
self.widget.mouse_azi_lbl.setText('X:%9.3f ' % azi)
self.widget.img_widget_mouse_tth_lbl.setText(tth_str)
self.widget.img_widget_mouse_d_lbl.setText('d:%9.3f ' % d)
self.widget.img_widget_mouse_q_lbl.setText('Q:%9.3f ' % q_value)
self.widget.img_widget_mouse_azi_lbl.setText('X:%9.3f ' % azi)
else:
self.widget.mouse_tth_lbl.setText(u'2θ: -')
self.widget.mouse_d_lbl.setText('d: -')
self.widget.mouse_q_lbl.setText('Q: -')
self.widget.mouse_azi_lbl.setText('X: -')
self.widget.img_widget_mouse_tth_lbl.setText(u'2θ: -')
self.widget.img_widget_mouse_d_lbl.setText('d: -')
self.widget.img_widget_mouse_q_lbl.setText('Q: -')
self.widget.img_widget_mouse_azi_lbl.setText('X: -')
else:
self.update_mouse_position_labels(x, y, None)
def img_mouse_click(self, x, y):
if self.widget.img_mode == 'Cake':
img_data = self.widget.cake_widget.img_data
else:
img_data = self.widget.img_widget.img_data
if 0 < x < img_data.shape[1] - 1 and 0 < y < img_data.shape[0] - 1:
intensity = img_data[int(np.floor(y)), int(np.floor(x))]
else:
intensity = None
self.update_mouse_click_position_labels(x, y, intensity)
if self.model.calibration_model.is_calibrated:
x, y = y, x # the indices are reversed for the img_array
if self.widget.img_mode == 'Cake': # cake mode
# get clicked tth and azimuth
cake_shape = self.model.cake_data.shape
if x < 0 or y < 0 or x > cake_shape[0] - 1 or y > cake_shape[1] - 1:
return
x = np.array([x])
y = np.array([y])
tth = get_partial_value(self.model.cake_tth, y - 0.5)
shift_amount = self.widget.cake_shift_azimuth_sl.value()
azi = get_partial_value(np.roll(self.model.cake_azi, shift_amount), x - 0.5)
self.widget.cake_widget.activate_vertical_line()
elif self.widget.img_mode == 'Image': # image mode
img_shape = self.model.img_data.shape
if x < 0 or y < 0 or x > img_shape[0] - 1 or y > img_shape[1] - 1:
return
x = np.array([x])
y = np.array([y])
tth = np.rad2deg(self.model.calibration_model.get_two_theta_img(x, y))
azi = np.rad2deg(self.model.calibration_model.get_azi_img(x, y))
self.widget.img_widget.set_circle_line(self.model.calibration_model.get_two_theta_array(),
np.deg2rad(tth))
            else:  # unknown image mode - fall back to zero position
                tth = 0
                azi = 0
self.clicked_tth = tth # in degree
self.clicked_azi = azi # in degree
if self.widget.img_mode == 'Cake':
self.plot_cake_integral()
# calculate right unit for the position line the pattern widget
if self.widget.pattern_q_btn.isChecked():
pos = 4 * np.pi * np.sin(np.deg2rad(tth) / 2) / self.model.calibration_model.wavelength / 1e10
elif self.widget.pattern_tth_btn.isChecked():
pos = tth
elif self.widget.pattern_d_btn.isChecked():
pos = self.model.calibration_model.wavelength / (2 * np.sin(np.deg2rad(tth) / 2)) * 1e10
else:
pos = 0
self.widget.pattern_widget.set_pos_line(pos)
self.widget.click_tth_lbl.setText(self.widget.mouse_tth_lbl.text())
self.widget.click_d_lbl.setText(self.widget.mouse_d_lbl.text())
self.widget.click_q_lbl.setText(self.widget.mouse_q_lbl.text())
self.widget.click_azi_lbl.setText(self.widget.mouse_azi_lbl.text())
self.widget.img_widget_click_tth_lbl.setText(self.widget.mouse_tth_lbl.text())
self.widget.img_widget_click_d_lbl.setText(self.widget.mouse_d_lbl.text())
self.widget.img_widget_click_q_lbl.setText(self.widget.mouse_q_lbl.text())
self.widget.img_widget_click_azi_lbl.setText(self.widget.mouse_azi_lbl.text())
def update_mouse_position_labels(self, x, y, intensity):
x_pos_string = 'X: %4d' % x
y_pos_string = 'Y: %4d' % y
if intensity is None:
int_string = 'I:'
else:
int_string = 'I: %5d' % intensity
self.widget.mouse_x_lbl.setText(x_pos_string)
self.widget.mouse_y_lbl.setText(y_pos_string)
self.widget.mouse_int_lbl.setText(int_string)
def update_mouse_click_position_labels(self, x, y, intensity):
self.update_mouse_position_labels(x, y, intensity)
self.widget.click_x_lbl.setText(self.widget.mouse_x_lbl.text())
self.widget.click_y_lbl.setText(self.widget.mouse_y_lbl.text())
self.widget.click_int_lbl.setText(self.widget.mouse_int_lbl.text())
def pattern_mouse_click(self, x, y):
if self.model.calibration_model.is_calibrated:
if self.widget.img_mode == 'Cake':
self.set_cake_line_position(x)
elif self.widget.img_mode == 'Image':
self.set_image_line_position(x)
def set_cake_line_position(self, x):
x = self._convert_to_tth(x)
upper_ind = np.where(self.model.cake_tth > x)[0]
lower_ind = np.where(self.model.cake_tth < x)[0]
if len(upper_ind) == 0 or len(lower_ind) == 0:
self.widget.cake_widget.plot_cake_integral(np.array([]), np.array([]))
self.widget.cake_widget.deactivate_vertical_line()
return
spacing = self.model.cake_tth[upper_ind[0]] - self.model.cake_tth[lower_ind[-1]]
new_pos = lower_ind[-1] + (x - self.model.cake_tth[lower_ind[-1]]) / spacing + 0.5
self.widget.cake_widget.vertical_line.setValue(new_pos)
self.widget.cake_widget.activate_vertical_line()
self.plot_cake_integral(x)
def set_image_line_position(self, x):
x = self._convert_to_tth(x)
self.widget.img_widget.set_circle_line(
self.model.calibration_model.get_two_theta_array(), np.deg2rad(x))
def _convert_to_tth(self, x):
if self.model.integration_unit == 'q_A^-1':
return self.convert_x_value(x, 'q_A^-1', '2th_deg')
elif self.model.integration_unit == 'd_A':
return self.convert_x_value(x, 'd_A', '2th_deg')
return x
def set_iteration_mode_number(self):
self.model.img_model.set_file_iteration_mode('number')
def set_iteration_mode_time(self):
self.model.img_model.set_file_iteration_mode('time')
def select_source(self, source):
self.model.img_model.select_source(source)
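    # The unit conversions below follow Bragg's law (lambda = 2*d*sin(theta))
    # and q = 4*pi*sin(theta)/lambda, with 2theta in degrees, d in Angstrom,
    # q in 1/Angstrom and the wavelength stored in metres - hence the
    # 1e10/1e-10 Angstrom<->metre factors.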
def convert_x_value(self, value, previous_unit, new_unit):
wavelength = self.model.calibration_model.wavelength
if previous_unit == '2th_deg':
tth = value
elif previous_unit == 'q_A^-1':
tth = np.arcsin(
value * 1e10 * wavelength / (4 * np.pi)) * 360 / np.pi
elif previous_unit == 'd_A':
tth = 2 * np.arcsin(wavelength / (2 * value * 1e-10)) * 180 / np.pi
else:
tth = 0
if new_unit == '2th_deg':
res = tth
elif new_unit == 'q_A^-1':
res = 4 * np.pi * \
np.sin(tth / 360 * np.pi) / \
wavelength / 1e10
elif new_unit == 'd_A':
res = wavelength / (2 * np.sin(tth / 360 * np.pi)) * 1e10
else:
res = 0
return res
def load_calibration(self):
filename = open_file_dialog(
self.widget, "Load calibration...",
self.model.working_directories['calibration'],
'*.poni')
if filename != '':
self.model.working_directories['calibration'] = os.path.dirname(filename)
self.model.calibration_model.load(filename)
self.widget.calibration_lbl.setText(
self.model.calibration_model.calibration_name)
self.model.img_model.img_changed.emit()
def auto_process_cb_click(self):
self.model.img_model.autoprocess = self.widget.autoprocess_cb.isChecked()
def save_img(self, filename=None):
if not filename:
img_filename = os.path.splitext(os.path.basename(self.model.img_model.filename))[0]
filename = save_file_dialog(self.widget, "Save Image.",
os.path.join(self.model.working_directories['image'],
img_filename + '.png'),
('Image (*.png);;Data (*.tiff);;Text (*.txt)'))
if filename != '':
if filename.endswith('.png'):
if self.widget.img_mode == 'Cake':
self.widget.cake_widget.deactivate_vertical_line()
self.widget.cake_widget.deactivate_mouse_click_item()
QtWidgets.QApplication.processEvents()
self.widget.cake_widget.save_img(filename)
self.widget.cake_widget.activate_vertical_line()
self.widget.cake_widget.activate_mouse_click_item()
elif self.widget.img_mode == 'Image':
self.widget.img_widget.deactivate_circle_scatter()
self.widget.img_widget.deactivate_roi()
QtWidgets.QApplication.processEvents()
self.widget.img_widget.save_img(filename)
self.widget.img_widget.activate_circle_scatter()
if self.roi_active:
self.widget.img_widget.activate_roi()
elif filename.endswith('.tiff') or filename.endswith('.tif'):
if self.widget.img_mode == 'Image':
im_array = np.int32(self.model.img_data)
elif self.widget.img_mode == 'Cake':
im_array = np.int32(self.model.cake_data)
im_array = np.flipud(im_array)
im = Image.fromarray(im_array)
im.save(filename)
elif filename.endswith('.txt') or filename.endswith('.csv'):
if self.widget.img_mode == 'Image':
return
elif self.widget.img_mode == 'Cake': # saving cake data as a text file for export.
with open(filename, 'w') as out_file: # this is done in an odd and slow way because the headers
# should be floats and the data itself int.
cake_tth = np.insert(self.model.cake_tth, 0, 0)
np.savetxt(out_file, cake_tth[None], fmt='%6.3f')
for azi, row in zip(self.model.cake_azi, self.model.cake_data):
row_str = " ".join(["{:6.0f}".format(el) for el in row])
out_file.write("{:6.2f}".format(azi) + row_str + '\n')
def update_gui_from_configuration(self):
self.widget.img_mask_btn.setChecked(int(self.model.use_mask))
self.widget.mask_transparent_cb.setChecked(bool(self.model.transparent_mask))
self.widget.autoprocess_cb.setChecked(bool(self.model.img_model.autoprocess))
self.widget.calibration_lbl.setText(self.model.calibration_model.calibration_name)
self.update_img_control_widget()
self.update_mask_mode()
self.update_roi_in_gui()
if self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Image':
self.activate_cake_mode()
elif not self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Cake':
self.activate_image_mode()
elif self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Cake':
self._update_cake_line_pos()
self._update_cake_mouse_click_pos()
elif not self.model.current_configuration.auto_integrate_cake and self.widget.img_mode == 'Image':
self._update_image_line_pos()
self._update_image_mouse_click_pos()
def change_view_btn_clicked(self):
if self.view_mode == 'alternative':
self.change_view_to_normal()
elif self.view_mode == 'normal':
self.change_view_to_alternative()
def change_view_to_normal(self):
if self.view_mode == 'normal':
return
self.vertical_splitter_alternative_state = self.widget.vertical_splitter.saveState()
self.horizontal_splitter_alternative_state = self.widget.horizontal_splitter.saveState()
self.widget.vertical_splitter.addWidget(self.widget.integration_pattern_widget)
self.widget.integration_control_widget.setOrientation(QtCore.Qt.Horizontal)
if self.vertical_splitter_normal_state:
self.widget.vertical_splitter.restoreState(self.vertical_splitter_normal_state)
if self.horizontal_splitter_normal_state:
self.widget.horizontal_splitter.restoreState(self.horizontal_splitter_normal_state)
self.widget.img_widget.set_orientation("horizontal")
self.view_mode = 'normal'
def change_view_to_alternative(self):
if self.view_mode == 'alternative':
return
self.vertical_splitter_normal_state = self.widget.vertical_splitter.saveState()
self.horizontal_splitter_normal_state = self.widget.horizontal_splitter.saveState()
self.widget.vertical_splitter_left.insertWidget(0, self.widget.integration_pattern_widget)
self.widget.integration_control_widget.setOrientation(QtCore.Qt.Vertical)
if self.vertical_splitter_alternative_state:
self.widget.vertical_splitter.restoreState(self.vertical_splitter_alternative_state)
if self.horizontal_splitter_alternative_state:
self.widget.horizontal_splitter.restoreState(self.horizontal_splitter_alternative_state)
self.widget.img_widget.set_orientation("vertical")
self.view_mode = 'alternative'
| gpl-3.0 |
DJMuggs/ansible-modules-extras | notification/campfire.py | 8 | 4899 | #!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: campfire
version_added: "1.2"
short_description: Send a message to Campfire
description:
- Send a message to Campfire.
- Messages with newlines will result in a "Paste" message being sent.
options:
subscription:
description:
- The subscription name to use.
required: true
token:
description:
- API token.
required: true
room:
description:
- Room number to which the message should be sent.
required: true
msg:
description:
- The message body.
required: true
notify:
description:
- Send a notification sound before the message.
required: false
choices: ["56k", "bell", "bezos", "bueller", "clowntown",
"cottoneyejoe", "crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama", "greatjob", "greyjoy",
"guarantee", "heygirl", "horn", "horror",
"inconceivable", "live", "loggins", "makeitso", "noooo",
"nyan", "ohmy", "ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret", "sexyback",
"story", "tada", "tmyk", "trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah", "yodel"]
# informational: requirements for nodes
requirements: [ urllib2, cgi ]
author: '"Adam Garside (@fabulops)" <[email protected]>'
'''
EXAMPLES = '''
- campfire: subscription=foo token=12345 room=123 msg="Task completed."
- campfire: subscription=foo token=12345 room=123 notify=loggins
msg="Task completed ... with feeling."
'''
def main():

    module = AnsibleModule(
        argument_spec=dict(
            subscription=dict(required=True),
            token=dict(required=True),
            room=dict(required=True),
            msg=dict(required=True),
            notify=dict(required=False,
                        choices=["56k", "bell", "bezos", "bueller",
                                 "clowntown", "cottoneyejoe",
                                 "crickets", "dadgummit", "dangerzone",
                                 "danielsan", "deeper", "drama",
                                 "greatjob", "greyjoy", "guarantee",
                                 "heygirl", "horn", "horror",
                                 "inconceivable", "live", "loggins",
                                 "makeitso", "noooo", "nyan", "ohmy",
                                 "ohyeah", "pushit", "rimshot",
                                 "rollout", "rumble", "sax", "secret",
                                 "sexyback", "story", "tada", "tmyk",
                                 "trololo", "trombone", "unix",
                                 "vuvuzela", "what", "whoomp", "yeah",
                                 "yodel"]),
        ),
        supports_check_mode=False
    )

    # import the dependencies only after the AnsibleModule object exists, so
    # that a missing library can be reported through module.fail_json()
    # (previously fail_json() was called before module was defined)
    try:
        import urllib2
    except ImportError:
        module.fail_json(msg="urllib2 is required")

    try:
        import cgi
    except ImportError:
        module.fail_json(msg="cgi is required")
subscription = module.params["subscription"]
token = module.params["token"]
room = module.params["room"]
msg = module.params["msg"]
notify = module.params["notify"]
URI = "https://%s.campfirenow.com" % subscription
NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
MSTR = "<message><body>%s</body></message>"
AGENT = "Ansible/1.2"
try:
# Setup basic auth using token as the username
pm = urllib2.HTTPPasswordMgrWithDefaultRealm()
pm.add_password(None, URI, token, 'X')
# Setup Handler and define the opener for the request
handler = urllib2.HTTPBasicAuthHandler(pm)
opener = urllib2.build_opener(handler)
target_url = '%s/room/%s/speak.xml' % (URI, room)
# Send some audible notification if requested
if notify:
req = urllib2.Request(target_url, NSTR % cgi.escape(notify))
req.add_header('Content-Type', 'application/xml')
req.add_header('User-agent', AGENT)
response = opener.open(req)
# Send the message
req = urllib2.Request(target_url, MSTR % cgi.escape(msg))
req.add_header('Content-Type', 'application/xml')
req.add_header('User-agent', AGENT)
response = opener.open(req)
except urllib2.HTTPError, e:
if not (200 <= e.code < 300):
module.fail_json(msg="unable to send msg: '%s', campfire api"
" returned error code: '%s'" %
(msg, e.code))
except Exception, e:
module.fail_json(msg="unable to send msg: %s" % msg)
module.exit_json(changed=True, room=room, msg=msg, notify=notify)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
markwal/OctoPrint-ScreenSquish | setup.py | 1 | 1331 | # coding=utf-8
########################################################################################################################
plugin_identifier = "ScreenSquish"
plugin_package = "octoprint_%s" % plugin_identifier
plugin_name = "OctoPrint-ScreenSquish"
plugin_version = "0.4"
plugin_description = "Scalable UI that does some old fashioned (2.3) bootstrap responsive and some collapse etc."
plugin_author = "Mark Walker"
plugin_author_email = "[email protected]"
plugin_url = "https://github.com/markwal/OctoPrint-ScreenSquish"
plugin_license = "AGPLv3"
plugin_requires = []
plugin_additional_data = []
########################################################################################################################
from setuptools import setup
try:
import octoprint_setuptools
except ImportError:
print("Could not import OctoPrint's setuptools, are you sure you are running that under "
"the same python installation that OctoPrint is installed under?")
import sys
sys.exit(-1)
setup(**octoprint_setuptools.create_plugin_setup_parameters(
identifier=plugin_identifier,
name=plugin_name,
version=plugin_version,
description=plugin_description,
author=plugin_author,
mail=plugin_author_email,
url=plugin_url,
license=plugin_license,
requires=plugin_requires,
additional_data=plugin_additional_data
))
| agpl-3.0 |
djenniex/CouchPotatoServer | libs/pyasn1/type/univ.py | 74 | 44619 | # ASN.1 "universal" data types
import operator, sys, math
from pyasn1.type import base, tag, constraint, namedtype, namedval, tagmap
from pyasn1.codec.ber import eoo
from pyasn1.compat import octets
from pyasn1 import error
# "Simple" ASN.1 types (yet incomplete)
class Integer(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
)
namedValues = namedval.NamedValues()
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if namedValues is None:
self.__namedValues = self.namedValues
else:
self.__namedValues = namedValues
base.AbstractSimpleAsn1Item.__init__(
self, value, tagSet, subtypeSpec
)
def __repr__(self):
if self.__namedValues is not self.namedValues:
return '%s, %r)' % (base.AbstractSimpleAsn1Item.__repr__(self)[:-1], self.__namedValues)
else:
return base.AbstractSimpleAsn1Item.__repr__(self)
def __and__(self, value): return self.clone(self._value & value)
def __rand__(self, value): return self.clone(value & self._value)
def __or__(self, value): return self.clone(self._value | value)
def __ror__(self, value): return self.clone(value | self._value)
def __xor__(self, value): return self.clone(self._value ^ value)
def __rxor__(self, value): return self.clone(value ^ self._value)
def __lshift__(self, value): return self.clone(self._value << value)
def __rshift__(self, value): return self.clone(self._value >> value)
def __add__(self, value): return self.clone(self._value + value)
def __radd__(self, value): return self.clone(value + self._value)
def __sub__(self, value): return self.clone(self._value - value)
def __rsub__(self, value): return self.clone(value - self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self.clone(value * self._value)
def __mod__(self, value): return self.clone(self._value % value)
def __rmod__(self, value): return self.clone(value % self._value)
def __pow__(self, value, modulo=None): return self.clone(pow(self._value, value, modulo))
def __rpow__(self, value): return self.clone(pow(value, self._value))
if sys.version_info[0] <= 2:
def __div__(self, value): return self.clone(self._value // value)
def __rdiv__(self, value): return self.clone(value // self._value)
else:
def __truediv__(self, value): return self.clone(self._value / value)
def __rtruediv__(self, value): return self.clone(value / self._value)
def __divmod__(self, value): return self.clone(self._value // value)
def __rdivmod__(self, value): return self.clone(value // self._value)
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __int__(self): return int(self._value)
if sys.version_info[0] <= 2:
def __long__(self): return long(self._value)
def __float__(self): return float(self._value)
def __abs__(self): return self.clone(abs(self._value))
def __index__(self): return int(self._value)
def __pos__(self): return self.clone(+self._value)
def __neg__(self): return self.clone(-self._value)
def __invert__(self): return self.clone(~self._value)
def __round__(self, n=0):
r = round(self._value, n)
if n:
return self.clone(r)
else:
return r
def __floor__(self): return math.floor(self._value)
def __ceil__(self): return math.ceil(self._value)
if sys.version_info[0:2] > (2, 5):
def __trunc__(self): return self.clone(math.trunc(self._value))
def __lt__(self, value): return self._value < value
def __le__(self, value): return self._value <= value
def __eq__(self, value): return self._value == value
def __ne__(self, value): return self._value != value
def __gt__(self, value): return self._value > value
def __ge__(self, value): return self._value >= value
def prettyIn(self, value):
if not isinstance(value, str):
try:
return int(value)
except:
raise error.PyAsn1Error(
'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
)
r = self.__namedValues.getValue(value)
if r is not None:
return r
try:
return int(value)
except:
raise error.PyAsn1Error(
'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
)
def prettyOut(self, value):
r = self.__namedValues.getName(value)
return r is None and str(value) or repr(r)
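    # e.g. with namedValues=namedval.NamedValues(('on', 1)), prettyIn('on')
    # resolves to 1 and prettyOut(1) renders it back as 'on'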
def getNamedValues(self): return self.__namedValues
def clone(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if value is None and tagSet is None and subtypeSpec is None \
and namedValues is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None, namedValues=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
else:
namedValues = namedValues + self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
class Boolean(Integer):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
)
subtypeSpec = Integer.subtypeSpec+constraint.SingleValueConstraint(0,1)
namedValues = Integer.namedValues.clone(('False', 0), ('True', 1))
class BitString(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
)
namedValues = namedval.NamedValues()
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if namedValues is None:
self.__namedValues = self.namedValues
else:
self.__namedValues = namedValues
base.AbstractSimpleAsn1Item.__init__(
self, value, tagSet, subtypeSpec
)
def clone(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if value is None and tagSet is None and subtypeSpec is None \
and namedValues is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None, namedValues=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
else:
namedValues = namedValues + self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def __str__(self): return str(tuple(self))
# Immutable sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(operator.getitem(self._value, i))
else:
return self._value[i]
def __add__(self, value): return self.clone(self._value + value)
def __radd__(self, value): return self.clone(value + self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self * value
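    # prettyIn() accepts several initializer notations, e.g. "'011'B"
    # (binary), "'3fA'H" (hex, four bits per digit), "bitOne,bitTwo" (named
    # bits resolved through namedValues) or a plain tuple/list of 0s and 1s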
def prettyIn(self, value):
r = []
if not value:
return ()
elif isinstance(value, str):
if value[0] == '\'':
if value[-2:] == '\'B':
for v in value[1:-2]:
if v == '0':
r.append(0)
elif v == '1':
r.append(1)
else:
raise error.PyAsn1Error(
'Non-binary BIT STRING initializer %s' % (v,)
)
return tuple(r)
elif value[-2:] == '\'H':
for v in value[1:-2]:
i = 4
v = int(v, 16)
while i:
i = i - 1
r.append((v>>i)&0x01)
return tuple(r)
else:
raise error.PyAsn1Error(
'Bad BIT STRING value notation %s' % (value,)
)
else:
for i in value.split(','):
j = self.__namedValues.getValue(i)
if j is None:
raise error.PyAsn1Error(
'Unknown bit identifier \'%s\'' % (i,)
)
if j >= len(r):
r.extend([0]*(j-len(r)+1))
r[j] = 1
return tuple(r)
elif isinstance(value, (tuple, list)):
r = tuple(value)
for b in r:
if b and b != 1:
raise error.PyAsn1Error(
'Non-binary BitString initializer \'%s\'' % (r,)
)
return r
elif isinstance(value, BitString):
return tuple(value)
else:
raise error.PyAsn1Error(
'Bad BitString initializer type \'%s\'' % (value,)
)
def prettyOut(self, value):
return '\"\'%s\'B\"' % ''.join([str(x) for x in value])
try:
all
except NameError: # Python 2.4
def all(iterable):
for element in iterable:
if not element:
return False
return True
class OctetString(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
)
defaultBinValue = defaultHexValue = base.noValue
encoding = 'us-ascii'
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
encoding=None, binValue=None, hexValue=None):
if encoding is None:
self._encoding = self.encoding
else:
self._encoding = encoding
if binValue is not None:
value = self.fromBinaryString(binValue)
if hexValue is not None:
value = self.fromHexString(hexValue)
if value is None or value is base.noValue:
value = self.defaultHexValue
if value is None or value is base.noValue:
value = self.defaultBinValue
self.__asNumbersCache = None
base.AbstractSimpleAsn1Item.__init__(self, value, tagSet, subtypeSpec)
def clone(self, value=None, tagSet=None, subtypeSpec=None,
encoding=None, binValue=None, hexValue=None):
if value is None and tagSet is None and subtypeSpec is None and \
encoding is None and binValue is None and hexValue is None:
return self
if value is None and binValue is None and hexValue is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if encoding is None:
encoding = self._encoding
return self.__class__(
value, tagSet, subtypeSpec, encoding, binValue, hexValue
)
if sys.version_info[0] <= 2:
def prettyIn(self, value):
if isinstance(value, str):
return value
elif isinstance(value, unicode):
try:
return value.encode(self._encoding)
except (LookupError, UnicodeEncodeError):
raise error.PyAsn1Error(
'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
)
elif isinstance(value, (tuple, list)):
try:
return ''.join([ chr(x) for x in value ])
except ValueError:
raise error.PyAsn1Error(
'Bad OctetString initializer \'%s\'' % (value,)
)
else:
return str(value)
else:
def prettyIn(self, value):
if isinstance(value, bytes):
return value
elif isinstance(value, str):
try:
return value.encode(self._encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
)
elif isinstance(value, OctetString):
return value.asOctets()
elif isinstance(value, (tuple, list, map)):
try:
return bytes(value)
except ValueError:
raise error.PyAsn1Error(
'Bad OctetString initializer \'%s\'' % (value,)
)
else:
try:
return str(value).encode(self._encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
)
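    # fromBinaryString('01...') packs the bits MSB-first into octets;
    # fromHexString('3fa1') yields the octets 0x3f 0xa1 (an odd trailing
    # nibble is right-padded with zero)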
def fromBinaryString(self, value):
bitNo = 8; byte = 0; r = ()
for v in value:
if bitNo:
bitNo = bitNo - 1
else:
bitNo = 7
r = r + (byte,)
byte = 0
if v == '0':
v = 0
elif v == '1':
v = 1
else:
raise error.PyAsn1Error(
'Non-binary OCTET STRING initializer %s' % (v,)
)
byte = byte | (v << bitNo)
return octets.ints2octs(r + (byte,))
def fromHexString(self, value):
r = p = ()
for v in value:
if p:
r = r + (int(p+v, 16),)
p = ()
else:
p = v
if p:
r = r + (int(p+'0', 16),)
return octets.ints2octs(r)
def prettyOut(self, value):
if sys.version_info[0] <= 2:
numbers = tuple(( ord(x) for x in value ))
else:
numbers = tuple(value)
if all(x >= 32 and x <= 126 for x in numbers):
return str(value)
else:
return '0x' + ''.join(( '%.2x' % x for x in numbers ))
def __repr__(self):
r = []
doHex = False
if self._value is not self.defaultValue:
for x in self.asNumbers():
if x < 32 or x > 126:
doHex = True
break
if not doHex:
r.append('%r' % (self._value,))
if self._tagSet is not self.tagSet:
r.append('tagSet=%r' % (self._tagSet,))
if self._subtypeSpec is not self.subtypeSpec:
r.append('subtypeSpec=%r' % (self._subtypeSpec,))
if self.encoding is not self._encoding:
r.append('encoding=%r' % (self._encoding,))
if doHex:
r.append('hexValue=%r' % ''.join([ '%.2x' % x for x in self.asNumbers() ]))
return '%s(%s)' % (self.__class__.__name__, ', '.join(r))
if sys.version_info[0] <= 2:
def __str__(self): return str(self._value)
def __unicode__(self):
return self._value.decode(self._encoding, 'ignore')
def asOctets(self): return self._value
def asNumbers(self):
if self.__asNumbersCache is None:
self.__asNumbersCache = tuple([ ord(x) for x in self._value ])
return self.__asNumbersCache
else:
def __str__(self): return self._value.decode(self._encoding, 'ignore')
def __bytes__(self): return self._value
def asOctets(self): return self._value
def asNumbers(self):
if self.__asNumbersCache is None:
self.__asNumbersCache = tuple(self._value)
return self.__asNumbersCache
# Immutable sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(operator.getitem(self._value, i))
else:
return self._value[i]
def __add__(self, value): return self.clone(self._value + self.prettyIn(value))
def __radd__(self, value): return self.clone(self.prettyIn(value) + self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self * value
def __int__(self): return int(self._value)
def __float__(self): return float(self._value)
class Null(OctetString):
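    # Null carries no information: the constraint below pins its value to the
    # empty octet string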
defaultValue = ''.encode() # This is tightly constrained
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
)
subtypeSpec = OctetString.subtypeSpec+constraint.SingleValueConstraint(''.encode())
if sys.version_info[0] <= 2:
intTypes = (int, long)
else:
intTypes = (int,)
numericTypes = intTypes + (float,)
class ObjectIdentifier(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
)
def __add__(self, other): return self.clone(self._value + other)
def __radd__(self, other): return self.clone(other + self._value)
def asTuple(self): return self._value
# Sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(
operator.getitem(self._value, i)
)
else:
return self._value[i]
def __str__(self): return self.prettyPrint()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.prettyPrint())
def index(self, suboid): return self._value.index(suboid)
def isPrefixOf(self, value):
"""Returns true if argument OID resides deeper in the OID tree"""
l = len(self)
if l <= len(value):
if self._value[:l] == value[:l]:
return 1
return 0
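    # prettyIn() accepts dotted strings as well as int sequences, e.g.
    # ObjectIdentifier('1.3.6.1.2.1') == ObjectIdentifier((1, 3, 6, 1, 2, 1));
    # components are parsed with int(x, 0), so '0x2b' style sub-IDs work too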
def prettyIn(self, value):
"""Dotted -> tuple of numerics OID converter"""
if isinstance(value, tuple):
pass
elif isinstance(value, ObjectIdentifier):
return tuple(value)
elif isinstance(value, str):
r = []
for element in [ x for x in value.split('.') if x != '' ]:
try:
r.append(int(element, 0))
except ValueError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__, sys.exc_info()[1])
)
value = tuple(r)
else:
try:
value = tuple(value)
except TypeError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__,sys.exc_info()[1])
)
for x in value:
if not isinstance(x, intTypes) or x < 0:
raise error.PyAsn1Error(
'Invalid sub-ID in %s at %s' % (value, self.__class__.__name__)
)
return value
def prettyOut(self, value): return '.'.join([ str(x) for x in value ])
class Real(base.AbstractSimpleAsn1Item):
binEncBase = None # binEncBase = 16 is recommended for large numbers
try:
_plusInf = float('inf')
_minusInf = float('-inf')
_inf = (_plusInf, _minusInf)
except ValueError:
# Infinity support is platform and Python dependent
_plusInf = _minusInf = None
_inf = ()
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
)
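    # A Real value is held as a (mantissa, base, exponent) tuple with base 2
    # or 10, e.g. (123, 10, -2) represents 1.23; ints, floats and numeric
    # strings are normalized into that form by prettyIn()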
    def __normalizeBase10(self, value):
        m, b, e = value
        while m and m % 10 == 0:
            m = m // 10  # floor division keeps the mantissa an int on Python 3
            e = e + 1
        return m, b, e
def prettyIn(self, value):
if isinstance(value, tuple) and len(value) == 3:
if not isinstance(value[0], numericTypes) or \
not isinstance(value[1], intTypes) or \
not isinstance(value[2], intTypes):
raise error.PyAsn1Error('Lame Real value syntax: %s' % (value,))
if isinstance(value[0], float) and \
self._inf and value[0] in self._inf:
return value[0]
if value[1] not in (2, 10):
raise error.PyAsn1Error(
'Prohibited base for Real value: %s' % (value[1],)
)
if value[1] == 10:
value = self.__normalizeBase10(value)
return value
elif isinstance(value, intTypes):
return self.__normalizeBase10((value, 10, 0))
elif isinstance(value, (str, float)):
if isinstance(value, str):
try:
value = float(value)
except ValueError:
raise error.PyAsn1Error(
'Bad real value syntax: %s' % (value,)
)
if self._inf and value in self._inf:
return value
else:
e = 0
while int(value) != value:
value = value * 10
e = e - 1
return self.__normalizeBase10((int(value), 10, e))
elif isinstance(value, Real):
return tuple(value)
raise error.PyAsn1Error(
'Bad real value syntax: %s' % (value,)
)
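    # Accepted-input sketch for prettyIn (doctest-style comments, not
    # executed; the triples follow the branches above):
    # >>> Real(0.25)._value        # float -> normalized base-10 triple
    # (25, 10, -2)
    # >>> Real((3, 10, 2))._value  # (mantissa, base, exponent) passes through
    # (3, 10, 2)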
def prettyOut(self, value):
if value in self._inf:
return '\'%s\'' % value
else:
return str(value)
def prettyPrint(self, scope=0):
if self.isInfinity():
return self.prettyOut(self._value)
else:
return str(float(self))
def isPlusInfinity(self): return self._value == self._plusInf
def isMinusInfinity(self): return self._value == self._minusInf
def isInfinity(self): return self._value in self._inf
def __str__(self): return str(float(self))
def __add__(self, value): return self.clone(float(self) + value)
def __radd__(self, value): return self + value
def __mul__(self, value): return self.clone(float(self) * value)
def __rmul__(self, value): return self * value
def __sub__(self, value): return self.clone(float(self) - value)
def __rsub__(self, value): return self.clone(value - float(self))
def __mod__(self, value): return self.clone(float(self) % value)
def __rmod__(self, value): return self.clone(value % float(self))
def __pow__(self, value, modulo=None): return self.clone(pow(float(self), value, modulo))
def __rpow__(self, value): return self.clone(pow(value, float(self)))
if sys.version_info[0] <= 2:
def __div__(self, value): return self.clone(float(self) / value)
def __rdiv__(self, value): return self.clone(value / float(self))
else:
def __truediv__(self, value): return self.clone(float(self) / value)
def __rtruediv__(self, value): return self.clone(value / float(self))
def __divmod__(self, value): return self.clone(float(self) // value)
def __rdivmod__(self, value): return self.clone(value // float(self))
def __int__(self): return int(float(self))
if sys.version_info[0] <= 2:
def __long__(self): return long(float(self))
def __float__(self):
if self._value in self._inf:
return self._value
else:
return float(
self._value[0] * pow(self._value[1], self._value[2])
)
def __abs__(self): return self.clone(abs(float(self)))
def __pos__(self): return self.clone(+float(self))
def __neg__(self): return self.clone(-float(self))
def __round__(self, n=0):
r = round(float(self), n)
if n:
return self.clone(r)
else:
return r
def __floor__(self): return self.clone(math.floor(float(self)))
def __ceil__(self): return self.clone(math.ceil(float(self)))
if sys.version_info[0:2] > (2, 5):
def __trunc__(self): return self.clone(math.trunc(float(self)))
def __lt__(self, value): return float(self) < value
def __le__(self, value): return float(self) <= value
def __eq__(self, value): return float(self) == value
def __ne__(self, value): return float(self) != value
def __gt__(self, value): return float(self) > value
def __ge__(self, value): return float(self) >= value
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(float(self))
else:
def __bool__(self): return bool(float(self))
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __getitem__(self, idx):
if self._value in self._inf:
raise error.PyAsn1Error('Invalid infinite value operation')
else:
return self._value[idx]
class Enumerated(Integer):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
)
# "Structured" ASN.1 types
class SetOf(base.AbstractConstructedAsn1Item):
componentType = None
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
)
typeId = 1
strictConstraints = False
def _cloneComponentValues(self, myClone, cloneValueFlag):
idx = 0; l = len(self._componentValues)
while idx < l:
c = self._componentValues[idx]
if c is not None:
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, c.clone())
idx = idx + 1
def _verifyComponent(self, idx, value):
t = self._componentType
if t is None:
return
if not t.isSameTypeWith(value,matchConstraints=self.strictConstraints):
raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, t))
if self.strictConstraints and \
not t.isSuperTypeOf(value, matchTags=False):
raise error.PyAsn1Error('Component value is constraints-incompatible: %r vs %r' % (value, t))
def getComponentByPosition(self, idx): return self._componentValues[idx]
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if value is None:
if self._componentValues[idx] is None:
if self._componentType is None:
raise error.PyAsn1Error('Component type not defined')
self._componentValues[idx] = self._componentType.clone()
self._componentValuesSet = self._componentValuesSet + 1
return self
elif not isinstance(value, base.Asn1Item):
if self._componentType is None:
raise error.PyAsn1Error('Component type not defined')
if isinstance(self._componentType, base.AbstractSimpleAsn1Item):
value = self._componentType.clone(value=value)
else:
raise error.PyAsn1Error('Instance value required')
if verifyConstraints:
if self._componentType is not None:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
if self._componentValues[idx] is None:
self._componentValuesSet = self._componentValuesSet + 1
self._componentValues[idx] = value
return self
def getComponentTagMap(self):
if self._componentType is not None:
return self._componentType.getTagMap()
def prettyPrint(self, scope=0):
scope = scope + 1
r = self.__class__.__name__ + ':\n'
for idx in range(len(self._componentValues)):
r = r + ' '*scope
if self._componentValues[idx] is None:
r = r + '<empty>'
else:
r = r + self._componentValues[idx].prettyPrint(scope)
return r
def prettyPrintType(self, scope=0):
scope = scope + 1
r = '%s -> %s {\n' % (self.getTagSet(), self.__class__.__name__)
if self._componentType is not None:
r = r + ' '*scope
r = r + self._componentType.prettyPrintType(scope)
return r + '\n' + ' '*(scope-1) + '}'
class SequenceOf(SetOf):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
)
typeId = 2
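# Minimal usage sketch for the SetOf/SequenceOf machinery above (doctest-style
# comments, not executed; Integer is defined earlier in this module):
# >>> s = SequenceOf(componentType=Integer())
# >>> s.setComponentByPosition(0, 7).setComponentByPosition(1, 8)  # chainable
# >>> s.getComponentByPosition(1)
# Integer(8)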
class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
componentType = namedtype.NamedTypes()
strictConstraints = False
def __init__(self, componentType=None, tagSet=None,
subtypeSpec=None, sizeSpec=None):
if componentType is None:
componentType = self.componentType
base.AbstractConstructedAsn1Item.__init__(
self, componentType.clone(), tagSet, subtypeSpec, sizeSpec
)
self._componentTypeLen = len(self._componentType)
def __getitem__(self, idx):
if isinstance(idx, str):
return self.getComponentByName(idx)
else:
return base.AbstractConstructedAsn1Item.__getitem__(self, idx)
def __setitem__(self, idx, value):
if isinstance(idx, str):
self.setComponentByName(idx, value)
else:
base.AbstractConstructedAsn1Item.__setitem__(self, idx, value)
def _cloneComponentValues(self, myClone, cloneValueFlag):
idx = 0; l = len(self._componentValues)
while idx < l:
c = self._componentValues[idx]
if c is not None:
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, c.clone())
idx = idx + 1
def _verifyComponent(self, idx, value):
if idx >= self._componentTypeLen:
raise error.PyAsn1Error(
'Component type error out of range'
)
t = self._componentType[idx].getType()
if not t.isSameTypeWith(value,matchConstraints=self.strictConstraints):
raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, t))
if self.strictConstraints and \
not t.isSuperTypeOf(value, matchTags=False):
raise error.PyAsn1Error('Component value is constraints-incompatible: %r vs %r' % (value, t))
def getComponentByName(self, name):
return self.getComponentByPosition(
self._componentType.getPositionByName(name)
)
def setComponentByName(self, name, value=None, verifyConstraints=True):
return self.setComponentByPosition(
self._componentType.getPositionByName(name),value,verifyConstraints
)
def getComponentByPosition(self, idx):
try:
return self._componentValues[idx]
except IndexError:
if idx < self._componentTypeLen:
return
raise
def setComponentByPosition(self, idx, value=None,
verifyConstraints=True,
exactTypes=False,
matchTags=True,
matchConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = self._componentValuesSet + 1
return self
elif not isinstance(value, base.Asn1Item):
t = self._componentType.getTypeByPosition(idx)
if isinstance(t, base.AbstractSimpleAsn1Item):
value = t.clone(value=value)
else:
raise error.PyAsn1Error('Instance value required')
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
if self._componentValues[idx] is None:
self._componentValuesSet = self._componentValuesSet + 1
self._componentValues[idx] = value
return self
def getNameByPosition(self, idx):
if self._componentTypeLen:
return self._componentType.getNameByPosition(idx)
def getDefaultComponentByPosition(self, idx):
if self._componentTypeLen and self._componentType[idx].isDefaulted:
return self._componentType[idx].getType()
def getComponentType(self):
if self._componentTypeLen:
return self._componentType
def setDefaultComponents(self):
if self._componentTypeLen == self._componentValuesSet:
return
idx = self._componentTypeLen
while idx:
idx = idx - 1
if self._componentType[idx].isDefaulted:
if self.getComponentByPosition(idx) is None:
self.setComponentByPosition(idx)
elif not self._componentType[idx].isOptional:
if self.getComponentByPosition(idx) is None:
raise error.PyAsn1Error(
'Uninitialized component #%s at %r' % (idx, self)
)
def prettyPrint(self, scope=0):
scope = scope + 1
r = self.__class__.__name__ + ':\n'
for idx in range(len(self._componentValues)):
if self._componentValues[idx] is not None:
r = r + ' '*scope
componentType = self.getComponentType()
if componentType is None:
r = r + '<no-name>'
else:
r = r + componentType.getNameByPosition(idx)
r = '%s=%s\n' % (
r, self._componentValues[idx].prettyPrint(scope)
)
return r
def prettyPrintType(self, scope=0):
scope = scope + 1
r = '%s -> %s {\n' % (self.getTagSet(), self.__class__.__name__)
for idx in range(len(self.componentType)):
r = r + ' '*scope
r = r + '"%s"' % self.componentType.getNameByPosition(idx)
r = '%s = %s\n' % (
r, self._componentType.getTypeByPosition(idx).prettyPrintType(scope)
)
return r + '\n' + ' '*(scope-1) + '}'
class Sequence(SequenceAndSetBase):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
)
typeId = 3
def getComponentTagMapNearPosition(self, idx):
if self._componentType:
return self._componentType.getTagMapNearPosition(idx)
def getComponentPositionNearType(self, tagSet, idx):
if self._componentType:
return self._componentType.getPositionNearType(tagSet, idx)
else:
return idx
class Set(SequenceAndSetBase):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
)
typeId = 4
def getComponent(self, innerFlag=0): return self
def getComponentByType(self, tagSet, innerFlag=0):
c = self.getComponentByPosition(
self._componentType.getPositionByType(tagSet)
)
if innerFlag and isinstance(c, Set):
# get inner component by inner tagSet
return c.getComponent(1)
else:
# get outer component by inner tagSet
return c
def setComponentByType(self, tagSet, value=None, innerFlag=0,
verifyConstraints=True):
idx = self._componentType.getPositionByType(tagSet)
t = self._componentType.getTypeByPosition(idx)
if innerFlag: # set inner component by inner tagSet
if t.getTagSet():
return self.setComponentByPosition(
idx, value, verifyConstraints
)
else:
t = self.setComponentByPosition(idx).getComponentByPosition(idx)
return t.setComponentByType(
tagSet, value, innerFlag, verifyConstraints
)
else: # set outer component by inner tagSet
return self.setComponentByPosition(
idx, value, verifyConstraints
)
def getComponentTagMap(self):
if self._componentType:
return self._componentType.getTagMap(True)
def getComponentPositionByType(self, tagSet):
if self._componentType:
return self._componentType.getPositionByType(tagSet)
class Choice(Set):
tagSet = baseTagSet = tag.TagSet() # untagged
sizeSpec = constraint.ConstraintsIntersection(
constraint.ValueSizeConstraint(1, 1)
)
typeId = 5
_currentIdx = None
def __eq__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] == other
return NotImplemented
def __ne__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] != other
return NotImplemented
def __lt__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] < other
return NotImplemented
def __le__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] <= other
return NotImplemented
def __gt__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] > other
return NotImplemented
def __ge__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] >= other
return NotImplemented
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._componentValues)
else:
def __bool__(self): return bool(self._componentValues)
    def __len__(self): return 1 if self._currentIdx is not None else 0
def verifySizeSpec(self):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
self._sizeSpec(' ')
def _cloneComponentValues(self, myClone, cloneValueFlag):
try:
c = self.getComponent()
except error.PyAsn1Error:
pass
else:
if isinstance(c, Choice):
tagSet = c.getEffectiveTagSet()
else:
tagSet = c.getTagSet()
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByType(
tagSet, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByType(tagSet, c.clone())
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if self._currentIdx is not None:
self._componentValues[self._currentIdx] = None
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = 1
self._currentIdx = idx
return self
elif not isinstance(value, base.Asn1Item):
value = self._componentType.getTypeByPosition(idx).clone(
value=value
)
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
self._componentValues[idx] = value
self._currentIdx = idx
self._componentValuesSet = 1
return self
def getMinTagSet(self):
if self._tagSet:
return self._tagSet
else:
return self._componentType.genMinTagSet()
def getEffectiveTagSet(self):
if self._tagSet:
return self._tagSet
else:
c = self.getComponent()
if isinstance(c, Choice):
return c.getEffectiveTagSet()
else:
return c.getTagSet()
def getTagMap(self):
if self._tagSet:
return Set.getTagMap(self)
else:
return Set.getComponentTagMap(self)
def getComponent(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
c = self._componentValues[self._currentIdx]
if innerFlag and isinstance(c, Choice):
return c.getComponent(innerFlag)
else:
return c
def getName(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
if innerFlag:
c = self._componentValues[self._currentIdx]
if isinstance(c, Choice):
return c.getName(innerFlag)
return self._componentType.getNameByPosition(self._currentIdx)
def setDefaultComponents(self): pass
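    # Semantics note: setComponentByPosition above clears any previously
    # chosen alternative first, so at most one branch is ever live --
    # len(choice) is 1 once a component is chosen and 0 before that.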
class Any(OctetString):
tagSet = baseTagSet = tag.TagSet() # untagged
typeId = 6
def getTagMap(self):
return tagmap.TagMap(
{ self.getTagSet(): self },
{ eoo.endOfOctets.getTagSet(): eoo.endOfOctets },
self
)
# XXX
# coercion rules?
| gpl-3.0 |
Atom-machinerule/OpenQbo | qbo_stereo_anaglyph/hrl_lib/src/hrl_lib/msg/_PlanarBaseVelLimits.py | 2 | 6154 | """autogenerated by genmsg_py from PlanarBaseVelLimits.msg. Do not edit."""
import roslib.message
import struct
import std_msgs.msg
class PlanarBaseVelLimits(roslib.message.Message):
_md5sum = "79d6a8bc99f5988deabb3198a791ba92"
_type = "hrl_lib/PlanarBaseVelLimits"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# max allowable velocities in positive and negative X and Y directions
# and max allowable angular velocity.
Header header
float64 xvel_pos_max
float64 xvel_neg_max
float64 yvel_pos_max
float64 yvel_neg_max
float64 avel_max
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','xvel_pos_max','xvel_neg_max','yvel_pos_max','yvel_neg_max','avel_max']
_slot_types = ['Header','float64','float64','float64','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,xvel_pos_max,xvel_neg_max,yvel_pos_max,yvel_neg_max,avel_max
@param args: complete set of field values, in .msg order
@param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PlanarBaseVelLimits, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg._Header.Header()
if self.xvel_pos_max is None:
self.xvel_pos_max = 0.
if self.xvel_neg_max is None:
self.xvel_neg_max = 0.
if self.yvel_pos_max is None:
self.yvel_pos_max = 0.
if self.yvel_neg_max is None:
self.yvel_neg_max = 0.
if self.avel_max is None:
self.avel_max = 0.
else:
self.header = std_msgs.msg._Header.Header()
self.xvel_pos_max = 0.
self.xvel_neg_max = 0.
self.yvel_pos_max = 0.
self.yvel_neg_max = 0.
self.avel_max = 0.
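    # Construction sketch (keyword form, as the docstring above recommends;
    # the numbers are hypothetical sample limits):
    # >>> limits = PlanarBaseVelLimits(xvel_pos_max=0.5, xvel_neg_max=0.3,
    # ...                              yvel_pos_max=0.2, yvel_neg_max=0.2,
    # ...                              avel_max=1.0)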
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
@param buff: buffer
@type buff: StringIO
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_5d.pack(_x.xvel_pos_max, _x.xvel_neg_max, _x.yvel_pos_max, _x.yvel_neg_max, _x.avel_max))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
@param str: byte array of serialized message
@type str: str
"""
try:
if self.header is None:
self.header = std_msgs.msg._Header.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.header.frame_id = str[start:end]
_x = self
start = end
end += 40
(_x.xvel_pos_max, _x.xvel_neg_max, _x.yvel_pos_max, _x.yvel_neg_max, _x.avel_max,) = _struct_5d.unpack(str[start:end])
return self
except struct.error as e:
raise roslib.message.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
@param buff: buffer
@type buff: StringIO
@param numpy: numpy python module
@type numpy module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_5d.pack(_x.xvel_pos_max, _x.xvel_neg_max, _x.yvel_pos_max, _x.yvel_neg_max, _x.avel_max))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
@param str: byte array of serialized message
@type str: str
@param numpy: numpy python module
@type numpy: module
"""
try:
if self.header is None:
self.header = std_msgs.msg._Header.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.header.frame_id = str[start:end]
_x = self
start = end
end += 40
(_x.xvel_pos_max, _x.xvel_neg_max, _x.yvel_pos_max, _x.yvel_neg_max, _x.avel_max,) = _struct_5d.unpack(str[start:end])
return self
except struct.error as e:
raise roslib.message.DeserializationError(e) #most likely buffer underfill
_struct_I = roslib.message.struct_I
_struct_5d = struct.Struct("<5d")
_struct_3I = struct.Struct("<3I")
| lgpl-2.1 |
unreal666/outwiker | src/outwiker/pages/text/textpage.py | 3 | 1215 | # -*- coding: utf-8 -*-
"""
Необходимые классы для создания страниц с текстом
"""
from outwiker.core.tree import WikiPage
from outwiker.pages.text.textpanel import TextPanel
from outwiker.core.factory import PageFactory
class TextWikiPage (WikiPage):
"""
Класс текстовых страниц
"""
def __init__(self, path, title, parent, readonly=False):
WikiPage.__init__(self, path, title, parent, readonly)
@staticmethod
def getTypeString():
return u"text"
class TextPageFactory (PageFactory):
"""
Фабрика для создания текстовой страницы и ее представления
"""
def getPageType(self):
return TextWikiPage
@property
def title(self):
"""
Название страницы, показываемое пользователю
"""
return _(u"Text Page")
def getPageView(self, parent, application):
"""
Вернуть контрол, который будет отображать и редактировать страницу
"""
return TextPanel(parent, application)
| gpl-3.0 |
filippog/pysnmp | pysnmp/proto/secmod/rfc3414/auth/hmacmd5.py | 4 | 3369 | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <[email protected]>
# License: http://pysnmp.sf.net/license.html
#
try:
from hashlib import md5
except ImportError:
import md5
md5 = md5.new
from pyasn1.type import univ
from pysnmp.proto.secmod.rfc3414.auth import base
from pysnmp.proto.secmod.rfc3414 import localkey
from pysnmp.proto import errind, error
_twelveZeros = univ.OctetString((0,)*12).asOctets()
_fortyEightZeros = (0,)*48
# rfc3414: 6.2.4
class HmacMd5(base.AbstractAuthenticationService):
serviceID = (1, 3, 6, 1, 6, 3, 10, 1, 1, 2) # usmHMACMD5AuthProtocol
__ipad = [0x36]*64
__opad = [0x5C]*64
def hashPassphrase(self, authKey):
return localkey.hashPassphraseMD5(authKey)
def localizeKey(self, authKey, snmpEngineID):
return localkey.localizeKeyMD5(authKey, snmpEngineID)
# 6.3.1
def authenticateOutgoingMsg(self, authKey, wholeMsg):
        # Here we expect the calling secmod to indicate where the digest
        # should be in the substrate. It also pre-sets the digest placeholder,
        # so we can hash wholeMsg as-is.
        # Yes, that's ugly, but that's what the RFC prescribes...
l = wholeMsg.find(_twelveZeros)
if l == -1:
            raise error.ProtocolError('Can\'t locate digest placeholder')
wholeHead = wholeMsg[:l]
wholeTail = wholeMsg[l+12:]
# 6.3.1.1
# 6.3.1.2a
extendedAuthKey = authKey.asNumbers() + _fortyEightZeros
# 6.3.1.2b --> noop
# 6.3.1.2c
k1 = univ.OctetString(
map(lambda x, y: x^y, extendedAuthKey, self.__ipad)
)
# 6.3.1.2d --> noop
# 6.3.1.2e
k2 = univ.OctetString(
map(lambda x, y: x^y, extendedAuthKey, self.__opad)
)
# 6.3.1.3
d1 = md5(k1.asOctets()+wholeMsg).digest()
# 6.3.1.4
d2 = md5(k2.asOctets()+d1).digest()
mac = d2[:12]
# 6.3.1.5 & 6
return wholeHead + mac + wholeTail
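    # Equivalence note (an illustrative assumption, per RFC 2104): since
    # wholeMsg already carries the twelve-zero placeholder, the two-pass
    # construction above amounts to HMAC-MD5 truncated to 96 bits:
    # >>> import hmac, hashlib
    # >>> hmac.new(authKey.asOctets(), wholeMsg, hashlib.md5).digest()[:12]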
# 6.3.2
def authenticateIncomingMsg(self, authKey, authParameters, wholeMsg):
# 6.3.2.1 & 2
if len(authParameters) != 12:
raise error.StatusInformation(
errorIndication=errind.authenticationError
)
# 6.3.2.3
l = wholeMsg.find(authParameters.asOctets())
if l == -1:
            raise error.ProtocolError('Can\'t locate digest in wholeMsg')
wholeHead = wholeMsg[:l]
wholeTail = wholeMsg[l+12:]
authenticatedWholeMsg = wholeHead + _twelveZeros + wholeTail
# 6.3.2.4a
extendedAuthKey = authKey.asNumbers() + _fortyEightZeros
# 6.3.2.4b --> noop
# 6.3.2.4c
k1 = univ.OctetString(
map(lambda x, y: x^y, extendedAuthKey, self.__ipad)
)
# 6.3.2.4d --> noop
# 6.3.2.4e
k2 = univ.OctetString(
map(lambda x, y: x^y, extendedAuthKey, self.__opad)
)
# 6.3.2.5a
d1 = md5(k1.asOctets()+authenticatedWholeMsg).digest()
# 6.3.2.5b
d2 = md5(k2.asOctets()+d1).digest()
# 6.3.2.5c
mac = d2[:12]
# 6.3.2.6
if mac != authParameters:
raise error.StatusInformation(
errorIndication=errind.authenticationFailure
)
return authenticatedWholeMsg
| bsd-3-clause |
mongkok/defocus | defocus/users/migrations/0001_initial.py | 1 | 2373 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-16 21:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('first_name', models.CharField(blank=True, max_length=128, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=128, verbose_name='last name')),
('language', models.CharField(default='es', max_length=2)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'ordering': ('-id',),
},
),
]
| mit |
jjingrong/PONUS-1.2 | venv/build/django/django/contrib/auth/management/commands/changepassword.py | 76 | 2071 | from __future__ import unicode_literals
import getpass
from optparse import make_option
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
)
help = "Change a user's password for django.contrib.auth."
requires_model_validation = False
def _get_pass(self, prompt="Password: "):
p = getpass.getpass(prompt=force_str(prompt))
if not p:
raise CommandError("aborted")
return p
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("need exactly one or zero arguments for username")
if args:
username, = args
else:
username = getpass.getuser()
UserModel = get_user_model()
try:
u = UserModel._default_manager.using(options.get('database')).get(**{
UserModel.USERNAME_FIELD: username
})
except UserModel.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
self.stdout.write("Changing password for user '%s'\n" % u)
MAX_TRIES = 3
count = 0
p1, p2 = 1, 2 # To make them initially mismatch.
while p1 != p2 and count < MAX_TRIES:
p1 = self._get_pass()
p2 = self._get_pass("Password (again): ")
if p1 != p2:
self.stdout.write("Passwords do not match. Please try again.\n")
count = count + 1
if count == MAX_TRIES:
raise CommandError("Aborting password change for user '%s' after %s attempts" % (u, count))
u.set_password(p1)
u.save()
return "Password changed successfully for user '%s'" % u
| mit |
MjAbuz/watchdog | import/parse/voteview.py | 3 | 1969 | """
parse voteview partisanship data
"""
HOUSE_DAT = "../data/crawl/voteview/HL01110C21_PRES_BSSE.DAT"
SENATE_DAT = "../data/crawl/voteview/SL01110C21_BSSE.dat"
state_map = { #@@ import to state json as icpsr
41: 'AL', 81: 'AK', 61: 'AZ', 42: 'AR', 71: 'CA', 62: 'CO', 1: 'CT',
11: 'DE', 43: 'FL', 44: 'GA', 82: 'HI', 63: 'ID', 21: 'IL', 22: 'IN',
31: 'IA', 32: 'KS', 51: 'KY', 45: 'LA', 2: 'ME', 52: 'MD', 3: 'MA',
23: 'MI', 33: 'MN', 46: 'MS', 34: 'MO', 64: 'MT', 35: 'NE', 65: 'NV',
4: 'NH', 12: 'NJ', 66: 'NM', 13: 'NY', 47: 'NC', 36: 'ND', 24: 'OH',
53: 'OK', 72: 'OR', 14: 'PA', 5: 'RI', 48: 'SC', 37: 'SD', 54: 'TN',
49: 'TX', 67: 'UT', 6: 'VT', 40: 'VA', 73: 'WA', 56: 'WV', 25: 'WI',
68: 'WY', 55: 'DC'
}
import web
import tools
def parse():
for fn in [HOUSE_DAT, SENATE_DAT]:
for line in file(fn):
out = web.storage()
out.congress = int(line[0:4])
out.icpsr_id = int(line[4:10])
out.icpsr_state = int(line[10:13])
out.district = int(line[13:15])
out.state_name = line[15:23].strip()
out.party_code = int(line[23:28])
out.last_name = line[28:41].strip()
out.dim1 = float(line[41:47])
out.dim2 = float(line[47:54])
out.std1 = float(line[54:61])
out.std2 = float(line[61:68])
out.corr = float(line[68:75])
out.loglike = float(line[75:87])
out.n_votes = int(line[87:92])
out.n_errs = int(line[92:97])
out.n_geomeanprob = float(line[97:104])
if out.icpsr_state in state_map:
out.state_code = state_map[out.icpsr_state]
if out.district:
out.district_id = out.state_code + '-' + str(out.district).zfill(2)
else:
out.district_id = out.state_code
yield out
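# Fixed-width layout recovered from the slices above (0-based, end-exclusive);
# the two dimension scores are presumably DW-NOMINATE coordinates:
# [0:4] congress, [4:10] ICPSR id, [10:13] ICPSR state, [13:15] district,
# [15:23] state name, [23:28] party code, [28:41] last name,
# [41:47]/[47:54] dim1/dim2, [54:61]/[61:68] std errors, [68:75] correlation,
# [75:87] log-likelihood, [87:92] votes, [92:97] errors, [97:104] geometric
# mean probability.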
if __name__ == "__main__":
tools.export(parse())
| agpl-3.0 |
twiest/openshift-tools | ansible/roles/lib_zabbix/library/zbx_user.py | 13 | 7263 | #!/usr/bin/env python
'''
ansible module for zabbix users
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix user ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because the zabbix modules all look similar to one another.
# They need duplicate code because their behavior is very similar
# but differs for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
''' Check if key exists in content or the size of content[key] > 0
'''
if not content.has_key(key):
return False
if not content[key]:
return False
return True
def get_usergroups(zapi, usergroups):
''' Get usergroups
'''
ugroups = []
for ugr in usergroups:
content = zapi.get_content('usergroup',
'get',
{'search': {'name': ugr},
#'selectUsers': 'userid',
#'getRights': 'extend'
})
if content['result']:
ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']})
return ugroups or None
def get_passwd(passwd):
    '''Determine whether a password is set; if not, return the default 'zabbix'
    '''
if passwd:
return passwd
return 'zabbix'
def get_usertype(user_type):
'''
Determine zabbix user account type
'''
if not user_type:
return None
utype = 1
if 'super' in user_type:
utype = 3
elif 'admin' in user_type or user_type == 'admin':
utype = 2
return utype
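# Behavior sketch (doctest-style comments, not executed):
# >>> get_usertype('super-admin')  # anything containing 'super' -> super admin
# 3
# >>> get_usertype('admin')
# 2
# >>> get_usertype('user')         # everything else is a regular user
# 1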
def main():
'''
ansible zabbix module for users
'''
##def user(self, name, state='present', params=None):
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
login=dict(default=None, type='str'),
first_name=dict(default=None, type='str'),
last_name=dict(default=None, type='str'),
user_type=dict(default=None, type='str'),
password=dict(default=None, type='str'),
refresh=dict(default=None, type='int'),
autologout=dict(default=None, type='int'),
update_password=dict(default=False, type='bool'),
user_groups=dict(default=[], type='list'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
## before we can create a user media and users with media types we need media
zbx_class_name = 'user'
idname = "userid"
state = module.params['state']
content = zapi.get_content(zbx_class_name,
'get',
{'output': 'extend',
'search': {'alias': module.params['login']},
"selectUsrgrps": 'usergrpid',
})
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
if state == 'absent':
if not exists(content) or len(content['result']) == 0:
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
if state == 'present':
params = {'alias': module.params['login'],
'passwd': get_passwd(module.params['password']),
'usrgrps': get_usergroups(zapi, module.params['user_groups']),
'name': module.params['first_name'],
'surname': module.params['last_name'],
'refresh': module.params['refresh'],
'autologout': module.params['autologout'],
'type': get_usertype(module.params['user_type']),
}
# Remove any None valued params
_ = [params.pop(key, None) for key in params.keys() if params[key] is None]
if not exists(content):
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
if content.has_key('Error'):
module.exit_json(failed=True, changed=False, results=content, state='present')
module.exit_json(changed=True, results=content['result'], state='present')
# already exists, we need to update it
# let's compare properties
differences = {}
# Update password
if not module.params['update_password']:
params.pop('passwd', None)
zab_results = content['result'][0]
for key, value in params.items():
if key == 'usrgrps':
            # compare as unordered collections: a list of dicts fails direct
            # comparison when ordering differs, so flag a difference if the
            # current zabbix group list is not fully contained in the
            # provided group list, or the provided list is not fully
            # contained in the current zabbix group list
if not all([_ in value for _ in zab_results[key]]) \
or not all([_ in zab_results[key] for _ in value]):
differences[key] = value
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
if not differences:
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update
differences[idname] = zab_results[idname]
content = zapi.get_content(zbx_class_name, 'update', differences)
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required.
from ansible.module_utils.basic import *
main()
| apache-2.0 |
mubix/pth-toolkit | lib/python2.7/site-packages/samba/tests/samba_tool/ntacl.py | 42 | 6329 | # Unix SMB/CIFS implementation.
# Copyright (C) Andrew Bartlett 2012
#
# Based on user.py:
# Copyright (C) Sean Dague <[email protected]> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import time
import ldb
from samba.tests.samba_tool.base import SambaToolCmdTest
import random
class NtACLCmdSysvolTestCase(SambaToolCmdTest):
"""Tests for samba-tool ntacl sysvol* subcommands"""
def test_ntvfs(self):
(result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
"--use-ntvfs")
self.assertCmdSuccess(result)
self.assertEquals(out,"","Shouldn't be any output messages")
self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)
def test_s3fs(self):
(result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
"--use-s3fs")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(out,"","Shouldn't be any output messages")
def test_ntvfs_check(self):
(result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
"--use-ntvfs")
self.assertCmdSuccess(result)
self.assertEquals(out,"","Shouldn't be any output messages")
self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)
# Now check they were set correctly
(result, out, err) = self.runsubcmd("ntacl", "sysvolcheck")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(out,"","Shouldn't be any output messages")
def test_s3fs_check(self):
(result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
"--use-s3fs")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(out,"","Shouldn't be any output messages")
# Now check they were set correctly
(result, out, err) = self.runsubcmd("ntacl", "sysvolcheck")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(out,"","Shouldn't be any output messages")
class NtACLCmdGetSetTestCase(SambaToolCmdTest):
"""Tests for samba-tool ntacl get/set subcommands"""
acl = "O:DAG:DUD:P(A;OICI;0x001f01ff;;;DA)(A;OICI;0x001f01ff;;;EA)(A;OICIIO;0x001f01ff;;;CO)(A;OICI;0x001f01ff;;;DA)(A;OICI;0x001f01ff;;;SY)(A;OICI;0x001200a9;;;AU)(A;OICI;0x001200a9;;;ED)S:AI(OU;CIIDSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)(OU;CIIDSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
def test_ntvfs(self):
path = os.environ['SELFTEST_PREFIX']
tempf = os.path.join(path,"pytests"+str(int(100000*random.random())))
open(tempf, 'w').write("empty")
(result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
"--use-ntvfs")
self.assertCmdSuccess(result)
self.assertEquals(out,"","Shouldn't be any output messages")
self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)
def test_s3fs(self):
path = os.environ['SELFTEST_PREFIX']
tempf = os.path.join(path,"pytests"+str(int(100000*random.random())))
open(tempf, 'w').write("empty")
(result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
"--use-s3fs")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(out,"","Shouldn't be any output messages")
def test_ntvfs_check(self):
path = os.environ['SELFTEST_PREFIX']
tempf = os.path.join(path,"pytests"+str(int(100000*random.random())))
open(tempf, 'w').write("empty")
(result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
"--use-ntvfs")
self.assertCmdSuccess(result)
self.assertEquals(out,"","Shouldn't be any output messages")
self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)
# Now check they were set correctly
(result, out, err) = self.runsubcmd("ntacl", "get", tempf,
"--use-ntvfs", "--as-sddl")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(self.acl+"\n", out, "Output should be the ACL")
def test_s3fs_check(self):
path = os.environ['SELFTEST_PREFIX']
tempf = os.path.join(path,"pytests"+str(int(100000*random.random())))
open(tempf, 'w').write("empty")
(result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
"--use-s3fs")
self.assertCmdSuccess(result)
self.assertEquals(out,"","Shouldn't be any output messages")
self.assertEquals(err,"","Shouldn't be any error messages")
# Now check they were set correctly
(result, out, err) = self.runsubcmd("ntacl", "get", tempf,
"--use-s3fs", "--as-sddl")
self.assertCmdSuccess(result)
self.assertEquals(err,"","Shouldn't be any error messages")
self.assertEquals(self.acl+"\n", out,"Output should be the ACL")
| bsd-2-clause |
chrislit/abydos | tests/distance/test_distance_baroni_urbani_buser_ii.py | 1 | 7009 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_baroni_urbani_buser_ii.
This module contains unit tests for abydos.distance.BaroniUrbaniBuserII
"""
import unittest
from abydos.distance import BaroniUrbaniBuserII
class BaroniUrbaniBuserIITestCases(unittest.TestCase):
"""Test BaroniUrbaniBuserII functions.
abydos.distance.BaroniUrbaniBuserII
"""
cmp = BaroniUrbaniBuserII()
cmp_no_d = BaroniUrbaniBuserII(alphabet=0)
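    # Coefficient under test, stated here as a reading aid (see the abydos
    # implementation for the authoritative form). With a/b/c/d the 2x2 token
    # co-occurrence counts:
    #   corr = (sqrt(a*d) + a - (b + c)) / (sqrt(a*d) + a + b + c)
    #   sim  = (corr + 1) / 2
    # e.g. 'Nigel'/'Niall' bigrams with alphabet=0 give a = b = c = 3, d = 0,
    # hence corr = -1/3 and sim = 1/3, matching the expectations below.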
def test_baroni_urbani_buser_ii_sim(self):
"""Test abydos.distance.BaroniUrbaniBuserII.sim."""
# Base cases
self.assertEqual(self.cmp.sim('', ''), 1.0)
self.assertEqual(self.cmp.sim('a', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'a'), 0.0)
self.assertEqual(self.cmp.sim('abc', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(self.cmp.sim('Nigel', 'Niall'), 0.8951383588)
self.assertAlmostEqual(self.cmp.sim('Niall', 'Nigel'), 0.8951383588)
self.assertAlmostEqual(self.cmp.sim('Colin', 'Coiln'), 0.8951383588)
self.assertAlmostEqual(self.cmp.sim('Coiln', 'Colin'), 0.8951383588)
self.assertAlmostEqual(
self.cmp.sim('ATCAACGAGT', 'AACGATTAG'), 0.9199236936
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.sim('', ''), 1.0)
self.assertEqual(self.cmp_no_d.sim('a', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'a'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(
self.cmp_no_d.sim('Nigel', 'Niall'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Niall', 'Nigel'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Colin', 'Coiln'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('Coiln', 'Colin'), 0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.sim('ATCAACGAGT', 'AACGATTAG'), 0.5
)
def test_baroni_urbani_buser_ii_dist(self):
"""Test abydos.distance.BaroniUrbaniBuserII.dist."""
# Base cases
self.assertEqual(self.cmp.dist('', ''), 0.0)
self.assertEqual(self.cmp.dist('a', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'a'), 1.0)
self.assertEqual(self.cmp.dist('abc', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 0.1048616412)
self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 0.1048616412)
self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 0.1048616412)
self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 0.1048616412)
self.assertAlmostEqual(
self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.0800763064
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.dist('', ''), 0.0)
self.assertEqual(self.cmp_no_d.dist('a', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'a'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(
self.cmp_no_d.dist('Nigel', 'Niall'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Niall', 'Nigel'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Colin', 'Coiln'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('Coiln', 'Colin'), 0.6666666667
)
self.assertAlmostEqual(
self.cmp_no_d.dist('ATCAACGAGT', 'AACGATTAG'), 0.5
)
def test_baroni_urbani_buser_ii_corr(self):
"""Test abydos.distance.BaroniUrbaniBuserII.corr."""
# Base cases
self.assertEqual(self.cmp.corr('', ''), 1.0)
self.assertEqual(self.cmp.corr('a', ''), -1.0)
self.assertEqual(self.cmp.corr('', 'a'), -1.0)
self.assertEqual(self.cmp.corr('abc', ''), -1.0)
self.assertEqual(self.cmp.corr('', 'abc'), -1.0)
self.assertEqual(self.cmp.corr('abc', 'abc'), 1.0)
self.assertEqual(self.cmp.corr('abcd', 'efgh'), -1.0)
self.assertAlmostEqual(self.cmp.corr('Nigel', 'Niall'), 0.7902767176)
self.assertAlmostEqual(self.cmp.corr('Niall', 'Nigel'), 0.7902767176)
self.assertAlmostEqual(self.cmp.corr('Colin', 'Coiln'), 0.7902767176)
self.assertAlmostEqual(self.cmp.corr('Coiln', 'Colin'), 0.7902767176)
self.assertAlmostEqual(
self.cmp.corr('ATCAACGAGT', 'AACGATTAG'), 0.8398473871
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.corr('', ''), 1.0)
self.assertEqual(self.cmp_no_d.corr('a', ''), -1.0)
self.assertEqual(self.cmp_no_d.corr('', 'a'), -1.0)
self.assertEqual(self.cmp_no_d.corr('abc', ''), -1.0)
self.assertEqual(self.cmp_no_d.corr('', 'abc'), -1.0)
self.assertEqual(self.cmp_no_d.corr('abc', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.corr('abcd', 'efgh'), -1.0)
self.assertAlmostEqual(
self.cmp_no_d.corr('Nigel', 'Niall'), -0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.corr('Niall', 'Nigel'), -0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.corr('Colin', 'Coiln'), -0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.corr('Coiln', 'Colin'), -0.3333333333
)
self.assertAlmostEqual(
self.cmp_no_d.corr('ATCAACGAGT', 'AACGATTAG'), 0.0
)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
h3biomed/ansible | test/units/plugins/lookup/test_manifold.py | 38 | 27127 | # (c) 2018, Arigato Machine Inc.
# (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch, call
from ansible.errors import AnsibleError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils import six
from ansible.plugins.lookup.manifold import ManifoldApiClient, LookupModule, ApiError
import json
API_FIXTURES = {
'https://api.marketplace.manifold.co/v1/resources':
[
{
"body": {
"label": "resource-1",
"name": "Resource 1"
},
"id": "rid-1"
},
{
"body": {
"label": "resource-2",
"name": "Resource 2"
},
"id": "rid-2"
}
],
'https://api.marketplace.manifold.co/v1/resources?label=resource-1':
[
{
"body": {
"label": "resource-1",
"name": "Resource 1"
},
"id": "rid-1"
}
],
'https://api.marketplace.manifold.co/v1/resources?label=resource-2':
[
{
"body": {
"label": "resource-2",
"name": "Resource 2"
},
"id": "rid-2"
}
],
'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1':
[
{
"body": {
"label": "resource-1",
"name": "Resource 1"
},
"id": "rid-1"
}
],
'https://api.marketplace.manifold.co/v1/resources?project_id=pid-1':
[
{
"body": {
"label": "resource-2",
"name": "Resource 2"
},
"id": "rid-2"
}
],
'https://api.marketplace.manifold.co/v1/resources?project_id=pid-2':
[
{
"body": {
"label": "resource-1",
"name": "Resource 1"
},
"id": "rid-1"
},
{
"body": {
"label": "resource-3",
"name": "Resource 3"
},
"id": "rid-3"
}
],
'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1':
[
{
"body": {
"label": "resource-1",
"name": "Resource 1"
},
"id": "rid-1"
}
],
'https://api.marketplace.manifold.co/v1/projects':
[
{
"body": {
"label": "project-1",
"name": "Project 1",
},
"id": "pid-1",
},
{
"body": {
"label": "project-2",
"name": "Project 2",
},
"id": "pid-2",
}
],
'https://api.marketplace.manifold.co/v1/projects?label=project-2':
[
{
"body": {
"label": "project-2",
"name": "Project 2",
},
"id": "pid-2",
}
],
'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1':
[
{
"body": {
"resource_id": "rid-1",
"values": {
"RESOURCE_TOKEN_1": "token-1",
"RESOURCE_TOKEN_2": "token-2"
}
},
"id": "cid-1",
}
],
'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-2':
[
{
"body": {
"resource_id": "rid-2",
"values": {
"RESOURCE_TOKEN_3": "token-3",
"RESOURCE_TOKEN_4": "token-4"
}
},
"id": "cid-2",
}
],
'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-3':
[
{
"body": {
"resource_id": "rid-3",
"values": {
"RESOURCE_TOKEN_1": "token-5",
"RESOURCE_TOKEN_2": "token-6"
}
},
"id": "cid-3",
}
],
'https://api.identity.manifold.co/v1/teams':
[
{
"id": "tid-1",
"body": {
"name": "Team 1",
"label": "team-1"
}
},
{
"id": "tid-2",
"body": {
"name": "Team 2",
"label": "team-2"
}
}
]
}
def mock_fixture(open_url_mock, fixture=None, data=None, headers=None):
if not headers:
headers = {}
if fixture:
data = json.dumps(API_FIXTURES[fixture])
if 'content-type' not in headers:
headers['content-type'] = 'application/json'
open_url_mock.return_value.read.return_value = data
open_url_mock.return_value.headers = headers
class TestManifoldApiClient(unittest.TestCase):
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_sends_default_headers(self, open_url_mock):
mock_fixture(open_url_mock, data='hello')
client = ManifoldApiClient('token-123')
client.request('test', 'endpoint')
open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_decodes_json(self, open_url_mock):
mock_fixture(open_url_mock, fixture='https://api.marketplace.manifold.co/v1/resources')
client = ManifoldApiClient('token-123')
self.assertIsInstance(client.request('marketplace', 'resources'), list)
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_streams_text(self, open_url_mock):
mock_fixture(open_url_mock, data='hello', headers={'content-type': "text/plain"})
client = ManifoldApiClient('token-123')
self.assertEqual('hello', client.request('test', 'endpoint'))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_processes_parameterized_headers(self, open_url_mock):
mock_fixture(open_url_mock, data='hello')
client = ManifoldApiClient('token-123')
client.request('test', 'endpoint', headers={'X-HEADER': 'MANIFOLD'})
open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123',
'X-HEADER': 'MANIFOLD'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_passes_arbitrary_parameters(self, open_url_mock):
mock_fixture(open_url_mock, data='hello')
client = ManifoldApiClient('token-123')
client.request('test', 'endpoint', use_proxy=False, timeout=5)
open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0',
use_proxy=False, timeout=5)
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_raises_on_incorrect_json(self, open_url_mock):
mock_fixture(open_url_mock, data='noJson', headers={'content-type': "application/json"})
client = ManifoldApiClient('token-123')
with self.assertRaises(ApiError) as context:
client.request('test', 'endpoint')
self.assertEqual('JSON response can\'t be parsed while requesting https://api.test.manifold.co/v1/endpoint:\n'
'noJson',
str(context.exception))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_raises_on_status_500(self, open_url_mock):
open_url_mock.side_effect = HTTPError('https://api.test.manifold.co/v1/endpoint',
500, 'Server error', {}, six.StringIO('ERROR'))
client = ManifoldApiClient('token-123')
with self.assertRaises(ApiError) as context:
client.request('test', 'endpoint')
self.assertEqual('Server returned: HTTP Error 500: Server error while requesting '
'https://api.test.manifold.co/v1/endpoint:\nERROR',
str(context.exception))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_raises_on_bad_url(self, open_url_mock):
open_url_mock.side_effect = URLError('URL is invalid')
client = ManifoldApiClient('token-123')
with self.assertRaises(ApiError) as context:
client.request('test', 'endpoint')
self.assertEqual('Failed lookup url for https://api.test.manifold.co/v1/endpoint : <url'
'open error URL is invalid>',
str(context.exception))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_raises_on_ssl_error(self, open_url_mock):
open_url_mock.side_effect = SSLValidationError('SSL Error')
client = ManifoldApiClient('token-123')
with self.assertRaises(ApiError) as context:
client.request('test', 'endpoint')
self.assertEqual('Error validating the server\'s certificate for https://api.test.manifold.co/v1/endpoint: '
'SSL Error',
str(context.exception))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_request_raises_on_connection_error(self, open_url_mock):
open_url_mock.side_effect = ConnectionError('Unknown connection error')
client = ManifoldApiClient('token-123')
with self.assertRaises(ApiError) as context:
client.request('test', 'endpoint')
self.assertEqual('Error connecting to https://api.test.manifold.co/v1/endpoint: Unknown connection error',
str(context.exception))
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_resources_get_all(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/resources'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_resources())
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_resources_filter_label(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/resources?label=resource-1'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_resources(label='resource-1'))
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_resources_filter_team_and_project(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_resources(team_id='tid-1', project_id='pid-1'))
args, kwargs = open_url_mock.call_args
url_called = args[0]
        # Dict order is not guaranteed, so the URL's querystring parameters may appear in any order
self.assertIn('team_id=tid-1', url_called)
self.assertIn('project_id=pid-1', url_called)
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_teams_get_all(self, open_url_mock):
url = 'https://api.identity.manifold.co/v1/teams'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_teams())
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_teams_filter_label(self, open_url_mock):
url = 'https://api.identity.manifold.co/v1/teams'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url][1:2], client.get_teams(label='team-2'))
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_projects_get_all(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/projects'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_projects())
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_projects_filter_label(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/projects?label=project-2'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_projects(label='project-2'))
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
@patch('ansible.plugins.lookup.manifold.open_url')
def test_get_credentials(self, open_url_mock):
url = 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1'
mock_fixture(open_url_mock, fixture=url)
client = ManifoldApiClient('token-123')
self.assertListEqual(API_FIXTURES[url], client.get_credentials(resource_id='rid-1'))
open_url_mock.assert_called_with(url,
headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
http_agent='python-manifold-ansible-1.0.0')
class TestLookupModule(unittest.TestCase):
def setUp(self):
self.lookup = LookupModule()
self.lookup._load_name = "manifold"
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_get_all(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
'RESOURCE_TOKEN_2': 'token-2',
'RESOURCE_TOKEN_3': 'token-3',
'RESOURCE_TOKEN_4': 'token-4'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123'))
client_mock.assert_called_with('token-123')
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None)
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_get_one_resource(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_3': 'token-3',
'RESOURCE_TOKEN_4': 'token-4'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?label=resource-2']
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run(['resource-2'], api_token='token-123'))
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None, label='resource-2')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_get_two_resources(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
'RESOURCE_TOKEN_2': 'token-2',
'RESOURCE_TOKEN_3': 'token-3',
'RESOURCE_TOKEN_4': 'token-4'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run(['resource-1', 'resource-2'], api_token='token-123'))
client_mock.assert_called_with('token-123')
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None)
@patch('ansible.plugins.lookup.manifold.display')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_get_resources_with_same_credential_names(self, client_mock, display_mock):
expected_result = [{'RESOURCE_TOKEN_1': 'token-5',
'RESOURCE_TOKEN_2': 'token-6'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-2']
client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects?label=project-2']
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-2'))
client_mock.assert_called_with('token-123')
display_mock.warning.assert_has_calls([
call("'RESOURCE_TOKEN_1' with label 'resource-1' was replaced by resource data with label 'resource-3'"),
call("'RESOURCE_TOKEN_2' with label 'resource-1' was replaced by resource data with label 'resource-3'")],
any_order=True
)
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-2')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_filter_by_team(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
'RESOURCE_TOKEN_2': 'token-2'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1']
client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1]
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', team='team-1'))
client_mock.assert_called_with('token-123')
client_mock.return_value.get_resources.assert_called_with(team_id='tid-1', project_id=None)
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_filter_by_project(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_3': 'token-3',
'RESOURCE_TOKEN_4': 'token-4'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-1']
client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1]
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-1'))
client_mock.assert_called_with('token-123')
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-1')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_filter_by_team_and_project(self, client_mock):
expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
'RESOURCE_TOKEN_2': 'token-2'
}]
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1']
client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1]
client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1]
client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
'credentials?resource_id={0}'.format(x)]
self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-1'))
client_mock.assert_called_with('token-123')
client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-1')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_raise_team_doesnt_exist(self, client_mock):
client_mock.return_value.get_teams.return_value = []
with self.assertRaises(AnsibleError) as context:
self.lookup.run([], api_token='token-123', team='no-team')
self.assertEqual("Team 'no-team' does not exist",
str(context.exception))
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_raise_project_doesnt_exist(self, client_mock):
client_mock.return_value.get_projects.return_value = []
with self.assertRaises(AnsibleError) as context:
self.lookup.run([], api_token='token-123', project='no-project')
self.assertEqual("Project 'no-project' does not exist",
str(context.exception))
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_raise_resource_doesnt_exist(self, client_mock):
client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
with self.assertRaises(AnsibleError) as context:
self.lookup.run(['resource-1', 'no-resource-1', 'no-resource-2'], api_token='token-123')
self.assertEqual("Resource(s) no-resource-1, no-resource-2 do not exist",
str(context.exception))
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_catch_api_error(self, client_mock):
client_mock.side_effect = ApiError('Generic error')
with self.assertRaises(AnsibleError) as context:
self.lookup.run([], api_token='token-123')
self.assertEqual("API Error: Generic error",
str(context.exception))
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_catch_unhandled_exception(self, client_mock):
client_mock.side_effect = Exception('Unknown error')
with self.assertRaises(AnsibleError) as context:
self.lookup.run([], api_token='token-123')
self.assertTrue('Exception: Unknown error' in str(context.exception))
@patch('ansible.plugins.lookup.manifold.os.getenv')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_falls_back_to_env_var(self, client_mock, getenv_mock):
getenv_mock.return_value = 'token-321'
client_mock.return_value.get_resources.return_value = []
client_mock.return_value.get_credentials.return_value = []
self.lookup.run([])
getenv_mock.assert_called_with('MANIFOLD_API_TOKEN')
client_mock.assert_called_with('token-321')
@patch('ansible.plugins.lookup.manifold.os.getenv')
@patch('ansible.plugins.lookup.manifold.ManifoldApiClient')
def test_falls_raises_on_no_token(self, client_mock, getenv_mock):
getenv_mock.return_value = None
client_mock.return_value.get_resources.return_value = []
client_mock.return_value.get_credentials.return_value = []
with self.assertRaises(AnsibleError) as context:
self.lookup.run([])
self.assertEqual('API token is required. Please set api_token parameter or MANIFOLD_API_TOKEN env var',
str(context.exception))
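# For context, a minimal sketch of how the lookup plugin under test is
# typically invoked from a playbook (resource label and token below are
# placeholders, not values from this test suite):
#
#   - debug:
#       msg: "{{ lookup('manifold', 'resource-1', api_token='token-123') }}"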
| gpl-3.0 |
schreiberx/sweet | benchmarks_sphere/galewsky_varying_varying_dt_and_tsm/benchmark_create_jobs.py | 1 | 6312 | #! /usr/bin/env python3
import os
import sys
import math
import numpy as np
from itertools import product
from mule_local.JobGeneration import *
from mule.JobParallelization import *
from mule.JobParallelizationDimOptions import *
p = JobGeneration()
verbose = False
#verbose = True
##################################################
##################################################
p.compile.mode = 'release'
#p.compile.sweet_mpi = 'disable'
p.runtime.space_res_spectral = 128
p.parallelization.core_oversubscription = False
p.parallelization.core_affinity = 'compact'
p.compile.threading = 'omp'
p.compile.rexi_thread_parallel_sum = 'disable'
gen_reference_solution = True
p.runtime.benchmark_name = "galewsky"
p.runtime.max_simulation_time = 60*60*24*8 # 8 days
p.runtime.output_timestep_size = 60*60*6 # Generate output every 6 hours
p.runtime.output_file_mode = 'bin'
params_timestep_sizes_explicit_ = [15*(2**i) for i in range(0, 4)]
params_timestep_sizes_implicit_ = [15*(2**i) for i in range(2, 6)]
params_timestep_sizes_sl_ = [15*(2**i) for i in range(2, 6)]
params_timestep_size_reference = 30.0
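# For reference, the step sizes (in seconds) produced by the lists above:
#   explicit:        [15, 30, 60, 120]
#   implicit:        [60, 120, 240, 480]
#   semi-Lagrangian: [60, 120, 240, 480]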
# Parallelization
params_pspace_num_cores_per_rank = [p.platform_resources.num_cores_per_socket]
params_pspace_num_threads_per_rank = [p.platform_resources.num_cores_per_socket]
unique_id_filter = []
unique_id_filter.append('compile')
#unique_id_filter.append('runtime.galewsky_params')
unique_id_filter.append('runtime.rexi')
unique_id_filter.append('runtime.benchmark')
unique_id_filter.append('runtime.max_simulation_time')
p.unique_id_filter = unique_id_filter
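# Only write output once, at the end of the simulation
# (this supersedes the 6-hourly output interval set above)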
p.runtime.output_timestep_size = p.runtime.max_simulation_time
##########################################################################
##########################################################################
##########################################################################
def estimateWallclockTime(p):
return 12*60*60
p.compile.lapack = 'enable'
p.compile.mkl = 'disable'
p.compilecommand_in_jobscript = False
#
# Run simulation on plane or sphere
#
p.compile.program = 'swe_sphere'
p.compile.plane_spectral_space = 'disable'
p.compile.plane_spectral_dealiasing = 'disable'
p.compile.sphere_spectral_space = 'enable'
p.compile.sphere_spectral_dealiasing = 'enable'
p.compile.benchmark_timings = 'enable'
p.compile.quadmath = 'disable'
#
# Activate Fortran source
#
p.compile.fortran_source = 'enable'
# Verbosity mode
p.runtime.verbosity = 0
#
# Mode and Physical resolution
#
p.runtime.space_res_spectral = 128
p.runtime.space_res_physical = -1
#
# Compute error
#
p.runtime.compute_error = 0
#
# Preallocate the REXI matrices
#
#p.runtime.rexi_sphere_preallocation = 1
# Leave instability checks activated
p.runtime.instability_checks = 1
# Don't activate them for wallclock time studies since they are pretty costly!!!
#p.runtime.instability_checks = 0
p.runtime.viscosity = 0.0
p.runtime.sphere_extended_modes = 0
#
# allow including this file
#
if __name__ == "__main__":
ts_methods = [
['ln_erk', 4, 4, 0], # reference solution
###########
# Runge-Kutta
###########
['ln_erk', 2, 2, 0],
###########
# CN
###########
['lg_irk_lc_n_erk_ver0', 2, 2, 0],
['lg_irk_lc_n_erk_ver1', 2, 2, 0],
['l_irk_na_sl_nd_settls_ver1', 2, 2, 0],
['l_irk_na_sl_nd_settls_ver2', 2, 2, 0],
['lg_irk_na_sl_lc_nd_settls_ver1', 2, 2, 0],
['lg_irk_na_sl_lc_nd_settls_ver2', 2, 2, 0],
]
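    # Each entry: [timestepping_method, order, order2, unused,
    #              optional dict of extra runtime parameters (see loop below)]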
#
# Reference solution
#
p.reference_job_unique_id = None
if gen_reference_solution:
tsm = ts_methods[0]
p.runtime.timestep_size = params_timestep_size_reference
p.runtime.timestepping_method = tsm[0]
p.runtime.timestepping_order = tsm[1]
p.runtime.timestepping_order2 = tsm[2]
pspace = JobParallelizationDimOptions('space')
pspace.num_cores_per_rank = 1
pspace.num_threads_per_rank = params_pspace_num_cores_per_rank[-1]
pspace.num_ranks = 1
# Setup parallelization
p.setup_parallelization([pspace])
if verbose:
pspace.print()
p.parallelization.print()
p.parallelization.max_wallclock_seconds = estimateWallclockTime(p)
p.gen_jobscript_directory('job_benchref_'+p.getUniqueID())
# Use this as a reference job
p.reference_job_unique_id = p.job_unique_id
for tsm in ts_methods[1:]:
p.runtime.timestepping_method = tsm[0]
p.runtime.timestepping_order = tsm[1]
p.runtime.timestepping_order2 = tsm[2]
        if len(tsm) > 4:
            # extra runtime parameters were supplied for this scheme
            p.runtime.load_from_dict(tsm[4])
tsm_name = tsm[0]
if 'ln_erk' in tsm_name:
params_timestep_sizes = params_timestep_sizes_explicit_
elif 'l_erk' in tsm_name or 'lg_erk' in tsm_name:
params_timestep_sizes = params_timestep_sizes_explicit_
elif 'l_irk' in tsm_name or 'lg_irk' in tsm_name:
params_timestep_sizes = params_timestep_sizes_implicit_
elif '_sl' in tsm_name:
params_timestep_sizes = params_timestep_sizes_sl_
else:
print("Unable to identify time stepping method "+tsm_name)
sys.exit(1)
for (
pspace_num_cores_per_rank,
pspace_num_threads_per_rank,
p.runtime.timestep_size
) in product(
params_pspace_num_cores_per_rank,
params_pspace_num_threads_per_rank,
params_timestep_sizes
):
pspace = JobParallelizationDimOptions('space')
pspace.num_cores_per_rank = pspace_num_cores_per_rank
pspace.num_threads_per_rank = pspace_num_threads_per_rank
pspace.num_ranks = 1
pspace.setup()
p.setup_parallelization([pspace])
if verbose:
pspace.print()
p.parallelization.print()
p.parallelization.max_wallclock_seconds = estimateWallclockTime(p)
p.gen_jobscript_directory('job_bench_'+p.getUniqueID())
p.write_compilecommands()
| mit |
lokirius/python-for-android | python-build/python-libs/gdata/src/gdata/youtube/service.py | 141 | 57914 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YouTubeService extends GDataService to streamline YouTube operations.
YouTubeService: Provides methods to perform CRUD operations on YouTube feeds.
Extends GDataService.
"""
__author__ = ('[email protected] (Stephanie Liu), '
'[email protected] (Jochen Hartmann)')
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import os
import atom
import gdata
import gdata.service
import gdata.youtube
YOUTUBE_SERVER = 'gdata.youtube.com'
YOUTUBE_SERVICE = 'youtube'
YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin'
YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime',
'flv')
YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month',
'all_time')
YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating',
'relevance')
YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude')
YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6')
YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured',
'top_rated', 'most_viewed','watch_on_mobile')
YOUTUBE_UPLOAD_URI = 'http://uploads.gdata.youtube.com/feeds/api/users'
YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken'
YOUTUBE_VIDEO_URI = 'http://gdata.youtube.com/feeds/api/videos'
YOUTUBE_USER_FEED_URI = 'http://gdata.youtube.com/feeds/api/users'
YOUTUBE_PLAYLIST_FEED_URI = 'http://gdata.youtube.com/feeds/api/playlists'
YOUTUBE_STANDARD_FEEDS = 'http://gdata.youtube.com/feeds/api/standardfeeds'
YOUTUBE_STANDARD_TOP_RATED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'top_rated')
YOUTUBE_STANDARD_MOST_VIEWED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_viewed')
YOUTUBE_STANDARD_RECENTLY_FEATURED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'recently_featured')
YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'watch_on_mobile')
YOUTUBE_STANDARD_TOP_FAVORITES_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'top_favorites')
YOUTUBE_STANDARD_MOST_RECENT_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_recent')
YOUTUBE_STANDARD_MOST_DISCUSSED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_discussed')
YOUTUBE_STANDARD_MOST_LINKED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_linked')
YOUTUBE_STANDARD_MOST_RESPONDED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS,
'most_responded')
YOUTUBE_SCHEMA = 'http://gdata.youtube.com/schemas'
YOUTUBE_RATING_LINK_REL = '%s#video.ratings' % YOUTUBE_SCHEMA
YOUTUBE_COMPLAINT_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA,
'complaint-reasons.cat')
YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA,
'subscriptiontypes.cat')
YOUTUBE_COMPLAINT_CATEGORY_TERMS = ('PORN', 'VIOLENCE', 'HATE', 'DANGEROUS',
'RIGHTS', 'SPAM')
YOUTUBE_CONTACT_STATUS = ('accepted', 'rejected')
YOUTUBE_CONTACT_CATEGORY = ('Friends', 'Family')
UNKOWN_ERROR = 1000
YOUTUBE_BAD_REQUEST = 400
YOUTUBE_CONFLICT = 409
YOUTUBE_INTERNAL_SERVER_ERROR = 500
YOUTUBE_INVALID_ARGUMENT = 601
YOUTUBE_INVALID_CONTENT_TYPE = 602
YOUTUBE_NOT_A_VIDEO = 603
YOUTUBE_INVALID_KIND = 604
class Error(Exception):
"""Base class for errors within the YouTube service."""
pass
class RequestError(Error):
"""Error class that is thrown in response to an invalid HTTP Request."""
pass
class YouTubeError(Error):
"""YouTube service specific error class."""
pass
class YouTubeService(gdata.service.GDataService):
"""Client for the YouTube service.
Performs all documented Google Data YouTube API functions, such as inserting,
updating and deleting videos, comments, playlist, subscriptions etc.
YouTube Service requires authentication for any write, update or delete
actions.
Attributes:
email: An optional string identifying the user. Required only for
authenticated actions.
password: An optional string identifying the user's password.
source: An optional string identifying the name of your application.
server: An optional address of the YouTube API server. gdata.youtube.com
is provided as the default value.
additional_headers: An optional dictionary containing additional headers
to be passed along with each request. Use to store developer key.
client_id: An optional string identifying your application, required for
authenticated requests, along with a developer key.
developer_key: An optional string value. Register your application at
http://code.google.com/apis/youtube/dashboard to obtain a (free) key.
"""
def __init__(self, email=None, password=None, source=None,
server=YOUTUBE_SERVER, additional_headers=None, client_id=None,
developer_key=None, **kwargs):
"""Creates a client for the YouTube service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'gdata.youtube.com'.
client_id: string (optional) Identifies your application, required for
authenticated requests, along with a developer key.
developer_key: string (optional) Register your application at
http://code.google.com/apis/youtube/dashboard to obtain a (free) key.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
if developer_key and not client_id:
raise YouTubeError('You must also specify the clientId')
gdata.service.GDataService.__init__(
self, email=email, password=password, service=YOUTUBE_SERVICE,
source=source, server=server, additional_headers=additional_headers,
**kwargs)
if client_id is not None and developer_key is not None:
self.additional_headers['X-Gdata-Client'] = client_id
self.additional_headers['X-GData-Key'] = 'key=%s' % developer_key
self.auth_service_url = YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL
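  # A minimal usage sketch (not part of the original module; the email,
  # password, client_id and developer_key below are placeholders):
  #
  #   yt_service = YouTubeService(
  #       email='user@example.com', password='secret', source='my-app',
  #       client_id='my-client-id', developer_key='DEVELOPER_KEY')
  #   yt_service.ProgrammaticLogin()
  #   feed = yt_service.GetTopRatedVideoFeed()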
def GetYouTubeVideoFeed(self, uri):
"""Retrieve a YouTubeVideoFeed.
Args:
uri: A string representing the URI of the feed that is to be retrieved.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubeVideoFeedFromString)
def GetYouTubeVideoEntry(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoEntry.
Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the entry that is to
be retrieved.
video_id: An optional string representing the ID of the video.
Returns:
      A YouTubeVideoEntry if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoEntry() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoEntry() method')
elif video_id and not uri:
uri = '%s/%s' % (YOUTUBE_VIDEO_URI, video_id)
return self.Get(uri, converter=gdata.youtube.YouTubeVideoEntryFromString)
def GetYouTubeContactFeed(self, uri=None, username='default'):
"""Retrieve a YouTubeContactFeed.
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the contact feed that
is to be retrieved.
username: An optional string representing the username. Defaults to the
currently authenticated user.
Returns:
A YouTubeContactFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeContactFeed() method.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'contacts')
return self.Get(uri, converter=gdata.youtube.YouTubeContactFeedFromString)
def GetYouTubeContactEntry(self, uri):
"""Retrieve a YouTubeContactEntry.
Args:
uri: A string representing the URI of the contact entry that is to
be retrieved.
Returns:
A YouTubeContactEntry if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubeContactEntryFromString)
def GetYouTubeVideoCommentFeed(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoCommentFeed.
Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the comment feed that
is to be retrieved.
video_id: An optional string representing the ID of the video for which
to retrieve the comment feed.
Returns:
A YouTubeVideoCommentFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoCommentFeed() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoCommentFeed() method')
elif video_id and not uri:
uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'comments')
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoCommentFeedFromString)
def GetYouTubeVideoCommentEntry(self, uri):
"""Retrieve a YouTubeVideoCommentEntry.
Args:
uri: A string representing the URI of the comment entry that is to
be retrieved.
Returns:
      A YouTubeVideoCommentEntry if successfully retrieved.
"""
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoCommentEntryFromString)
def GetYouTubeUserFeed(self, uri=None, username=None):
"""Retrieve a YouTubeVideoFeed of user uploaded videos
Either a uri or a username must be provided. This will retrieve list
of videos uploaded by specified user. The uri will be of format
"http://gdata.youtube.com/feeds/api/users/{username}/uploads".
Args:
uri: An optional string representing the URI of the user feed that is
to be retrieved.
username: An optional string representing the username.
Returns:
A YouTubeUserFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeUserFeed() method.
"""
if uri is None and username is None:
raise YouTubeError('You must provide at least a uri or a username '
'to the GetYouTubeUserFeed() method')
elif username and not uri:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads')
return self.Get(uri, converter=gdata.youtube.YouTubeUserFeedFromString)
def GetYouTubeUserEntry(self, uri=None, username=None):
"""Retrieve a YouTubeUserEntry.
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the user entry that is
to be retrieved.
username: An optional string representing the username.
Returns:
A YouTubeUserEntry if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeUserEntry() method.
"""
if uri is None and username is None:
raise YouTubeError('You must provide at least a uri or a username '
'to the GetYouTubeUserEntry() method')
elif username and not uri:
uri = '%s/%s' % (YOUTUBE_USER_FEED_URI, username)
return self.Get(uri, converter=gdata.youtube.YouTubeUserEntryFromString)
def GetYouTubePlaylistFeed(self, uri=None, username='default'):
"""Retrieve a YouTubePlaylistFeed (a feed of playlists for a user).
Either a uri or a username must be provided.
Args:
uri: An optional string representing the URI of the playlist feed that
is to be retrieved.
username: An optional string representing the username. Defaults to the
currently authenticated user.
Returns:
A YouTubePlaylistFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubePlaylistFeed() method.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'playlists')
return self.Get(uri, converter=gdata.youtube.YouTubePlaylistFeedFromString)
def GetYouTubePlaylistEntry(self, uri):
"""Retrieve a YouTubePlaylistEntry.
Args:
uri: A string representing the URI of the playlist feed that is to
be retrieved.
Returns:
A YouTubePlaylistEntry if successfully retrieved.
"""
return self.Get(uri, converter=gdata.youtube.YouTubePlaylistEntryFromString)
def GetYouTubePlaylistVideoFeed(self, uri=None, playlist_id=None):
"""Retrieve a YouTubePlaylistVideoFeed (a feed of videos on a playlist).
Either a uri or a playlist_id must be provided.
Args:
uri: An optional string representing the URI of the playlist video feed
that is to be retrieved.
playlist_id: An optional string representing the Id of the playlist whose
playlist video feed is to be retrieved.
Returns:
A YouTubePlaylistVideoFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a playlist_id to the
GetYouTubePlaylistVideoFeed() method.
"""
if uri is None and playlist_id is None:
raise YouTubeError('You must provide at least a uri or a playlist_id '
'to the GetYouTubePlaylistVideoFeed() method')
elif playlist_id and not uri:
uri = '%s/%s' % (YOUTUBE_PLAYLIST_FEED_URI, playlist_id)
return self.Get(
uri, converter=gdata.youtube.YouTubePlaylistVideoFeedFromString)
def GetYouTubeVideoResponseFeed(self, uri=None, video_id=None):
"""Retrieve a YouTubeVideoResponseFeed.
    Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the video response feed
that is to be retrieved.
video_id: An optional string representing the ID of the video whose
response feed is to be retrieved.
Returns:
A YouTubeVideoResponseFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoResponseFeed() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeVideoResponseFeed() method')
elif video_id and not uri:
uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses')
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoResponseFeedFromString)
def GetYouTubeVideoResponseEntry(self, uri):
"""Retrieve a YouTubeVideoResponseEntry.
Args:
uri: A string representing the URI of the video response entry that
is to be retrieved.
Returns:
A YouTubeVideoResponseEntry if successfully retrieved.
"""
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoResponseEntryFromString)
def GetYouTubeSubscriptionFeed(self, uri=None, username='default'):
"""Retrieve a YouTubeSubscriptionFeed.
Either the uri of the feed or a username must be provided.
Args:
uri: An optional string representing the URI of the feed that is to
be retrieved.
username: An optional string representing the username whose subscription
        feed is to be retrieved. Defaults to the currently authenticated user.
Returns:
      A YouTubeSubscriptionFeed if successfully retrieved.
"""
if uri is None:
uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'subscriptions')
return self.Get(
uri, converter=gdata.youtube.YouTubeSubscriptionFeedFromString)
def GetYouTubeSubscriptionEntry(self, uri):
"""Retrieve a YouTubeSubscriptionEntry.
Args:
uri: A string representing the URI of the entry that is to be retrieved.
Returns:
      A YouTubeSubscriptionEntry if successfully retrieved.
"""
return self.Get(
uri, converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def GetYouTubeRelatedVideoFeed(self, uri=None, video_id=None):
"""Retrieve a YouTubeRelatedVideoFeed.
Either a uri for the feed or a video_id is required.
Args:
uri: An optional string representing the URI of the feed that is to
be retrieved.
video_id: An optional string representing the ID of the video for which
to retrieve the related video feed.
Returns:
A YouTubeRelatedVideoFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeRelatedVideoFeed() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the GetYouTubeRelatedVideoFeed() method')
elif video_id and not uri:
uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'related')
return self.Get(
uri, converter=gdata.youtube.YouTubeVideoFeedFromString)
def GetTopRatedVideoFeed(self):
"""Retrieve the 'top_rated' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_RATED_URI)
def GetMostViewedVideoFeed(self):
"""Retrieve the 'most_viewed' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_VIEWED_URI)
def GetRecentlyFeaturedVideoFeed(self):
"""Retrieve the 'recently_featured' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_RECENTLY_FEATURED_URI)
def GetWatchOnMobileVideoFeed(self):
"""Retrieve the 'watch_on_mobile' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI)
def GetTopFavoritesVideoFeed(self):
"""Retrieve the 'top_favorites' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_FAVORITES_URI)
def GetMostRecentVideoFeed(self):
"""Retrieve the 'most_recent' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RECENT_URI)
def GetMostDiscussedVideoFeed(self):
"""Retrieve the 'most_discussed' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_DISCUSSED_URI)
def GetMostLinkedVideoFeed(self):
"""Retrieve the 'most_linked' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_LINKED_URI)
def GetMostRespondedVideoFeed(self):
"""Retrieve the 'most_responded' standard video feed.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RESPONDED_URI)
def GetUserFavoritesFeed(self, username='default'):
"""Retrieve the favorites feed for a given user.
Args:
username: An optional string representing the username whose favorites
feed is to be retrieved. Defaults to the currently authenticated user.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
favorites_feed_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username,
'favorites')
return self.GetYouTubeVideoFeed(favorites_feed_uri)
def InsertVideoEntry(self, video_entry, filename_or_handle,
youtube_username='default',
content_type='video/quicktime'):
"""Upload a new video to YouTube using the direct upload mechanism.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to upload.
filename_or_handle: A file-like object or file name where the video
will be read from.
      youtube_username: An optional string representing the username of the
        account to which this video is to be uploaded. Defaults to the
        currently authenticated user.
content_type: An optional string representing internet media type
(a.k.a. mime type) of the media object. Currently the YouTube API
supports these types:
o video/mpeg
o video/quicktime
o video/x-msvideo
o video/mp4
o video/x-flv
Returns:
The newly created YouTubeVideoEntry if successful.
Raises:
AssertionError: video_entry must be a gdata.youtube.VideoEntry instance.
YouTubeError: An error occurred trying to read the video file provided.
gdata.service.RequestError: An error occurred trying to upload the video
to the API server.
"""
# We need to perform a series of checks on the video_entry and on the
# file that we plan to upload, such as checking whether we have a valid
# video_entry and that the file is the correct type and readable, prior
# to performing the actual POST request.
try:
assert(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry))
except AssertionError:
raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT,
'body':'`video_entry` must be a gdata.youtube.VideoEntry instance',
'reason':'Found %s, not VideoEntry' % type(video_entry)
})
majtype, mintype = content_type.split('/')
try:
assert(mintype in YOUTUBE_SUPPORTED_UPLOAD_TYPES)
except (ValueError, AssertionError):
raise YouTubeError({'status':YOUTUBE_INVALID_CONTENT_TYPE,
'body':'This is not a valid content type: %s' % content_type,
'reason':'Accepted content types: %s' %
['video/%s' % (t) for t in YOUTUBE_SUPPORTED_UPLOAD_TYPES]})
if (isinstance(filename_or_handle, (str, unicode))
and os.path.exists(filename_or_handle)):
mediasource = gdata.MediaSource()
mediasource.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):
import StringIO
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0)
file_handle = StringIO.StringIO(filename_or_handle.read())
name = 'video'
if hasattr(filename_or_handle, 'name'):
name = filename_or_handle.name
mediasource = gdata.MediaSource(file_handle, content_type,
content_length=file_handle.len, file_name=name)
else:
raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body':
'`filename_or_handle` must be a path name or a file-like object',
'reason': ('Found %s, not path name or object '
'with a .read() method' % type(filename_or_handle))})
upload_uri = '%s/%s/%s' % (YOUTUBE_UPLOAD_URI, youtube_username,
'uploads')
self.additional_headers['Slug'] = mediasource.file_name
# Using a nested try statement to retain Python 2.4 compatibility
try:
try:
return self.Post(video_entry, uri=upload_uri, media_source=mediasource,
converter=gdata.youtube.YouTubeVideoEntryFromString)
except gdata.service.RequestError, e:
raise YouTubeError(e.args[0])
finally:
del(self.additional_headers['Slug'])
def CheckUploadStatus(self, video_entry=None, video_id=None):
"""Check upload status on a recently uploaded video entry.
Needs authentication. Either video_entry or video_id must be provided.
Args:
video_entry: An optional YouTubeVideoEntry whose upload status to check
video_id: An optional string representing the ID of the uploaded video
whose status is to be checked.
Returns:
A tuple containing (video_upload_state, detailed_message) or None if
no status information is found.
Raises:
YouTubeError: You must provide at least a video_entry or a video_id to the
CheckUploadStatus() method.
"""
if video_entry is None and video_id is None:
      raise YouTubeError('You must provide at least a video_entry or a '
                         'video_id to the CheckUploadStatus() method')
elif video_id and not video_entry:
video_entry = self.GetYouTubeVideoEntry(video_id=video_id)
control = video_entry.control
if control is not None:
draft = control.draft
if draft is not None:
if draft.text == 'yes':
yt_state = control.extension_elements[0]
if yt_state is not None:
state_value = yt_state.attributes['name']
message = ''
if yt_state.text is not None:
message = yt_state.text
return (state_value, message)
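  # Usage sketch (the video id is a placeholder):
  #
  #   status = yt_service.CheckUploadStatus(video_id='ZTUVgYoeN_b')
  #   if status is not None:
  #     upload_state, message = status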
def GetFormUploadToken(self, video_entry, uri=YOUTUBE_UPLOAD_TOKEN_URI):
"""Receives a YouTube Token and a YouTube PostUrl from a YouTubeVideoEntry.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to upload (meta-data only).
uri: An optional string representing the URI from where to fetch the
        token information. Defaults to YOUTUBE_UPLOAD_TOKEN_URI.
Returns:
A tuple containing the URL to which to post your video file, along
with the youtube token that must be included with your upload in the
form of: (post_url, youtube_token).
"""
try:
response = self.Post(video_entry, uri)
except gdata.service.RequestError, e:
raise YouTubeError(e.args[0])
tree = ElementTree.fromstring(response)
for child in tree:
if child.tag == 'url':
post_url = child.text
elif child.tag == 'token':
youtube_token = child.text
return (post_url, youtube_token)
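  # Sketch of the browser-based upload flow this token is meant for
  # (per the YouTube browser-based upload protocol; 'entry' carries
  # meta-data only, the video file itself is posted by the browser):
  #
  #   post_url, token = yt_service.GetFormUploadToken(entry)
  #   # Render an HTML form whose action is post_url plus a '?nexturl='
  #   # redirect parameter, with a file input and a hidden 'token' field
  #   # set to the returned token.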
def UpdateVideoEntry(self, video_entry):
"""Updates a video entry's meta-data.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to update, containing updated
meta-data.
Returns:
An updated YouTubeVideoEntry on success or None.
"""
for link in video_entry.link:
if link.rel == 'edit':
edit_uri = link.href
return self.Put(video_entry, uri=edit_uri,
converter=gdata.youtube.YouTubeVideoEntryFromString)
def DeleteVideoEntry(self, video_entry):
"""Deletes a video entry.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to be deleted.
Returns:
True if entry was deleted successfully.
"""
for link in video_entry.link:
if link.rel == 'edit':
edit_uri = link.href
return self.Delete(edit_uri)
def AddRating(self, rating_value, video_entry):
"""Add a rating to a video entry.
Needs authentication.
Args:
rating_value: The integer value for the rating (between 1 and 5).
video_entry: The YouTubeVideoEntry to be rated.
Returns:
True if the rating was added successfully.
Raises:
YouTubeError: rating_value must be between 1 and 5 in AddRating().
"""
if rating_value < 1 or rating_value > 5:
raise YouTubeError('rating_value must be between 1 and 5 in AddRating()')
entry = gdata.GDataEntry()
rating = gdata.youtube.Rating(min='1', max='5')
rating.extension_attributes['name'] = 'value'
rating.extension_attributes['value'] = str(rating_value)
entry.extension_elements.append(rating)
for link in video_entry.link:
if link.rel == YOUTUBE_RATING_LINK_REL:
rating_uri = link.href
return self.Post(entry, uri=rating_uri)
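  # Usage sketch: yt_service.AddRating(4, video_entry) posts a rating of 4
  # (on the 1-5 scale) to the entry's ratings link.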
def AddComment(self, comment_text, video_entry):
"""Add a comment to a video entry.
Needs authentication. Note that each comment that is posted must contain
the video entry that it is to be posted to.
Args:
comment_text: A string representing the text of the comment.
video_entry: The YouTubeVideoEntry to be commented on.
Returns:
True if the comment was added successfully.
"""
content = atom.Content(text=comment_text)
comment_entry = gdata.youtube.YouTubeVideoCommentEntry(content=content)
comment_post_uri = video_entry.comments.feed_link[0].href
return self.Post(comment_entry, uri=comment_post_uri)
def AddVideoResponse(self, video_id_to_respond_to, video_response):
"""Add a video response.
Needs authentication.
Args:
video_id_to_respond_to: A string representing the ID of the video to be
responded to.
video_response: YouTubeVideoEntry to be posted as a response.
Returns:
True if video response was posted successfully.
"""
post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id_to_respond_to,
'responses')
return self.Post(video_response, uri=post_uri)
def DeleteVideoResponse(self, video_id, response_video_id):
"""Delete a video response.
Needs authentication.
Args:
video_id: A string representing the ID of video that contains the
response.
response_video_id: A string representing the ID of the video that was
posted as a response.
Returns:
      True if the video response was deleted successfully.
"""
delete_uri = '%s/%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses',
response_video_id)
return self.Delete(delete_uri)
def AddComplaint(self, complaint_text, complaint_term, video_id):
"""Add a complaint for a particular video entry.
Needs authentication.
Args:
complaint_text: A string representing the complaint text.
complaint_term: A string representing the complaint category term.
video_id: A string representing the ID of YouTubeVideoEntry to
complain about.
Returns:
True if posted successfully.
Raises:
YouTubeError: Your complaint_term is not valid.
"""
if complaint_term not in YOUTUBE_COMPLAINT_CATEGORY_TERMS:
raise YouTubeError('Your complaint_term is not valid')
content = atom.Content(text=complaint_text)
category = atom.Category(term=complaint_term,
scheme=YOUTUBE_COMPLAINT_CATEGORY_SCHEME)
complaint_entry = gdata.GDataEntry(content=content, category=[category])
post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'complaints')
return self.Post(complaint_entry, post_uri)
def AddVideoEntryToFavorites(self, video_entry, username='default'):
"""Add a video entry to a users favorite feed.
Needs authentication.
Args:
video_entry: The YouTubeVideoEntry to add.
username: An optional string representing the username to whose favorite
feed you wish to add the entry. Defaults to the currently
authenticated user.
Returns:
The posted YouTubeVideoEntry if successfully posted.
"""
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites')
return self.Post(video_entry, post_uri,
converter=gdata.youtube.YouTubeVideoEntryFromString)
def DeleteVideoEntryFromFavorites(self, video_id, username='default'):
"""Delete a video entry from the users favorite feed.
Needs authentication.
Args:
video_id: A string representing the ID of the video that is to be removed
username: An optional string representing the username of the user's
favorite feed. Defaults to the currently authenticated user.
Returns:
True if entry was successfully deleted.
"""
edit_link = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites',
video_id)
return self.Delete(edit_link)
def AddPlaylist(self, playlist_title, playlist_description,
playlist_private=None):
"""Add a new playlist to the currently authenticated users account.
Needs authentication.
Args:
playlist_title: A string representing the title for the new playlist.
playlist_description: A string representing the description of the
playlist.
playlist_private: An optional boolean, set to True if the playlist is
to be private.
Returns:
The YouTubePlaylistEntry if successfully posted.
"""
playlist_entry = gdata.youtube.YouTubePlaylistEntry(
title=atom.Title(text=playlist_title),
description=gdata.youtube.Description(text=playlist_description))
if playlist_private:
playlist_entry.private = gdata.youtube.Private()
playlist_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, 'default',
'playlists')
return self.Post(playlist_entry, playlist_post_uri,
converter=gdata.youtube.YouTubePlaylistEntryFromString)
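  # Usage sketch (titles are placeholders):
  #
  #   new_playlist = yt_service.AddPlaylist('Test playlist',
  #                                         'A test playlist',
  #                                         playlist_private=True)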
def UpdatePlaylist(self, playlist_id, new_playlist_title,
new_playlist_description, playlist_private=None,
username='default'):
"""Update a playlist with new meta-data.
Needs authentication.
Args:
playlist_id: A string representing the ID of the playlist to be updated.
new_playlist_title: A string representing a new title for the playlist.
new_playlist_description: A string representing a new description for the
playlist.
playlist_private: An optional boolean, set to True if the playlist is
to be private.
username: An optional string representing the username whose playlist is
to be updated. Defaults to the currently authenticated user.
Returns:
A YouTubePlaylistEntry if the update was successful.
"""
updated_playlist = gdata.youtube.YouTubePlaylistEntry(
title=atom.Title(text=new_playlist_title),
description=gdata.youtube.Description(text=new_playlist_description))
if playlist_private:
updated_playlist.private = gdata.youtube.Private()
playlist_put_uri = '%s/%s/playlists/%s' % (YOUTUBE_USER_FEED_URI, username,
playlist_id)
return self.Put(updated_playlist, playlist_put_uri,
converter=gdata.youtube.YouTubePlaylistEntryFromString)
def DeletePlaylist(self, playlist_uri):
"""Delete a playlist from the currently authenticated users playlists.
Needs authentication.
Args:
playlist_uri: A string representing the URI of the playlist that is
to be deleted.
Returns:
True if successfully deleted.
"""
return self.Delete(playlist_uri)
def AddPlaylistVideoEntryToPlaylist(
self, playlist_uri, video_id, custom_video_title=None,
custom_video_description=None):
"""Add a video entry to a playlist, optionally providing a custom title
and description.
Needs authentication.
Args:
playlist_uri: A string representing the URI of the playlist to which this
video entry is to be added.
video_id: A string representing the ID of the video entry to add.
custom_video_title: An optional string representing a custom title for
the video (only shown on the playlist).
custom_video_description: An optional string representing a custom
description for the video (only shown on the playlist).
Returns:
A YouTubePlaylistVideoEntry if successfully posted.
"""
playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry(
atom_id=atom.Id(text=video_id))
if custom_video_title:
playlist_video_entry.title = atom.Title(text=custom_video_title)
if custom_video_description:
playlist_video_entry.description = gdata.youtube.Description(
text=custom_video_description)
return self.Post(playlist_video_entry, playlist_uri,
converter=gdata.youtube.YouTubePlaylistVideoEntryFromString)
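  # Usage sketch (assumes 'playlist_entry' was fetched via
  # GetYouTubePlaylistFeed(); the feed_link lookup below reflects the
  # gdata.youtube playlist entry structure, and the video id is a
  # placeholder):
  #
  #   playlist_uri = playlist_entry.feed_link[0].href
  #   yt_service.AddPlaylistVideoEntryToPlaylist(
  #       playlist_uri, 'ZTUVgYoeN_b', 'Custom title', 'Custom description')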
def UpdatePlaylistVideoEntryMetaData(
self, playlist_uri, playlist_entry_id, new_video_title,
new_video_description, new_video_position):
"""Update the meta data for a YouTubePlaylistVideoEntry.
Needs authentication.
Args:
playlist_uri: A string representing the URI of the playlist that contains
the entry to be updated.
playlist_entry_id: A string representing the ID of the entry to be
updated.
new_video_title: A string representing the new title for the video entry.
new_video_description: A string representing the new description for
the video entry.
new_video_position: An integer representing the new position on the
playlist for the video.
Returns:
A YouTubePlaylistVideoEntry if the update was successful.
"""
playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry(
title=atom.Title(text=new_video_title),
description=gdata.youtube.Description(text=new_video_description),
position=gdata.youtube.Position(text=str(new_video_position)))
playlist_put_uri = playlist_uri + '/' + playlist_entry_id
return self.Put(playlist_video_entry, playlist_put_uri,
converter=gdata.youtube.YouTubePlaylistVideoEntryFromString)
def DeletePlaylistVideoEntry(self, playlist_uri, playlist_video_entry_id):
"""Delete a playlist video entry from a playlist.
Needs authentication.
Args:
      playlist_uri: A URI representing the playlist from which the playlist
        video entry is to be removed.
      playlist_video_entry_id: A string representing the ID of the playlist
        video entry that is to be removed.
Returns:
True if entry was successfully deleted.
"""
delete_uri = '%s/%s' % (playlist_uri, playlist_video_entry_id)
return self.Delete(delete_uri)
def AddSubscriptionToChannel(self, username_to_subscribe_to,
my_username = 'default'):
"""Add a new channel subscription to the currently authenticated users
account.
Needs authentication.
Args:
username_to_subscribe_to: A string representing the username of the
channel to which we want to subscribe to.
      my_username: An optional string representing the name of the user who is
        subscribing. Defaults to the currently authenticated user.
Returns:
A new YouTubeSubscriptionEntry if successfully posted.
"""
subscription_category = atom.Category(
scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
term='channel')
subscription_username = gdata.youtube.Username(
text=username_to_subscribe_to)
subscription_entry = gdata.youtube.YouTubeSubscriptionEntry(
category=subscription_category,
username=subscription_username)
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'subscriptions')
return self.Post(subscription_entry, post_uri,
converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def AddSubscriptionToFavorites(self, username, my_username = 'default'):
"""Add a new subscription to a users favorites to the currently
authenticated user's account.
    Needs authentication.
Args:
username: A string representing the username of the user's favorite feed
to subscribe to.
my_username: An optional string representing the username of the user
that is to be subscribed. Defaults to currently authenticated user.
Returns:
A new YouTubeSubscriptionEntry if successful.
"""
subscription_category = atom.Category(
scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
term='favorites')
subscription_username = gdata.youtube.Username(text=username)
subscription_entry = gdata.youtube.YouTubeSubscriptionEntry(
category=subscription_category,
username=subscription_username)
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'subscriptions')
return self.Post(subscription_entry, post_uri,
converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def AddSubscriptionToQuery(self, query, my_username = 'default'):
"""Add a new subscription to a specific keyword query to the currently
authenticated user's account.
    Needs authentication.
Args:
query: A string representing the keyword query to subscribe to.
my_username: An optional string representing the username of the user
that is to be subscribed. Defaults to currently authenticated user.
Returns:
A new YouTubeSubscriptionEntry if successful.
"""
subscription_category = atom.Category(
scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME,
term='query')
subscription_query_string = gdata.youtube.QueryString(text=query)
subscription_entry = gdata.youtube.YouTubeSubscriptionEntry(
category=subscription_category,
query_string=subscription_query_string)
post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'subscriptions')
return self.Post(subscription_entry, post_uri,
converter=gdata.youtube.YouTubeSubscriptionEntryFromString)
def DeleteSubscription(self, subscription_uri):
"""Delete a subscription from the currently authenticated user's account.
Needs authentication.
Args:
subscription_uri: A string representing the URI of the subscription that
is to be deleted.
Returns:
True if deleted successfully.
"""
return self.Delete(subscription_uri)
def AddContact(self, contact_username, my_username='default'):
"""Add a new contact to the currently authenticated user's contact feed.
Needs authentication.
Args:
contact_username: A string representing the username of the contact
that you wish to add.
      my_username: An optional string representing the username of the user to
        whose contact feed the new contact is to be added. Defaults to the
        currently authenticated user.
Returns:
A YouTubeContactEntry if added successfully.
"""
contact_category = atom.Category(
scheme = 'http://gdata.youtube.com/schemas/2007/contact.cat',
term = 'Friends')
contact_username = gdata.youtube.Username(text=contact_username)
contact_entry = gdata.youtube.YouTubeContactEntry(
category=contact_category,
username=contact_username)
contact_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'contacts')
return self.Post(contact_entry, contact_post_uri,
converter=gdata.youtube.YouTubeContactEntryFromString)
def UpdateContact(self, contact_username, new_contact_status,
new_contact_category, my_username='default'):
"""Update a contact, providing a new status and a new category.
Needs authentication.
Args:
contact_username: A string representing the username of the contact
that is to be updated.
new_contact_status: A string representing the new status of the contact.
This can either be set to 'accepted' or 'rejected'.
new_contact_category: A string representing the new category for the
contact, either 'Friends' or 'Family'.
my_username: An optional string representing the username of the user
whose contact feed we are modifying. Defaults to the currently
authenticated user.
Returns:
      A YouTubeContactEntry if updated successfully.
Raises:
YouTubeError: New contact status must be within the accepted values. Or
new contact category must be within the accepted categories.
"""
if new_contact_status not in YOUTUBE_CONTACT_STATUS:
raise YouTubeError('New contact status must be one of %s' %
(' '.join(YOUTUBE_CONTACT_STATUS)))
if new_contact_category not in YOUTUBE_CONTACT_CATEGORY:
raise YouTubeError('New contact category must be one of %s' %
(' '.join(YOUTUBE_CONTACT_CATEGORY)))
contact_category = atom.Category(
scheme='http://gdata.youtube.com/schemas/2007/contact.cat',
term=new_contact_category)
contact_status = gdata.youtube.Status(text=new_contact_status)
contact_entry = gdata.youtube.YouTubeContactEntry(
category=contact_category,
status=contact_status)
contact_put_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'contacts', contact_username)
return self.Put(contact_entry, contact_put_uri,
converter=gdata.youtube.YouTubeContactEntryFromString)
def DeleteContact(self, contact_username, my_username='default'):
"""Delete a contact from a users contact feed.
Needs authentication.
Args:
contact_username: A string representing the username of the contact
that is to be deleted.
my_username: An optional string representing the username of the user's
contact feed from which to delete the contact. Defaults to the
currently authenticated user.
Returns:
True if the contact was deleted successfully
"""
contact_edit_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username,
'contacts', contact_username)
return self.Delete(contact_edit_uri)
def _GetDeveloperKey(self):
"""Getter for Developer Key property.
Returns:
      A string representing the developer key if it has been set, otherwise
        None.
"""
if 'X-GData-Key' in self.additional_headers:
return self.additional_headers['X-GData-Key'][4:]
else:
return None
def _SetDeveloperKey(self, developer_key):
"""Setter for Developer Key property.
Sets the developer key in the 'X-GData-Key' header. The actual value that
is set is 'key=' plus the developer_key that was passed.
"""
self.additional_headers['X-GData-Key'] = 'key=' + developer_key
developer_key = property(_GetDeveloperKey, _SetDeveloperKey,
doc="""The Developer Key property""")
def _GetClientId(self):
"""Getter for Client Id property.
Returns:
      A string representing the client_id if it has been set, otherwise None.
"""
if 'X-Gdata-Client' in self.additional_headers:
return self.additional_headers['X-Gdata-Client']
else:
return None
def _SetClientId(self, client_id):
"""Setter for Client Id property.
Sets the 'X-Gdata-Client' header.
"""
self.additional_headers['X-Gdata-Client'] = client_id
client_id = property(_GetClientId, _SetClientId,
doc="""The ClientId property""")
def Query(self, uri):
"""Performs a query and returns a resulting feed or entry.
Args:
uri: A string representing the URI of the feed that is to be queried.
Returns:
On success, a tuple in the form:
(boolean succeeded=True, ElementTree._Element result)
On failure, a tuple in the form:
(boolean succeeded=False, {'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server's response})
"""
result = self.Get(uri)
return result
def YouTubeQuery(self, query):
"""Performs a YouTube specific query and returns a resulting feed or entry.
Args:
      query: A Query object or one of its sub-classes (YouTubeVideoQuery,
YouTubeUserQuery or YouTubePlaylistQuery).
Returns:
      Depending on the type of Query object submitted, returns either a
      YouTubeVideoFeed, a YouTubeUserFeed or a YouTubePlaylistFeed. If the
Query object provided was not YouTube-related, a tuple is returned.
On success the tuple will be in this form:
(boolean succeeded=True, ElementTree._Element result)
On failure, the tuple will be in this form:
(boolean succeeded=False, {'status': HTTP status code from server,
'reason': HTTP reason from the server,
'body': HTTP body of the server response})
"""
result = self.Query(query.ToUri())
if isinstance(query, YouTubeVideoQuery):
return gdata.youtube.YouTubeVideoFeedFromString(result.ToString())
elif isinstance(query, YouTubeUserQuery):
return gdata.youtube.YouTubeUserFeedFromString(result.ToString())
elif isinstance(query, YouTubePlaylistQuery):
return gdata.youtube.YouTubePlaylistFeedFromString(result.ToString())
else:
return result
class YouTubeVideoQuery(gdata.service.Query):
"""Subclasses gdata.service.Query to represent a YouTube Data API query.
Attributes are set dynamically via properties. Properties correspond to
the standard Google Data API query parameters with YouTube Data API
extensions. Please refer to the API documentation for details.
Attributes:
vq: The vq parameter, which is only supported for video feeds, specifies a
search query term. Refer to API documentation for further details.
orderby: The orderby parameter, which is only supported for video feeds,
specifies the value that will be used to sort videos in the search
result set. Valid values for this parameter are relevance, published,
viewCount and rating.
time: The time parameter, which is only available for the top_rated,
top_favorites, most_viewed, most_discussed, most_linked and
most_responded standard feeds, restricts the search to videos uploaded
within the specified time. Valid values for this parameter are today
(1 day), this_week (7 days), this_month (1 month) and all_time.
The default value for this parameter is all_time.
format: The format parameter specifies that videos must be available in a
particular video format. Refer to the API documentation for details.
racy: The racy parameter allows a search result set to include restricted
content as well as standard content. Valid values for this parameter
are include and exclude. By default, restricted content is excluded.
lr: The lr parameter restricts the search to videos that have a title,
description or keywords in a specific language. Valid values for the lr
parameter are ISO 639-1 two-letter language codes.
restriction: The restriction parameter identifies the IP address that
should be used to filter videos that can only be played in specific
countries.
location: A string of geo coordinates. Note that this is not used when the
search is performed but rather to filter the returned videos for ones
that match to the location entered.
"""
def __init__(self, video_id=None, feed_type=None, text_query=None,
params=None, categories=None):
if feed_type in YOUTUBE_STANDARDFEEDS:
feed = 'http://%s/feeds/standardfeeds/%s' % (YOUTUBE_SERVER, feed_type)
    elif feed_type in ('responses', 'comments') and video_id:
feed = 'http://%s/feeds/videos/%s/%s' % (YOUTUBE_SERVER, video_id,
feed_type)
else:
feed = 'http://%s/feeds/videos' % (YOUTUBE_SERVER)
gdata.service.Query.__init__(self, feed, text_query=text_query,
params=params, categories=categories)
def _GetVideoQuery(self):
if 'vq' in self:
return self['vq']
else:
return None
def _SetVideoQuery(self, val):
self['vq'] = val
vq = property(_GetVideoQuery, _SetVideoQuery,
doc="""The video query (vq) query parameter""")
def _GetOrderBy(self):
if 'orderby' in self:
return self['orderby']
else:
return None
def _SetOrderBy(self, val):
if val not in YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS:
      if not val.startswith('relevance_lang_'):
raise YouTubeError('OrderBy must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS))
self['orderby'] = val
orderby = property(_GetOrderBy, _SetOrderBy,
doc="""The orderby query parameter""")
def _GetTime(self):
if 'time' in self:
return self['time']
else:
return None
def _SetTime(self, val):
if val not in YOUTUBE_QUERY_VALID_TIME_PARAMETERS:
raise YouTubeError('Time must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_TIME_PARAMETERS))
self['time'] = val
time = property(_GetTime, _SetTime,
doc="""The time query parameter""")
def _GetFormat(self):
if 'format' in self:
return self['format']
else:
return None
def _SetFormat(self, val):
if val not in YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS:
raise YouTubeError('Format must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS))
self['format'] = val
format = property(_GetFormat, _SetFormat,
doc="""The format query parameter""")
def _GetRacy(self):
if 'racy' in self:
return self['racy']
else:
return None
def _SetRacy(self, val):
if val not in YOUTUBE_QUERY_VALID_RACY_PARAMETERS:
raise YouTubeError('Racy must be one of: %s ' %
' '.join(YOUTUBE_QUERY_VALID_RACY_PARAMETERS))
self['racy'] = val
racy = property(_GetRacy, _SetRacy,
doc="""The racy query parameter""")
def _GetLanguageRestriction(self):
if 'lr' in self:
return self['lr']
else:
return None
def _SetLanguageRestriction(self, val):
self['lr'] = val
lr = property(_GetLanguageRestriction, _SetLanguageRestriction,
doc="""The lr (language restriction) query parameter""")
def _GetIPRestriction(self):
if 'restriction' in self:
return self['restriction']
else:
return None
def _SetIPRestriction(self, val):
self['restriction'] = val
restriction = property(_GetIPRestriction, _SetIPRestriction,
doc="""The restriction query parameter""")
def _GetLocation(self):
if 'location' in self:
return self['location']
else:
return None
def _SetLocation(self, val):
self['location'] = val
location = property(_GetLocation, _SetLocation,
doc="""The location query parameter""")
class YouTubeUserQuery(YouTubeVideoQuery):
"""Subclasses YouTubeVideoQuery to perform user-specific queries.
Attributes are set dynamically via properties. Properties correspond to
the standard Google Data API query parameters with YouTube Data API
extensions.
"""
def __init__(self, username=None, feed_type=None, subscription_id=None,
text_query=None, params=None, categories=None):
uploads_favorites_playlists = ('uploads', 'favorites', 'playlists')
    if feed_type == 'subscriptions' and subscription_id and username:
feed = "http://%s/feeds/users/%s/%s/%s" % (YOUTUBE_SERVER, username,
feed_type, subscription_id)
    elif feed_type == 'subscriptions' and not subscription_id and username:
feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username,
feed_type)
elif feed_type in uploads_favorites_playlists:
feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username,
feed_type)
else:
feed = "http://%s/feeds/users" % (YOUTUBE_SERVER)
YouTubeVideoQuery.__init__(self, feed, text_query=text_query,
params=params, categories=categories)
class YouTubePlaylistQuery(YouTubeVideoQuery):
"""Subclasses YouTubeVideoQuery to perform playlist-specific queries.
Attributes are set dynamically via properties. Properties correspond to
the standard Google Data API query parameters with YouTube Data API
extensions.
"""
def __init__(self, playlist_id, text_query=None, params=None,
categories=None):
if playlist_id:
feed = "http://%s/feeds/playlists/%s" % (YOUTUBE_SERVER, playlist_id)
else:
feed = "http://%s/feeds/playlists" % (YOUTUBE_SERVER)
YouTubeVideoQuery.__init__(self, feed, text_query=text_query,
params=params, categories=categories)
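# Illustrative sketch of tying the query classes to the service: 'top_rated'
# is assumed to be among the YOUTUBE_STANDARDFEEDS declared earlier in this
# module, and yt_service stands for an instance of the service class above.
#
#   query = YouTubeVideoQuery(feed_type='top_rated')
#   query.time = 'this_week'
#   feed = yt_service.YouTubeQuery(query)  # returns a YouTubeVideoFeed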
| apache-2.0 |
ds-hwang/chromium-crosswalk | build/download_gold_plugin.py | 19 | 1658 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to download LLVM gold plugin from google storage."""
import find_depot_tools
import json
import os
import shutil
import subprocess
import sys
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
DEPOT_PATH = find_depot_tools.add_depot_tools_to_path()
GSUTIL_PATH = os.path.join(DEPOT_PATH, 'gsutil.py')
LLVM_BUILD_PATH = os.path.join(CHROME_SRC, 'third_party', 'llvm-build',
'Release+Asserts')
CLANG_UPDATE_PY = os.path.join(CHROME_SRC, 'tools', 'clang', 'scripts',
'update.py')
CLANG_REVISION = os.popen(CLANG_UPDATE_PY + ' --print-revision').read().rstrip()
CLANG_BUCKET = 'gs://chromium-browser-clang/Linux_x64'
def main():
targz_name = 'llvmgold-%s.tgz' % CLANG_REVISION
remote_path = '%s/%s' % (CLANG_BUCKET, targz_name)
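  # e.g. gs://chromium-browser-clang/Linux_x64/llvmgold-233105.tgz
  # (revision number illustrative)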
os.chdir(LLVM_BUILD_PATH)
# TODO(pcc): Fix gsutil.py cp url file < /dev/null 2>&0
# (currently aborts with exit code 1,
# https://github.com/GoogleCloudPlatform/gsutil/issues/289) or change the
# stdin->stderr redirect in update.py to do something else (crbug.com/494442).
subprocess.check_call(['python', GSUTIL_PATH,
'cp', remote_path, targz_name],
stderr=open('/dev/null', 'w'))
subprocess.check_call(['tar', 'xzf', targz_name])
os.remove(targz_name)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
ah-anssi/SecuML | SecuML/experiments/ExperimentFactory.py | 1 | 1834 | # SecuML
# Copyright (C) 2016-2017 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
import json
from SecuML.experiments import experiment_db_tools
from SecuML.experiments.Tools import dir_exp_tools
experiment_factory = None
def getFactory():
global experiment_factory
if experiment_factory is None:
experiment_factory = ExperimentFactory()
return experiment_factory
class ExperimentFactory(object):
def __init__(self):
self.register = {}
def registerClass(self, class_name, class_obj):
self.register[class_name] = class_obj
def fromJson(self, experiment_id, session):
project, dataset = experiment_db_tools.getProjectDataset(session,
experiment_id)
obj_filename = dir_exp_tools.getExperimentConfigurationFilename(project,
dataset,
experiment_id)
with open(obj_filename, 'r') as obj_file:
obj_dict = json.load(obj_file)
class_name = obj_dict['__type__']
obj = self.register[class_name].fromJson(obj_dict, session)
return obj
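# Illustrative registry usage (class name is a placeholder): each experiment
# class registers itself so that fromJson can rebuild the right subclass from
# the '__type__' field of the stored configuration.
#
#   getFactory().registerClass('MyExperiment', MyExperiment)
#   experiment = getFactory().fromJson(experiment_id, session)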
| gpl-2.0 |
gabrieladt/kops-ec2-autoscaler | autoscaler/kube.py | 1 | 13310 | import datetime
import json
import logging
from dateutil.parser import parse as dateutil_parse
import pykube.exceptions
import autoscaler.utils as utils
logger = logging.getLogger(__name__)
class KubePodStatus(object):
RUNNING = 'Running'
PENDING = 'Pending'
CONTAINER_CREATING = 'ContainerCreating'
SUCCEEDED = 'Succeeded'
FAILED = 'Failed'
_CORDON_LABEL = 'openai/cordoned-by-autoscaler'
class KubePod(object):
_DRAIN_GRACE_PERIOD = datetime.timedelta(seconds=60*60)
def __init__(self, pod):
self.original = pod
metadata = pod.obj['metadata']
self.name = metadata['name']
self.namespace = metadata['namespace']
self.node_name = pod.obj['spec'].get('nodeName')
self.status = pod.obj['status']['phase']
self.uid = metadata['uid']
self.selectors = pod.obj['spec'].get('nodeSelector', {})
# TODO: Remove this, after everyone has migrated off reservations
if 'openai.org/reservation-id' in self.selectors:
del self.selectors['openai.org/reservation-id']
self.labels = metadata.get('labels', {})
self.annotations = metadata.get('annotations', {})
self.owner = self.labels.get('owner', None)
self.creation_time = dateutil_parse(metadata['creationTimestamp'])
self.start_time = dateutil_parse(pod.obj['status']['startTime']) if 'startTime' in pod.obj['status'] else None
self.scheduled_time = None
for condition in pod.obj['status'].get('conditions', []):
if condition['type'] == 'PodScheduled' and condition['status'] == 'True':
self.scheduled_time = dateutil_parse(condition['lastTransitionTime'])
# TODO: refactor
requests = [c.get('resources', {}).get('requests', {}) for c in pod.obj['spec']['containers']]
resource_requests = {}
for d in requests:
for k, v in d.items():
unitless_v = utils.parse_SI(v)
resource_requests[k] = resource_requests.get(k, 0.0) + unitless_v
self.resources = KubeResource(pods=1, **resource_requests)
self.no_schedule_wildcard_toleration = False
self.no_execute_wildcard_toleration = False
self.no_schedule_existential_tolerations = set()
self.no_execute_existential_tolerations = set()
for toleration in pod.obj['spec'].get('tolerations', []):
if toleration.get('operator', 'Equal') == 'Exists':
effect = toleration.get('effect')
if effect is None or effect == 'NoSchedule':
if 'key' not in toleration:
self.no_schedule_wildcard_toleration = True
else:
self.no_schedule_existential_tolerations.add(toleration['key'])
if effect is None or effect == 'NoExecute':
if 'key' not in toleration:
self.no_execute_wildcard_toleration = True
else:
self.no_execute_existential_tolerations.add(toleration['key'])
else:
logger.warn("Equality tolerations not implemented. Pod {} has an equality toleration".format(pod))
def is_mirrored(self):
created_by = json.loads(self.annotations.get('kubernetes.io/created-by', '{}'))
is_daemonset = created_by.get('reference', {}).get('kind') == 'DaemonSet'
return is_daemonset or self.annotations.get('kubernetes.io/config.mirror')
def is_replicated(self):
created_by = json.loads(self.annotations.get('kubernetes.io/created-by', '{}'))
return created_by
def is_critical(self):
return utils.parse_bool_label(self.labels.get('openai/do-not-drain'))
def is_in_drain_grace_period(self):
"""
determines whether the pod is in a grace period for draining
this prevents us from draining pods that are too new
"""
return (self.scheduled_time and
(datetime.datetime.now(self.scheduled_time.tzinfo) - self.scheduled_time) < self._DRAIN_GRACE_PERIOD)
def is_drainable(self):
"""
a pod is considered drainable if:
- it's a daemon
- it's a non-critical replicated pod that has exceeded grace period
"""
return (self.is_mirrored() or
(self.is_replicated() and not self.is_critical() and not self.is_in_drain_grace_period()))
def delete(self):
logger.info('Deleting Pod %s/%s', self.namespace, self.name)
return self.original.delete()
def __hash__(self):
return hash(self.uid)
def __eq__(self, other):
return self.uid == other.uid
def __str__(self):
return 'KubePod({namespace}, {name})'.format(
namespace=self.namespace, name=self.name)
def __repr__(self):
return str(self)
def reverse_bytes(value):
assert len(value) % 2 == 0
result = ""
for i in range(len(value), 0, -2):
result += value[i - 2: i]
return result
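# e.g. reverse_bytes("4C3B2A1D") == "1D2A3B4C"; used below to convert the
# big-endian fields of an Azure VM id into canonical little-endian GUID order.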
class KubeNode(object):
_HEARTBEAT_GRACE_PERIOD = datetime.timedelta(seconds=60*60)
def __init__(self, node):
self.original = node
self.pykube_node = node
metadata = node.obj['metadata']
self.name = metadata['name']
self.instance_id, self.region, self.instance_type, self.provider = self._get_instance_data()
self.capacity = KubeResource(**node.obj['status']['capacity'])
self.used_capacity = KubeResource()
self.creation_time = dateutil_parse(metadata['creationTimestamp'])
last_heartbeat_time = self.creation_time
for condition in node.obj['status'].get('conditions', []):
if condition.get('type') == 'Ready':
last_heartbeat_time = dateutil_parse(condition['lastHeartbeatTime'])
self.last_heartbeat_time = last_heartbeat_time
self.no_schedule_taints = {}
self.no_execute_taints = {}
for taint in node.obj['spec'].get('taints', []):
            try:
                if taint['effect'] == 'NoSchedule':
                    self.no_schedule_taints[taint['key']] = taint['value']
                if taint['effect'] == 'NoExecute':
                    self.no_execute_taints[taint['key']] = taint['value']
            except KeyError:
                logger.info("Taint key or value not found: %s", taint)
def _get_instance_data(self):
"""
returns a tuple (instance id, region, instance type)
"""
labels = self.original.obj['metadata'].get('labels', {})
instance_type = labels.get('aws/type', labels.get('beta.kubernetes.io/instance-type'))
provider = self.original.obj['spec'].get('providerID', '')
if provider.startswith('aws://'):
az, instance_id = tuple(provider.split('/')[-2:])
if az and instance_id:
return (instance_id, az[:-1], instance_type, 'aws')
if labels.get('aws/id') and labels.get('aws/az'):
instance_id = labels['aws/id']
region = labels['aws/az'][:-1]
return (instance_id, region, instance_type, 'aws')
assert provider.startswith('azure:////'), provider
# Id is in wrong order: https://azure.microsoft.com/en-us/blog/accessing-and-using-azure-vm-unique-id/
big_endian_vm_id = provider.replace('azure:////', '')
parts = big_endian_vm_id.split('-')
instance_id = '-'.join([reverse_bytes(parts[0]),
reverse_bytes(parts[1]),
reverse_bytes(parts[2]),
parts[3],
parts[4]]).lower()
instance_type = labels['azure/type']
return (instance_id, 'placeholder', instance_type, 'azure')
@property
def selectors(self):
return self.original.obj['metadata'].get('labels', {})
@property
def unschedulable(self):
return self.original.obj['spec'].get('unschedulable', False)
@property
def can_uncordon(self):
return utils.parse_bool_label(self.selectors.get(_CORDON_LABEL))
def drain(self, pods, notifier=None):
for pod in pods:
if pod.is_drainable() and not pod.is_mirrored():
pod.delete()
logger.info("drained %s", self)
if notifier:
notifier.notify_drained_node(self, pods)
def uncordon(self):
if not utils.parse_bool_label(self.selectors.get(_CORDON_LABEL)):
logger.debug('uncordon %s ignored', self)
return False
try:
self.original.reload()
self.original.obj['spec']['unschedulable'] = False
self.original.update()
logger.info("uncordoned %s", self)
return True
except pykube.exceptions.HTTPError as ex:
logger.info("uncordon failed %s %s", self, ex)
return False
def cordon(self):
try:
self.original.reload()
self.original.obj['spec']['unschedulable'] = True
self.original.obj['metadata'].setdefault('labels', {})[_CORDON_LABEL] = 'true'
self.original.update()
logger.info("cordoned %s", self)
return True
except pykube.exceptions.HTTPError as ex:
logger.info("cordon failed %s %s", self, ex)
return False
def delete(self):
try:
self.original.delete()
logger.info("deleted %s", self)
return True
except pykube.exceptions.HTTPError as ex:
logger.info("delete failed %s %s", self, ex)
return False
def count_pod(self, pod):
assert isinstance(pod, KubePod)
self.used_capacity += pod.resources
def can_fit(self, resources):
assert isinstance(resources, KubeResource)
left = self.capacity - (self.used_capacity + resources)
return left.possible
def is_match(self, pod: KubePod):
"""
whether this node matches all the selectors on the pod
"""
for label, value in pod.selectors.items():
if self.selectors.get(label) != value:
return False
for key in self.no_schedule_taints:
if not (pod.no_schedule_wildcard_toleration or key in pod.no_schedule_existential_tolerations):
return False
for key in self.no_execute_taints:
if not (pod.no_execute_wildcard_toleration or key in pod.no_execute_existential_tolerations):
return False
return True
def is_managed(self):
"""
        an instance is managed if we know its instance ID with the cloud provider.
"""
return self.instance_id is not None
def is_detached(self):
return utils.parse_bool_label(self.selectors.get('openai/detached'))
def is_dead(self):
return datetime.datetime.now(self.last_heartbeat_time.tzinfo) - self.last_heartbeat_time > self._HEARTBEAT_GRACE_PERIOD
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name == other.name
def __str__(self):
return "{}: {} ({})".format(self.name, self.instance_id,
utils.selectors_to_hash(self.selectors))
class KubeResource(object):
def __init__(self, **kwargs):
self.raw = dict((k, utils.parse_resource(v))
for (k, v) in kwargs.items())
def __add__(self, other):
keys = set(self.raw.keys()) | set(other.raw.keys())
raw_diff = dict((k, self.raw.get(k, 0) + other.raw.get(k, 0))
for k in keys)
return KubeResource(**raw_diff)
def __sub__(self, other):
keys = set(self.raw.keys()) | set(other.raw.keys())
raw_diff = dict((k, self.raw.get(k, 0) - other.raw.get(k, 0))
for k in keys)
return KubeResource(**raw_diff)
def __mul__(self, multiplier):
new_raw = dict((k, v * multiplier) for k, v in self.raw.items())
return KubeResource(**new_raw)
def __rmul__(self, multiplier):
return self.__mul__(multiplier)
def __cmp__(self, other):
"""
should return a negative integer if self < other,
zero if self == other, a positive integer if self > other.
we consider self to be greater than other if it exceeds
the resource amount in other in more resource types.
e.g. if self = {cpu: 4, memory: 1K, gpu: 1},
other = {cpu: 2, memory: 2K}, then self exceeds the resource
amount in other in both cpu and gpu, while other exceeds
the resource amount in self in only memory, so self > other.
"""
resource_diff = (self - other).raw
num_resource_types = len(resource_diff)
num_eq = sum(1 for v in resource_diff.values() if v == 0)
num_less = sum(1 for v in resource_diff.values() if v < 0)
num_more = num_resource_types - num_eq - num_less
return num_more - num_less
def __str__(self):
return str(self.raw)
def get(self, key, default=None):
return self.raw.get(key, default)
@property
def possible(self):
return all([x >= 0 for x in self.raw.values()])
| mit |
AgostonSzepessy/servo | tests/wpt/css-tests/tools/runner/report.py | 278 | 9660 | import argparse
import json
import sys
from cgi import escape
from collections import defaultdict
import types
def html_escape(item, escape_quote=False):
if isinstance(item, types.StringTypes):
rv = escape(item)
if escape_quote:
rv = rv.replace('"', """)
return rv
else:
return item
class Raw(object):
"""Simple wrapper around a string to stop it being escaped by html_escape"""
def __init__(self, value):
self.value = value
def __unicode__(self):
return unicode(self.value)
class Node(object):
"""Node structure used when building HTML"""
def __init__(self, name, attrs, children):
#Need list of void elements
self.name = name
self.attrs = attrs
self.children = children
def __unicode__(self):
if self.attrs:
#Need to escape
attrs_unicode = " " + " ".join("%s=\"%s\"" % (html_escape(key),
html_escape(value,
escape_quote=True))
for key, value in self.attrs.iteritems())
else:
attrs_unicode = ""
return "<%s%s>%s</%s>\n" % (self.name,
attrs_unicode,
"".join(unicode(html_escape(item))
for item in self.children),
self.name)
def __str__(self):
return unicode(self).encode("utf8")
class RootNode(object):
"""Special Node representing the document root"""
def __init__(self, *children):
self.children = ["<!DOCTYPE html>"] + list(children)
def __unicode__(self):
return "".join(unicode(item) for item in self.children)
def __str__(self):
return unicode(self).encode("utf8")
def flatten(iterable):
"""Flatten a list of lists by one level so that
[1,["abc"], "def",[2, [3]]]
becomes
[1, "abc", "def", 2, [3]]"""
rv = []
for item in iterable:
if hasattr(item, "__iter__") and not isinstance(item, types.StringTypes):
rv.extend(item)
else:
rv.append(item)
return rv
class HTML(object):
"""Simple HTML templating system. An instance of this class can create
element nodes by calling methods with the same name as the element,
passing in children as positional arguments or as a list, and attributes
as keyword arguments, with _ replacing - and trailing _ for python keywords
e.g.
h = HTML()
print h.html(
html.head(),
html.body([html.h1("Hello World!")], class_="body-class")
)
Would give
<!DOCTYPE html><html><head></head><body class="body-class"><h1>Hello World!</h1></body></html>"""
def __getattr__(self, name):
def make_html(self, *content, **attrs):
for attr_name in attrs.keys():
if "_" in attr_name:
new_name = attr_name.replace("_", "-")
if new_name.endswith("-"):
new_name = new_name[:-1]
attrs[new_name] = attrs.pop(attr_name)
return Node(name, attrs, flatten(content))
method = types.MethodType(make_html, self, HTML)
setattr(self, name, method)
return method
def __call__(self, *children):
return RootNode(*flatten(children))
h = HTML()
class TestResult(object):
"""Simple holder for the results of a single test in a single UA"""
def __init__(self, test):
self.test = test
self.results = {}
    def __eq__(self, other):
        # __cmp__ previously returned a bool here, which broke comparisons;
        # paired with __hash__ below, the intent is value equality on the test.
        return self.test == other.test
def __hash__(self):
return hash(self.test)
def load_data(args):
"""Load data treating args as a list of UA name, filename pairs"""
pairs = []
for i in xrange(0, len(args), 2):
pairs.append(args[i:i+2])
rv = {}
for UA, filename in pairs:
with open(filename) as f:
rv[UA] = json.load(f)
return rv
def test_id(id):
"""Convert a test id in JSON into an immutable object that
can be used as a dictionary key"""
if isinstance(id, list):
return tuple(id)
else:
return id
def all_tests(data):
tests = defaultdict(set)
for UA, results in data.iteritems():
for result in results["results"]:
id = test_id(result["test"])
tests[id] |= set(subtest["name"] for subtest in result["subtests"])
return tests
def group_results(data):
"""Produce a list of UAs and a dictionary mapping specific tests to their
status in all UAs e.g.
["UA1", "UA2"], {"test_id":{"harness":{"UA1": (status1, message1),
"UA2": (status2, message2)},
"subtests":{"subtest1": "UA1": (status1-1, message1-1),
"UA2": (status2-1, message2-1)}}}
Status and message are None if the test didn't run in a particular UA.
Message is None if the test didn't produce a message"""
tests = all_tests(data)
UAs = data.keys()
def result():
return {
"harness": dict((UA, (None, None)) for UA in UAs),
"subtests": None # init this later
}
results_by_test = defaultdict(result)
for UA, results in data.iteritems():
for test_data in results["results"]:
id = test_id(test_data["test"])
result = results_by_test[id]
if result["subtests"] is None:
result["subtests"] = dict(
(name, dict((UA, (None, None)) for UA in UAs)) for name in tests[id]
)
result["harness"][UA] = (test_data["status"], test_data["message"])
for subtest in test_data["subtests"]:
result["subtests"][subtest["name"]][UA] = (subtest["status"],
subtest["message"])
return UAs, results_by_test
def status_cell(status, message=None):
"""Produce a table cell showing the status of a test"""
status = status if status is not None else "NONE"
kwargs = {}
if message:
kwargs["title"] = message
status_text = status.title()
return h.td(status_text, class_="status " + status,
**kwargs)
def test_link(test_id, subtest=None):
"""Produce an <a> element linking to a test"""
if isinstance(test_id, types.StringTypes):
rv = [h.a(test_id, href=test_id)]
else:
rv = [h.a(test_id[0], href=test_id[0]),
" %s " % test_id[1],
h.a(test_id[2], href=test_id[2])]
if subtest is not None:
rv.append(" [%s]" % subtest)
return rv
def summary(UAs, results_by_test):
"""Render the implementation report summary"""
not_passing = []
for test, results in results_by_test.iteritems():
if not any(item[0] in ("PASS", "OK") for item in results["harness"].values()):
not_passing.append((test, None))
for subtest_name, subtest_results in results["subtests"].iteritems():
if not any(item[0] == "PASS" for item in subtest_results.values()):
not_passing.append((test, subtest_name))
if not_passing:
rv = [
h.p("The following tests failed to pass in all UAs:"),
h.ul([h.li(test_link(test, subtest))
for test, subtest in not_passing])
]
else:
rv = "All tests passed in at least one UA"
return rv
def result_rows(UAs, test, result):
"""Render the results for each test run"""
yield h.tr(
h.td(
test_link(test),
rowspan=(1 + len(result["subtests"]))
),
h.td(),
[status_cell(status, message)
for UA, (status, message) in sorted(result["harness"].items())],
class_="test"
)
for name, subtest_result in sorted(result["subtests"].iteritems()):
yield h.tr(
h.td(name),
[status_cell(status, message)
for UA, (status, message) in sorted(subtest_result.items())],
class_="subtest"
)
def result_bodies(UAs, results_by_test):
return [h.tbody(result_rows(UAs, test, result))
for test, result in sorted(results_by_test.iteritems())]
def generate_html(UAs, results_by_test):
"""Generate all the HTML output"""
doc = h(h.html([
h.head(h.meta(charset="utf8"),
h.title("Implementation Report"),
h.link(href="report.css", rel="stylesheet")),
h.body(h.h1("Implementation Report"),
h.h2("Summary"),
summary(UAs, results_by_test),
h.h2("Full Results"),
h.table(
h.thead(
h.tr(
h.th("Test"),
h.th("Subtest"),
[h.th(UA) for UA in sorted(UAs)]
)
),
result_bodies(UAs, results_by_test)
)
)
]))
return doc
def main(filenames):
data = load_data(filenames)
UAs, results_by_test = group_results(data)
return generate_html(UAs, results_by_test)
if __name__ == "__main__":
if not sys.argv[1:]:
print """Please supply a list of UA name, filename pairs e.g.
python report.py Firefox firefox.json Chrome chrome.json IE internet_explorer.json"""
print main(sys.argv[1:])
| mpl-2.0 |
zaccoz/odoo | addons/l10n_be_intrastat/__openerp__.py | 257 | 1631 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2014-2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgian Intrastat Declaration',
'version': '1.0',
'category': 'Reporting',
'description': """
Generates Intrastat XML report for declaration
Based on invoices.
""",
'author': 'Odoo SA',
'depends': ['report_intrastat', 'sale_stock', 'account_accountant', 'l10n_be'],
'data': [
'data/regions.xml',
'data/report.intrastat.code.csv',
'data/transaction.codes.xml',
'data/transport.modes.xml',
'security/groups.xml',
'security/ir.model.access.csv',
'l10n_be_intrastat.xml',
'wizard/l10n_be_intrastat_xml_view.xml',
],
'installable': True,
}
| agpl-3.0 |
qrkourier/ansible | lib/ansible/modules/system/sefcontext.py | 56 | 7940 | #!/usr/bin/python
# (c) 2016, Dag Wieers <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: sefcontext
short_description: Manages SELinux file context mapping definitions
description:
- Manages SELinux file context mapping definitions.
- Similar to the C(semanage fcontext) command.
version_added: '2.2'
options:
target:
description:
- Target path (expression).
required: yes
aliases: [ path ]
ftype:
description:
- File type.
default: a
setype:
description:
- SELinux type for the specified target.
required: yes
seuser:
description:
- SELinux user for the specified target.
selevel:
description:
- SELinux range for the specified target.
aliases: [ serange ]
state:
description:
- Desired boolean value.
choices: [ absent, present ]
default: present
reload:
description:
- Reload SELinux policy after commit.
type: bool
default: 'yes'
notes:
- The changes are persistent across reboots
requirements:
- libselinux-python
- policycoreutils-python
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
# Allow apache to modify files in /srv/git_repos
- sefcontext:
target: '/srv/git_repos(/.*)?'
setype: httpd_git_rw_content_t
state: present
'''
RETURN = r'''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_native
try:
import selinux
HAVE_SELINUX = True
except ImportError:
HAVE_SELINUX = False
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
HAVE_SEOBJECT = False
# Add missing entries (backward compatible)
if HAVE_SEOBJECT:
seobject.file_types.update(dict(
a=seobject.SEMANAGE_FCONTEXT_ALL,
b=seobject.SEMANAGE_FCONTEXT_BLOCK,
c=seobject.SEMANAGE_FCONTEXT_CHAR,
d=seobject.SEMANAGE_FCONTEXT_DIR,
f=seobject.SEMANAGE_FCONTEXT_REG,
l=seobject.SEMANAGE_FCONTEXT_LINK,
p=seobject.SEMANAGE_FCONTEXT_PIPE,
s=seobject.SEMANAGE_FCONTEXT_SOCK,
))
# Make backward compatible
option_to_file_type_str = dict(
a='all files',
b='block device',
c='character device',
d='directory',
f='regular file',
l='symbolic link',
p='named pipe',
s='socket file',
)
def semanage_fcontext_exists(sefcontext, target, ftype):
''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
# Beware that records comprise of a string representation of the file_type
record = (target, option_to_file_type_str[ftype])
records = sefcontext.get_all()
try:
return records[record]
except KeyError:
return None
def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
''' Add or modify SELinux file context mapping definition to the policy. '''
changed = False
prepared_diff = ''
try:
sefcontext = seobject.fcontextRecords(sestore)
sefcontext.set_reload(do_reload)
exists = semanage_fcontext_exists(sefcontext, target, ftype)
if exists:
# Modify existing entry
orig_seuser, orig_serole, orig_setype, orig_serange = exists
if seuser is None:
seuser = orig_seuser
if serange is None:
serange = orig_serange
if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
if not module.check_mode:
sefcontext.modify(target, setype, ftype, serange, seuser)
changed = True
if module._diff:
prepared_diff += '# Change to semanage file context mappings\n'
prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
else:
# Add missing entry
if seuser is None:
seuser = 'system_u'
if serange is None:
serange = 's0'
if not module.check_mode:
sefcontext.add(target, setype, ftype, serange, seuser)
changed = True
if module._diff:
prepared_diff += '# Addition to semanage file context mappings\n'
prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
except Exception:
e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
if module._diff and prepared_diff:
result['diff'] = dict(prepared=prepared_diff)
module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
''' Delete SELinux file context mapping definition from the policy. '''
changed = False
prepared_diff = ''
try:
sefcontext = seobject.fcontextRecords(sestore)
sefcontext.set_reload(do_reload)
exists = semanage_fcontext_exists(sefcontext, target, ftype)
if exists:
# Remove existing entry
orig_seuser, orig_serole, orig_setype, orig_serange = exists
if not module.check_mode:
sefcontext.delete(target, ftype)
changed = True
if module._diff:
prepared_diff += '# Deletion to semanage file context mappings\n'
prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
except Exception:
e = get_exception()
module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
if module._diff and prepared_diff:
result['diff'] = dict(prepared=prepared_diff)
module.exit_json(changed=changed, **result)
def main():
module = AnsibleModule(
argument_spec=dict(
target=dict(required=True, aliases=['path']),
ftype=dict(type='str', default='a', choices=option_to_file_type_str.keys()),
setype=dict(type='str', required=True),
seuser=dict(type='str'),
selevel=dict(type='str', aliases=['serange']),
state=dict(type='str', default='present', choices=['absent', 'present']),
reload=dict(type='bool', default=True),
),
supports_check_mode=True,
)
if not HAVE_SELINUX:
module.fail_json(msg="This module requires libselinux-python")
if not HAVE_SEOBJECT:
module.fail_json(msg="This module requires policycoreutils-python")
if not selinux.is_selinux_enabled():
module.fail_json(msg="SELinux is disabled on this host.")
target = module.params['target']
ftype = module.params['ftype']
setype = module.params['setype']
seuser = module.params['seuser']
serange = module.params['selevel']
state = module.params['state']
do_reload = module.params['reload']
result = dict(target=target, ftype=ftype, setype=setype, state=state)
if state == 'present':
semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
elif state == 'absent':
semanage_fcontext_delete(module, result, target, ftype, do_reload)
else:
module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
if __name__ == '__main__':
main()
| gpl-3.0 |
jeffzheng1/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 82 | 6157 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
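    # e.g. with batch_size=32 and num_threads=4, enqueue_size defaults to 8
    # and queue_capacity to 320.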
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
zahodi/ansible | lib/ansible/utils/module_docs_fragments/dellos10.py | 42 | 2591 | #
# (c) 2015, Peter Sprygada <[email protected]>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
required: false
default: 22
username:
description:
- User to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_USERNAME) will be used instead.
required: false
password:
description:
- Password to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_PASSWORD) will be used instead.
required: false
default: null
ssh_keyfile:
description:
- Path to an ssh key used to authenticate the SSH session to the remote
device. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
required: false
timeout:
description:
- Specifies idle timeout (in seconds) for the connection. Useful if the
console freezes before continuing. For example when saving
configurations.
required: false
default: 10
provider:
description:
- Convenience method that allows all I(dellos10) arguments to be passed as
a dict object. All constraints (required, choices, etc) must be
met either by individual arguments or values in this dict.
required: false
default: null
"""
| gpl-3.0 |
ceibal-tatu/software-center | tests/test_login_backend.py | 3 | 1423 | import os
import unittest
from tests.utils import (
setup_test_env,
)
setup_test_env()
from softwarecenter.backend.login import get_login_backend
from softwarecenter.backend.login_impl.login_sso import (
LoginBackendDbusSSO)
from softwarecenter.backend.login_impl.login_fake import (
LoginBackendDbusSSOFake,
)
class TestLoginBackend(unittest.TestCase):
""" tests the login backend stuff """
def test_fake_and_real_provide_similar_methods(self):
""" test if the real and fake login provide the same functions """
login_real = LoginBackendDbusSSO
login_fake = LoginBackendDbusSSOFake
# ensure that both fake and real implement the same methods
self.assertEqual(
set([x for x in dir(login_real) if not x.startswith("_")]),
set([x for x in dir(login_fake) if not x.startswith("_")]))
def test_get_login_backend(self):
# test that we get the real one
self.assertEqual(type(get_login_backend(None, None, None)),
LoginBackendDbusSSO)
# test that we get the fake one
os.environ["SOFTWARE_CENTER_FAKE_REVIEW_API"] = "1"
self.assertEqual(type(get_login_backend(None, None, None)),
LoginBackendDbusSSOFake)
# clean the environment
del os.environ["SOFTWARE_CENTER_FAKE_REVIEW_API"]
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
Thraxis/SickRage | sickbeard/name_parser/parser.py | 3 | 24350 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import re
import os.path
import regexes
import sickbeard
from sickbeard import logger, helpers, scene_numbering, common, scene_exceptions, db
from sickrage.helper.common import remove_extension
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
from dateutil import parser
class NameParser(object):
ALL_REGEX = 0
NORMAL_REGEX = 1
ANIME_REGEX = 2
    def __init__(self, file_name=True, showObj=None, tryIndexers=False, naming_pattern=False, parse_method=None):
self.file_name = file_name
self.showObj = showObj
self.tryIndexers = tryIndexers
self.naming_pattern = naming_pattern
if (self.showObj and not self.showObj.is_anime) or parse_method == 'normal':
self._compile_regexes(self.NORMAL_REGEX)
elif (self.showObj and self.showObj.is_anime) or parse_method == 'anime':
self._compile_regexes(self.ANIME_REGEX)
else:
self._compile_regexes(self.ALL_REGEX)
@staticmethod
def clean_series_name(series_name):
"""Cleans up series name by removing any . and _
characters, along with any trailing hyphens.
Is basically equivalent to replacing all _ and . with a
space, but handles decimal numbers in string, for example:
        >>> clean_series_name("an.example.1.0.test")
        'an example 1.0 test'
        >>> clean_series_name("an_example_1.0_test")
        'an example 1.0 test'
Stolen from dbr's tvnamer
"""
series_name = re.sub(r"(\D)\.(?!\s)(\D)", "\\1 \\2", series_name)
series_name = re.sub(r"(\d)\.(\d{4})", "\\1 \\2", series_name) # if it ends in a year then don't keep the dot
series_name = re.sub(r"(\D)\.(?!\s)", "\\1 ", series_name)
series_name = re.sub(r"\.(?!\s)(\D)", " \\1", series_name)
series_name = series_name.replace("_", " ")
series_name = re.sub(r"-$", "", series_name)
series_name = re.sub(r"^\[.*\]", "", series_name)
return series_name.strip()
def _compile_regexes(self, regexMode):
if regexMode == self.ANIME_REGEX:
dbg_str = u"ANIME"
uncompiled_regex = [regexes.anime_regexes]
elif regexMode == self.NORMAL_REGEX:
dbg_str = u"NORMAL"
uncompiled_regex = [regexes.normal_regexes]
else:
dbg_str = u"ALL"
uncompiled_regex = [regexes.normal_regexes, regexes.anime_regexes]
self.compiled_regexes = []
for regexItem in uncompiled_regex:
for cur_pattern_num, (cur_pattern_name, cur_pattern) in enumerate(regexItem):
try:
cur_regex = re.compile(cur_pattern, re.VERBOSE | re.IGNORECASE)
except re.error, errormsg:
logger.log(u"WARNING: Invalid episode_pattern using %s regexs, %s. %s" % (dbg_str, errormsg, cur_pattern))
else:
self.compiled_regexes.append((cur_pattern_num, cur_pattern_name, cur_regex))
def _parse_string(self, name):
if not name:
return
matches = []
bestResult = None
for (cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes:
match = cur_regex.match(name)
if not match:
continue
result = ParseResult(name)
result.which_regex = [cur_regex_name]
result.score = 0 - cur_regex_num
named_groups = match.groupdict().keys()
if 'series_name' in named_groups:
result.series_name = match.group('series_name')
if result.series_name:
result.series_name = self.clean_series_name(result.series_name)
result.score += 1
if 'series_num' in named_groups and match.group('series_num'):
result.score += 1
if 'season_num' in named_groups:
tmp_season = int(match.group('season_num'))
if cur_regex_name == 'bare' and tmp_season in (19, 20):
continue
result.season_number = tmp_season
result.score += 1
if 'ep_num' in named_groups:
ep_num = self._convert_number(match.group('ep_num'))
if 'extra_ep_num' in named_groups and match.group('extra_ep_num'):
result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1)
result.score += 1
else:
result.episode_numbers = [ep_num]
result.score += 1
if 'ep_ab_num' in named_groups:
ep_ab_num = self._convert_number(match.group('ep_ab_num'))
if 'extra_ab_ep_num' in named_groups and match.group('extra_ab_ep_num'):
result.ab_episode_numbers = range(ep_ab_num,
self._convert_number(match.group('extra_ab_ep_num')) + 1)
result.score += 1
else:
result.ab_episode_numbers = [ep_ab_num]
result.score += 1
if 'air_date' in named_groups:
air_date = match.group('air_date')
try:
result.air_date = parser.parse(air_date, fuzzy=True).date()
result.score += 1
except Exception:
continue
if 'extra_info' in named_groups:
tmp_extra_info = match.group('extra_info')
# Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
if tmp_extra_info and cur_regex_name == 'season_only' and re.search(
r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
continue
result.extra_info = tmp_extra_info
result.score += 1
if 'release_group' in named_groups:
result.release_group = match.group('release_group')
result.score += 1
if 'version' in named_groups:
# assigns version to anime file if detected using anime regex. Non-anime regex receives -1
version = match.group('version')
if version:
result.version = version
else:
result.version = 1
else:
result.version = -1
matches.append(result)
if len(matches):
# pick best match with highest score based on placement
bestResult = max(sorted(matches, reverse=True, key=lambda x: x.which_regex), key=lambda x: x.score)
show = None
if not self.naming_pattern:
# try and create a show object for this result
show = helpers.get_show(bestResult.series_name, self.tryIndexers)
# confirm passed in show object indexer id matches result show object indexer id
if show:
if self.showObj and show.indexerid != self.showObj.indexerid:
show = None
bestResult.show = show
elif not show and self.showObj:
bestResult.show = self.showObj
# if this is a naming pattern test or result doesn't have a show object then return best result
if not bestResult.show or self.naming_pattern:
return bestResult
# get quality
bestResult.quality = common.Quality.nameQuality(name, bestResult.show.is_anime)
new_episode_numbers = []
new_season_numbers = []
new_absolute_numbers = []
# if we have an air-by-date show then get the real season/episode numbers
if bestResult.is_air_by_date:
airdate = bestResult.air_date.toordinal()
myDB = db.DBConnection()
sql_result = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ?",
[bestResult.show.indexerid, bestResult.show.indexer, airdate])
season_number = None
episode_numbers = []
if sql_result:
season_number = int(sql_result[0][0])
episode_numbers = [int(sql_result[0][1])]
if not season_number or not len(episode_numbers):
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(bestResult.show.indexer).api_params.copy()
if bestResult.show.lang:
lINDEXER_API_PARMS['language'] = bestResult.show.lang
t = sickbeard.indexerApi(bestResult.show.indexer).indexer(**lINDEXER_API_PARMS)
epObj = t[bestResult.show.indexerid].airedOn(bestResult.air_date)[0]
season_number = int(epObj["seasonnumber"])
episode_numbers = [int(epObj["episodenumber"])]
except sickbeard.indexer_episodenotfound:
logger.log(u"Unable to find episode with date " + str(bestResult.air_date) + " for show " + bestResult.show.name + ", skipping", logger.WARNING)
episode_numbers = []
except sickbeard.indexer_error, e:
logger.log(u"Unable to contact " + sickbeard.indexerApi(bestResult.show.indexer).name + ": " + ex(e), logger.WARNING)
episode_numbers = []
for epNo in episode_numbers:
s = season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
season_number,
epNo)
new_episode_numbers.append(e)
new_season_numbers.append(s)
elif bestResult.show.is_anime and len(bestResult.ab_episode_numbers):
scene_season = scene_exceptions.get_scene_exception_by_name(bestResult.series_name)[1]
for epAbsNo in bestResult.ab_episode_numbers:
a = epAbsNo
if bestResult.show.is_scene:
a = scene_numbering.get_indexer_absolute_numbering(bestResult.show.indexerid,
bestResult.show.indexer, epAbsNo,
True, scene_season)
(s, e) = helpers.get_all_episodes_from_absolute_number(bestResult.show, [a])
new_absolute_numbers.append(a)
new_episode_numbers.extend(e)
new_season_numbers.append(s)
elif bestResult.season_number and len(bestResult.episode_numbers):
for epNo in bestResult.episode_numbers:
s = bestResult.season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
bestResult.season_number,
epNo)
if bestResult.show.is_anime:
a = helpers.get_absolute_number_from_season_and_episode(bestResult.show, s, e)
if a:
new_absolute_numbers.append(a)
new_episode_numbers.append(e)
new_season_numbers.append(s)
            # need to do a quick sanity check here. It's possible that we now have episodes
# from more than one season (by tvdb numbering), and this is just too much
# for sickbeard, so we'd need to flag it.
new_season_numbers = list(set(new_season_numbers)) # remove duplicates
if len(new_season_numbers) > 1:
raise InvalidNameException("Scene numbering results episodes from "
"seasons %s, (i.e. more than one) and "
"sickrage does not support this. "
"Sorry." % (str(new_season_numbers)))
            # it's also possible that we end up with duplicate episode numbers,
            # so eliminate them
new_episode_numbers = list(set(new_episode_numbers))
new_episode_numbers.sort()
            # absolute numbers can contain duplicates as well, so de-duplicate them too
new_absolute_numbers = list(set(new_absolute_numbers))
new_absolute_numbers.sort()
if len(new_absolute_numbers):
bestResult.ab_episode_numbers = new_absolute_numbers
if len(new_season_numbers) and len(new_episode_numbers):
bestResult.episode_numbers = new_episode_numbers
bestResult.season_number = new_season_numbers[0]
if bestResult.show.is_scene:
logger.log(
u"Converted parsed result " + bestResult.original_name + " into " + str(bestResult).decode('utf-8',
'xmlcharrefreplace'),
logger.DEBUG)
            # brief sleep to yield the CPU when parsing many names in a row
time.sleep(0.02)
return bestResult
def _combine_results(self, first, second, attr):
# if the first doesn't exist then return the second or nothing
if not first:
if not second:
return None
else:
return getattr(second, attr)
# if the second doesn't exist then return the first
if not second:
return getattr(first, attr)
a = getattr(first, attr)
b = getattr(second, attr)
        # if a is good (set, and non-empty when it is a list) use it
        if a is not None and (not isinstance(a, list) or len(a)):
            return a
# if not use b (if b isn't set it'll just be default)
else:
return b
@staticmethod
def _unicodify(obj, encoding="utf-8"):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding, 'replace')
return obj
@staticmethod
def _convert_number(org_number):
"""
Convert org_number into an integer
org_number: integer or representation of a number: string or unicode
Try force converting to int first, on error try converting from Roman numerals
returns integer or 0
"""
try:
# try forcing to int
if org_number:
number = int(org_number)
else:
number = 0
except Exception:
# on error try converting from Roman numerals
roman_to_int_map = (
('M', 1000), ('CM', 900), ('D', 500), ('CD', 400), ('C', 100),
('XC', 90), ('L', 50), ('XL', 40), ('X', 10),
('IX', 9), ('V', 5), ('IV', 4), ('I', 1)
)
roman_numeral = str(org_number).upper()
number = 0
index = 0
for numeral, integer in roman_to_int_map:
while roman_numeral[index:index + len(numeral)] == numeral:
number += integer
index += len(numeral)
return number
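    # Illustrative behaviour (comments only, not part of the original code),
    # assuming the defaults above:
    #   _convert_number('24')  -> 24
    #   _convert_number('IV')  -> 4
    #   _convert_number(None)  -> 0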
def parse(self, name, cache_result=True):
name = self._unicodify(name)
if self.naming_pattern:
cache_result = False
cached = name_parser_cache.get(name)
if cached:
return cached
# break it into parts if there are any (dirname, file name, extension)
dir_name, file_name = ek(os.path.split, name)
if self.file_name:
base_file_name = remove_extension(file_name)
else:
base_file_name = file_name
# set up a result to use
final_result = ParseResult(name)
# try parsing the file name
file_name_result = self._parse_string(base_file_name)
# use only the direct parent dir
dir_name = ek(os.path.basename, dir_name)
# parse the dirname for extra info if needed
dir_name_result = self._parse_string(dir_name)
# build the ParseResult object
final_result.air_date = self._combine_results(file_name_result, dir_name_result, 'air_date')
# anime absolute numbers
final_result.ab_episode_numbers = self._combine_results(file_name_result, dir_name_result, 'ab_episode_numbers')
# season and episode numbers
final_result.season_number = self._combine_results(file_name_result, dir_name_result, 'season_number')
final_result.episode_numbers = self._combine_results(file_name_result, dir_name_result, 'episode_numbers')
# if the dirname has a release group/show name I believe it over the filename
final_result.series_name = self._combine_results(dir_name_result, file_name_result, 'series_name')
final_result.extra_info = self._combine_results(dir_name_result, file_name_result, 'extra_info')
final_result.release_group = self._combine_results(dir_name_result, file_name_result, 'release_group')
final_result.version = self._combine_results(dir_name_result, file_name_result, 'version')
final_result.which_regex = []
if final_result == file_name_result:
final_result.which_regex = file_name_result.which_regex
elif final_result == dir_name_result:
final_result.which_regex = dir_name_result.which_regex
else:
if file_name_result:
final_result.which_regex += file_name_result.which_regex
if dir_name_result:
final_result.which_regex += dir_name_result.which_regex
final_result.show = self._combine_results(file_name_result, dir_name_result, 'show')
final_result.quality = self._combine_results(file_name_result, dir_name_result, 'quality')
if not final_result.show:
raise InvalidShowException(
"Unable to parse " + name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))
# if there's no useful info in it then raise an exception
if final_result.season_number is None and not final_result.episode_numbers and final_result.air_date is None and not final_result.ab_episode_numbers and not final_result.series_name:
raise InvalidNameException("Unable to parse " + name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace'))
if cache_result:
name_parser_cache.add(name, final_result)
logger.log(u"Parsed " + name + " into " + str(final_result).decode('utf-8', 'xmlcharrefreplace'), logger.DEBUG)
return final_result
class ParseResult(object):
def __init__(self,
original_name,
series_name=None,
season_number=None,
episode_numbers=None,
extra_info=None,
release_group=None,
air_date=None,
ab_episode_numbers=None,
show=None,
score=None,
quality=None,
version=None
):
self.original_name = original_name
self.series_name = series_name
self.season_number = season_number
if not episode_numbers:
self.episode_numbers = []
else:
self.episode_numbers = episode_numbers
if not ab_episode_numbers:
self.ab_episode_numbers = []
else:
self.ab_episode_numbers = ab_episode_numbers
if not quality:
self.quality = common.Quality.UNKNOWN
else:
self.quality = quality
self.extra_info = extra_info
self.release_group = release_group
self.air_date = air_date
self.which_regex = []
self.show = show
self.score = score
self.version = version
def __eq__(self, other):
if not other:
return False
if self.series_name != other.series_name:
return False
if self.season_number != other.season_number:
return False
if self.episode_numbers != other.episode_numbers:
return False
if self.extra_info != other.extra_info:
return False
if self.release_group != other.release_group:
return False
if self.air_date != other.air_date:
return False
if self.ab_episode_numbers != other.ab_episode_numbers:
return False
if self.show != other.show:
return False
if self.score != other.score:
return False
if self.quality != other.quality:
return False
if self.version != other.version:
return False
return True
def __str__(self):
if self.series_name is not None:
to_return = self.series_name + u' - '
else:
to_return = u''
if self.season_number is not None:
to_return += 'S' + str(self.season_number).zfill(2)
        if self.episode_numbers:
for e in self.episode_numbers:
to_return += 'E' + str(e).zfill(2)
if self.is_air_by_date:
to_return += str(self.air_date)
if self.ab_episode_numbers:
to_return += ' [ABS: ' + str(self.ab_episode_numbers) + ']'
        if self.version and self.is_anime:
to_return += ' [ANIME VER: ' + str(self.version) + ']'
if self.release_group:
to_return += ' [GROUP: ' + self.release_group + ']'
to_return += ' [ABD: ' + str(self.is_air_by_date) + ']'
to_return += ' [ANIME: ' + str(self.is_anime) + ']'
to_return += ' [whichReg: ' + str(self.which_regex) + ']'
return to_return.encode('utf-8')
@property
def is_air_by_date(self):
if self.air_date:
return True
return False
@property
def is_anime(self):
if len(self.ab_episode_numbers):
return True
return False
class NameParserCache(object):
_previous_parsed = {}
_cache_size = 100
def add(self, name, parse_result):
self._previous_parsed[name] = parse_result
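        # NOTE: a plain dict is unordered here (Python 2), so the eviction
        # below drops an arbitrary entry rather than the oldest one.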
while len(self._previous_parsed) > self._cache_size:
del self._previous_parsed[self._previous_parsed.keys()[0]]
def get(self, name):
if name in self._previous_parsed:
logger.log(u"Using cached parse result for: " + name, logger.DEBUG)
return self._previous_parsed[name]
name_parser_cache = NameParserCache()
class InvalidNameException(Exception):
"""The given release name is not valid"""
class InvalidShowException(Exception):
"""The given show name is not valid"""
| gpl-3.0 |
mgraupe/acq4 | acq4/pyqtgraph/widgets/JoystickButton.py | 52 | 2460 | from ..Qt import QtGui, QtCore
__all__ = ['JoystickButton']
class JoystickButton(QtGui.QPushButton):
sigStateChanged = QtCore.Signal(object, object) ## self, state
def __init__(self, parent=None):
QtGui.QPushButton.__init__(self, parent)
self.radius = 200
self.setCheckable(True)
self.state = None
self.setState(0,0)
self.setFixedWidth(50)
self.setFixedHeight(50)
def mousePressEvent(self, ev):
self.setChecked(True)
self.pressPos = ev.pos()
ev.accept()
def mouseMoveEvent(self, ev):
dif = ev.pos()-self.pressPos
self.setState(dif.x(), -dif.y())
def mouseReleaseEvent(self, ev):
self.setChecked(False)
self.setState(0,0)
def wheelEvent(self, ev):
ev.accept()
def doubleClickEvent(self, ev):
ev.accept()
def getState(self):
return self.state
def setState(self, *xy):
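        # Normalise the raw (x, y) offset to a unit direction, clamp its
        # magnitude to self.radius, then square the normalised magnitude so
        # small deflections give finer control near the centre.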
xy = list(xy)
d = (xy[0]**2 + xy[1]**2)**0.5
nxy = [0,0]
for i in [0,1]:
if xy[i] == 0:
nxy[i] = 0
else:
nxy[i] = xy[i]/d
if d > self.radius:
d = self.radius
d = (d/self.radius)**2
xy = [nxy[0]*d, nxy[1]*d]
w2 = self.width()/2.
        h2 = self.height()/2.
self.spotPos = QtCore.QPoint(w2*(1+xy[0]), h2*(1-xy[1]))
self.update()
if self.state == xy:
return
self.state = xy
self.sigStateChanged.emit(self, self.state)
def paintEvent(self, ev):
QtGui.QPushButton.paintEvent(self, ev)
p = QtGui.QPainter(self)
p.setBrush(QtGui.QBrush(QtGui.QColor(0,0,0)))
p.drawEllipse(self.spotPos.x()-3,self.spotPos.y()-3,6,6)
def resizeEvent(self, ev):
self.setState(*self.state)
QtGui.QPushButton.resizeEvent(self, ev)
if __name__ == '__main__':
app = QtGui.QApplication([])
w = QtGui.QMainWindow()
b = JoystickButton()
w.setCentralWidget(b)
w.show()
w.resize(100, 100)
def fn(b, s):
print("state changed:", s)
b.sigStateChanged.connect(fn)
## Start Qt event loop unless running in interactive mode.
import sys
if sys.flags.interactive != 1:
app.exec_()
| mit |
SantosDevelopers/sborganicos | venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/codingstatemachine.py | 2931 | 2318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
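# Usage sketch (illustrative only, not part of the original module): feed the
# state machine one byte at a time and act on the returned state, e.g.
#
#   from .mbcssm import SJISSMModel   # exact model import is an assumption
#   sm = CodingStateMachine(SJISSMModel)
#   for c in byte_buffer:
#       state = sm.next_state(c)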
| mit |
AnderEnder/ansible-modules-extras | web_infrastructure/jenkins_plugin.py | 12 | 25559 | #!/usr/bin/python
# encoding: utf-8
# (c) 2016, Jiri Tyr <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.urls import url_argument_spec
import base64
import hashlib
import json
import os
import tempfile
import time
import urllib
DOCUMENTATION = '''
---
module: jenkins_plugin
author: Jiri Tyr (@jtyr)
version_added: '2.2'
short_description: Add or remove Jenkins plugin
description:
- Ansible module which helps to manage Jenkins plugins.
options:
group:
required: false
default: jenkins
description:
- Name of the Jenkins group on the OS.
jenkins_home:
required: false
default: /var/lib/jenkins
description:
- Home directory of the Jenkins user.
mode:
required: false
    default: '0644'
description:
- File mode applied on versioned plugins.
name:
required: true
description:
- Plugin name.
owner:
required: false
default: jenkins
description:
- Name of the Jenkins user on the OS.
params:
required: false
default: null
description:
- Option used to allow the user to overwrite any of the other options. To
remove an option, set the value of the option to C(null).
state:
required: false
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
default: present
description:
- Desired plugin state.
- If the C(latest) is set, the check for new version will be performed
every time. This is suitable to keep the plugin up-to-date.
timeout:
required: false
default: 30
description:
- Server connection timeout in secs.
updates_expiration:
required: false
default: 86400
description:
- Number of seconds after which a new copy of the I(update-center.json)
file is downloaded. This is used to avoid the need to download the
plugin to calculate its checksum when C(latest) is specified.
- Set it to C(0) if no cache file should be used. In that case, the
plugin file will always be downloaded to calculate its checksum when
C(latest) is specified.
updates_url:
required: false
default: https://updates.jenkins-ci.org
description:
- URL of the Update Centre.
- Used as the base URL to download the plugins and the
I(update-center.json) JSON file.
url:
required: false
default: http://localhost:8080
description:
- URL of the Jenkins server.
version:
required: false
default: null
description:
- Plugin version number.
- If this option is specified, all plugin dependencies must be installed
manually.
- It might take longer to verify that the correct version is installed.
This is especially true if a specific version number is specified.
with_dependencies:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Defines whether to install plugin dependencies.
notes:
    - Plugin installation should be run under root or the same user which owns
the plugin files on the disk. Only if the plugin is not installed yet and
no version is specified, the API installation is performed which requires
only the Web UI credentials.
- It's necessary to notify the handler or call the I(service) module to
restart the Jenkins service after a new plugin was installed.
    - Pinning works only if the plugin is installed and the Jenkins service was
successfully restarted after the plugin installation.
- It is not possible to run the module remotely by changing the I(url)
parameter to point to the Jenkins server. The module must be used on the
host where Jenkins runs as it needs direct access to the plugin files.
'''
EXAMPLES = '''
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
- name: Install plugin without its dependencies
jenkins_plugin:
name: build-pipeline-plugin
with_dependencies: no
- name: Make sure the plugin is always up-to-date
jenkins_plugin:
name: token-macro
state: latest
- name: Install specific version of the plugin
jenkins_plugin:
name: token-macro
version: 1.15
- name: Pin the plugin
jenkins_plugin:
name: token-macro
state: pinned
- name: Unpin the plugin
jenkins_plugin:
name: token-macro
state: unpinned
- name: Enable the plugin
jenkins_plugin:
name: token-macro
state: enabled
- name: Disable the plugin
jenkins_plugin:
name: token-macro
state: disabled
- name: Uninstall plugin
jenkins_plugin:
name: build-pipeline-plugin
state: absent
#
# Example of how to use the params
#
# Define a variable and specify all default parameters you want to use across
# all jenkins_plugin calls:
#
# my_jenkins_params:
# url_username: admin
# url_password: p4ssw0rd
# url: http://localhost:8888
#
- name: Install plugin
jenkins_plugin:
name: build-pipeline-plugin
params: "{{ my_jenkins_params }}"
#
# Example of a Play which handles Jenkins restarts during the state changes
#
- name: Jenkins Master play
hosts: jenkins-master
vars:
my_jenkins_plugins:
token-macro:
enabled: yes
build-pipeline-plugin:
version: 1.4.9
pinned: no
enabled: yes
tasks:
- name: Install plugins without a specific version
jenkins_plugin:
name: "{{ item.key }}"
register: my_jenkins_plugin_unversioned
when: >
'version' not in item.value
with_dict: my_jenkins_plugins
- name: Install plugins with a specific version
jenkins_plugin:
name: "{{ item.key }}"
version: "{{ item.value['version'] }}"
register: my_jenkins_plugin_versioned
when: >
'version' in item.value
with_dict: my_jenkins_plugins
- name: Initiate the fact
set_fact:
jenkins_restart_required: no
- name: Check if restart is required by any of the versioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: my_jenkins_plugin_versioned.results
- name: Check if restart is required by any of the unversioned plugins
set_fact:
jenkins_restart_required: yes
when: item.changed
with_items: my_jenkins_plugin_unversioned.results
- name: Restart Jenkins if required
service:
name: jenkins
state: restarted
when: jenkins_restart_required
# Requires python-httplib2 to be installed on the guest
- name: Wait for Jenkins to start up
uri:
url: http://localhost:8080
status_code: 200
timeout: 5
register: jenkins_service_status
# Keep trying for 5 mins in 5 sec intervals
retries: 60
delay: 5
until: >
'status' in jenkins_service_status and
jenkins_service_status['status'] == 200
when: jenkins_restart_required
- name: Reset the fact
set_fact:
jenkins_restart_required: no
when: jenkins_restart_required
- name: Plugin pinning
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
when: >
'pinned' in item.value
with_dict: my_jenkins_plugins
- name: Plugin enabling
jenkins_plugin:
name: "{{ item.key }}"
state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
when: >
'enabled' in item.value
with_dict: my_jenkins_plugins
'''
RETURN = '''
plugin:
description: plugin name
returned: success
type: string
sample: build-pipeline-plugin
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
class JenkinsPlugin(object):
def __init__(self, module):
# To be able to call fail_json
self.module = module
# Shortcuts for the params
self.params = self.module.params
self.url = self.params['url']
self.timeout = self.params['timeout']
# Crumb
self.crumb = {}
if self._csrf_enabled():
self.crumb = self._get_crumb()
# Get list of installed plugins
self._get_installed_plugins()
def _csrf_enabled(self):
csrf_data = self._get_json_data(
"%s/%s" % (self.url, "api/json"), 'CSRF')
return csrf_data["useCrumbs"]
def _get_json_data(self, url, what, **kwargs):
# Get the JSON data
r = self._get_url_data(url, what, **kwargs)
# Parse the JSON data
try:
json_data = json.load(r)
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot parse %s JSON data." % what,
details=e.message)
return json_data
def _get_url_data(
self, url, what=None, msg_status=None, msg_exception=None,
**kwargs):
# Compose default messages
if msg_status is None:
msg_status = "Cannot get %s" % what
if msg_exception is None:
msg_exception = "Retrieval of %s failed." % what
# Get the URL data
try:
response, info = fetch_url(
self.module, url, timeout=self.timeout, **kwargs)
if info['status'] != 200:
self.module.fail_json(msg=msg_status, details=info['msg'])
except Exception:
e = get_exception()
self.module.fail_json(msg=msg_exception, details=e.message)
return response
def _get_crumb(self):
crumb_data = self._get_json_data(
"%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
ret = {
crumb_data['crumbRequestField']: crumb_data['crumb']
}
else:
self.module.fail_json(
msg="Required fields not found in the Crum response.",
details=crumb_data)
return ret
def _get_installed_plugins(self):
plugins_data = self._get_json_data(
"%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
'list of plugins')
# Check if we got valid data
if 'plugins' not in plugins_data:
self.module.fail_json(msg="No valid plugin data found.")
        # Create final list of installed/pinned plugins
self.is_installed = False
self.is_pinned = False
self.is_enabled = False
for p in plugins_data['plugins']:
if p['shortName'] == self.params['name']:
self.is_installed = True
if p['pinned']:
self.is_pinned = True
if p['enabled']:
self.is_enabled = True
break
def install(self):
changed = False
plugin_file = (
'%s/plugins/%s.jpi' % (
self.params['jenkins_home'],
self.params['name']))
if not self.is_installed and self.params['version'] is None:
if not self.module.check_mode:
# Install the plugin (with dependencies)
install_script = (
'd = Jenkins.instance.updateCenter.getPlugin("%s")'
'.deploy(); d.get();' % self.params['name'])
if self.params['with_dependencies']:
install_script = (
'Jenkins.instance.updateCenter.getPlugin("%s")'
'.getNeededDependencies().each{it.deploy()}; %s' % (
self.params['name'], install_script))
script_data = {
'script': install_script
}
script_data.update(self.crumb)
data = urllib.urlencode(script_data)
# Send the installation request
r = self._get_url_data(
"%s/scriptText" % self.url,
msg_status="Cannot install plugin.",
msg_exception="Plugin installation has failed.",
data=data)
changed = True
else:
# Check if the plugin directory exists
if not os.path.isdir(self.params['jenkins_home']):
self.module.fail_json(
msg="Jenkins home directory doesn't exist.")
md5sum_old = None
if os.path.isfile(plugin_file):
# Make the checksum of the currently installed plugin
md5sum_old = hashlib.md5(
open(plugin_file, 'rb').read()).hexdigest()
if self.params['version'] in [None, 'latest']:
# Take latest version
plugin_url = (
"%s/latest/%s.hpi" % (
self.params['updates_url'],
self.params['name']))
else:
# Take specific version
plugin_url = (
"{0}/download/plugins/"
"{1}/{2}/{1}.hpi".format(
self.params['updates_url'],
self.params['name'],
self.params['version']))
if (
self.params['updates_expiration'] == 0 or
self.params['version'] not in [None, 'latest'] or
md5sum_old is None):
# Download the plugin file directly
r = self._download_plugin(plugin_url)
# Write downloaded plugin into file if checksums don't match
if md5sum_old is None:
# No previously installed plugin
if not self.module.check_mode:
self._write_file(plugin_file, r)
changed = True
else:
# Get data for the MD5
data = r.read()
# Make new checksum
md5sum_new = hashlib.md5(data).hexdigest()
# If the checksum is different from the currently installed
# plugin, store the new plugin
if md5sum_old != md5sum_new:
if not self.module.check_mode:
self._write_file(plugin_file, data)
changed = True
else:
# Check for update from the updates JSON file
plugin_data = self._download_updates()
try:
sha1_old = hashlib.sha1(open(plugin_file, 'rb').read())
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot calculate SHA1 of the old plugin.",
details=e.message)
sha1sum_old = base64.b64encode(sha1_old.digest())
# If the latest version changed, download it
if sha1sum_old != plugin_data['sha1']:
if not self.module.check_mode:
r = self._download_plugin(plugin_url)
self._write_file(plugin_file, r)
changed = True
# Change file attributes if needed
if os.path.isfile(plugin_file):
params = {
'dest': plugin_file
}
params.update(self.params)
file_args = self.module.load_file_common_arguments(params)
if not self.module.check_mode:
# Not sure how to run this in the check mode
changed = self.module.set_fs_attributes_if_different(
file_args, changed)
else:
# See the comment above
changed = True
return changed
def _download_updates(self):
updates_filename = 'jenkins-plugin-cache.json'
updates_dir = os.path.expanduser('~/.ansible/tmp')
updates_file = "%s/%s" % (updates_dir, updates_filename)
download_updates = True
# Check if we need to download new updates file
if os.path.isfile(updates_file):
# Get timestamp when the file was changed last time
ts_file = os.stat(updates_file).st_mtime
ts_now = time.time()
if ts_now - ts_file < self.params['updates_expiration']:
download_updates = False
updates_file_orig = updates_file
# Download the updates file if needed
if download_updates:
url = "%s/update-center.json" % self.params['updates_url']
# Get the data
r = self._get_url_data(
url,
msg_status="Remote updates not found.",
msg_exception="Updates download failed.")
# Write the updates file
            # mkstemp() returns an (fd, path) pair; keep the path and wrap the
            # descriptor so it is not leaked
            tmp_fd, updates_file = tempfile.mkstemp()
            try:
                fd = os.fdopen(tmp_fd, 'wb')
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot open the tmp updates file %s." % updates_file,
details=str(e))
fd.write(r.read())
try:
fd.close()
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot close the tmp updates file %s." % updates_file,
detail=str(e))
# Open the updates file
try:
f = open(updates_file)
except IOError:
e = get_exception()
self.module.fail_json(
msg="Cannot open temporal updates file.",
details=str(e))
i = 0
for line in f:
# Read only the second line
if i == 1:
try:
data = json.loads(line)
except Exception:
e = get_exception()
self.module.fail_json(
msg="Cannot load JSON data from the tmp updates file.",
details=e.message)
break
i += 1
# Move the updates file to the right place if we could read it
if download_updates:
# Make sure the destination directory exists
if not os.path.isdir(updates_dir):
try:
os.makedirs(updates_dir, int('0700', 8))
except OSError:
e = get_exception()
self.module.fail_json(
msg="Cannot create temporal directory.",
details=e.message)
self.module.atomic_move(updates_file, updates_file_orig)
# Check if we have the plugin data available
if 'plugins' not in data or self.params['name'] not in data['plugins']:
self.module.fail_json(
msg="Cannot find plugin data in the updates file.")
return data['plugins'][self.params['name']]
def _download_plugin(self, plugin_url):
# Download the plugin
r = self._get_url_data(
plugin_url,
msg_status="Plugin not found.",
msg_exception="Plugin download failed.")
return r
def _write_file(self, f, data):
# Store the plugin into a temp file and then move it
        tmp_fd, tmp_f = tempfile.mkstemp()
        try:
            fd = os.fdopen(tmp_fd, 'wb')
except IOError:
e = get_exception()
self.module.fail_json(
                msg='Cannot open the temporary plugin file %s.' % tmp_f,
details=str(e))
if isinstance(data, str):
d = data
else:
d = data.read()
fd.write(d)
try:
fd.close()
except IOError:
e = get_exception()
self.module.fail_json(
                msg='Cannot close the temporary plugin file %s.' % tmp_f,
details=str(e))
# Move the file onto the right place
self.module.atomic_move(tmp_f, f)
def uninstall(self):
changed = False
# Perform the action
if self.is_installed:
if not self.module.check_mode:
self._pm_query('doUninstall', 'Uninstallation')
changed = True
return changed
def pin(self):
return self._pinning('pin')
def unpin(self):
return self._pinning('unpin')
def _pinning(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'pin' and not self.is_pinned or
action == 'unpin' and self.is_pinned):
# Perform the action
if not self.module.check_mode:
self._pm_query(action, "%sning" % action.capitalize())
changed = True
return changed
def enable(self):
return self._enabling('enable')
def disable(self):
return self._enabling('disable')
def _enabling(self, action):
changed = False
# Check if the plugin is pinned/unpinned
if (
action == 'enable' and not self.is_enabled or
action == 'disable' and self.is_enabled):
# Perform the action
if not self.module.check_mode:
self._pm_query(
"make%sd" % action.capitalize(),
"%sing" % action[:-1].capitalize())
changed = True
return changed
def _pm_query(self, action, msg):
url = "%s/pluginManager/plugin/%s/%s" % (
self.params['url'], self.params['name'], action)
data = urllib.urlencode(self.crumb)
# Send the request
self._get_url_data(
url,
msg_status="Plugin not found. %s" % url,
msg_exception="%s has failed." % msg,
data=data)
def main():
# Module arguments
argument_spec = url_argument_spec()
argument_spec.update(
group=dict(default='jenkins'),
jenkins_home=dict(default='/var/lib/jenkins'),
mode=dict(default='0644', type='raw'),
name=dict(required=True),
owner=dict(default='jenkins'),
params=dict(type='dict'),
state=dict(
choices=[
'present',
'absent',
'pinned',
'unpinned',
'enabled',
'disabled',
'latest'],
default='present'),
timeout=dict(default=30, type="int"),
updates_expiration=dict(default=86400, type="int"),
updates_url=dict(default='https://updates.jenkins-ci.org'),
url=dict(default='http://localhost:8080'),
url_password=dict(no_log=True),
version=dict(),
with_dependencies=dict(default=True, type='bool'),
)
# Module settings
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
# Update module parameters by user's parameters if defined
if 'params' in module.params and isinstance(module.params['params'], dict):
module.params.update(module.params['params'])
# Remove the params
module.params.pop('params', None)
# Force basic authentication
module.params['force_basic_auth'] = True
# Convert timeout to float
try:
module.params['timeout'] = float(module.params['timeout'])
except ValueError:
e = get_exception()
module.fail_json(
msg='Cannot convert %s to float.' % module.params['timeout'],
details=str(e))
# Set version to latest if state is latest
if module.params['state'] == 'latest':
module.params['state'] = 'present'
module.params['version'] = 'latest'
# Create some shortcuts
name = module.params['name']
state = module.params['state']
# Initial change state of the task
changed = False
# Instantiate the JenkinsPlugin object
jp = JenkinsPlugin(module)
# Perform action depending on the requested state
if state == 'present':
changed = jp.install()
elif state == 'absent':
changed = jp.uninstall()
elif state == 'pinned':
changed = jp.pin()
elif state == 'unpinned':
changed = jp.unpin()
elif state == 'enabled':
changed = jp.enable()
elif state == 'disabled':
changed = jp.disable()
# Print status of the change
module.exit_json(changed=changed, plugin=name, state=state)
if __name__ == '__main__':
main()
| gpl-3.0 |
GiladE/birde | venv/lib/python2.7/site-packages/gunicorn/http/wsgi.py | 26 | 13493 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import io
import logging
import os
import re
import sys
from gunicorn._compat import unquote_to_wsgi_str
from gunicorn.six import string_types, binary_type, reraise
from gunicorn import SERVER_SOFTWARE
import gunicorn.six as six
import gunicorn.util as util
try:
# Python 3.3 has os.sendfile().
from os import sendfile
except ImportError:
try:
from ._sendfile import sendfile
except ImportError:
sendfile = None
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
log = logging.getLogger(__name__)
class FileWrapper(object):
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike, 'close'):
self.close = filelike.close
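    # Iteration over a FileWrapper relies on Python's legacy sequence
    # protocol: callers invoke __getitem__ with increasing keys until
    # IndexError is raised, which is why the key argument is ignored below.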
def __getitem__(self, key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
class WSGIErrorsWrapper(io.RawIOBase):
def __init__(self, cfg):
errorlog = logging.getLogger("gunicorn.error")
handlers = errorlog.handlers
self.streams = []
if cfg.errorlog == "-":
self.streams.append(sys.stderr)
handlers = handlers[1:]
for h in handlers:
if hasattr(h, "stream"):
self.streams.append(h.stream)
def write(self, data):
for stream in self.streams:
try:
stream.write(data)
except UnicodeError:
stream.write(data.encode("UTF-8"))
stream.flush()
def base_environ(cfg):
return {
"wsgi.errors": WSGIErrorsWrapper(cfg),
"wsgi.version": (1, 0),
"wsgi.multithread": False,
"wsgi.multiprocess": (cfg.workers > 1),
"wsgi.run_once": False,
"wsgi.file_wrapper": FileWrapper,
"SERVER_SOFTWARE": SERVER_SOFTWARE,
}
def default_environ(req, sock, cfg):
env = base_environ(cfg)
env.update({
"wsgi.input": req.body,
"gunicorn.socket": sock,
"REQUEST_METHOD": req.method,
"QUERY_STRING": req.query,
"RAW_URI": req.uri,
"SERVER_PROTOCOL": "HTTP/%s" % ".".join([str(v) for v in req.version])
})
return env
def proxy_environ(req):
info = req.proxy_protocol_info
if not info:
return {}
return {
"PROXY_PROTOCOL": info["proxy_protocol"],
"REMOTE_ADDR": info["client_addr"],
"REMOTE_PORT": str(info["client_port"]),
"PROXY_ADDR": info["proxy_addr"],
"PROXY_PORT": str(info["proxy_port"]),
}
def create(req, sock, client, server, cfg):
resp = Response(req, sock, cfg)
# set initial environ
environ = default_environ(req, sock, cfg)
# default variables
host = None
url_scheme = "https" if cfg.is_ssl else "http"
script_name = os.environ.get("SCRIPT_NAME", "")
# set secure_headers
secure_headers = cfg.secure_scheme_headers
if client and not isinstance(client, string_types):
if ('*' not in cfg.forwarded_allow_ips
and client[0] not in cfg.forwarded_allow_ips):
secure_headers = {}
    # add the headers to the environ
for hdr_name, hdr_value in req.headers:
if hdr_name == "EXPECT":
# handle expect
if hdr_value.lower() == "100-continue":
sock.send(b"HTTP/1.1 100 Continue\r\n\r\n")
elif secure_headers and (hdr_name in secure_headers and
hdr_value == secure_headers[hdr_name]):
url_scheme = "https"
elif hdr_name == 'HOST':
host = hdr_value
elif hdr_name == "SCRIPT_NAME":
script_name = hdr_value
elif hdr_name == "CONTENT-TYPE":
environ['CONTENT_TYPE'] = hdr_value
continue
elif hdr_name == "CONTENT-LENGTH":
environ['CONTENT_LENGTH'] = hdr_value
continue
key = 'HTTP_' + hdr_name.replace('-', '_')
if key in environ:
hdr_value = "%s,%s" % (environ[key], hdr_value)
environ[key] = hdr_value
    # set the url scheme
environ['wsgi.url_scheme'] = url_scheme
# set the REMOTE_* keys in environ
# authors should be aware that REMOTE_HOST and REMOTE_ADDR
# may not qualify the remote addr:
# http://www.ietf.org/rfc/rfc3875
if isinstance(client, string_types):
environ['REMOTE_ADDR'] = client
elif isinstance(client, binary_type):
environ['REMOTE_ADDR'] = str(client)
else:
environ['REMOTE_ADDR'] = client[0]
environ['REMOTE_PORT'] = str(client[1])
# handle the SERVER_*
# Normally only the application should use the Host header but since the
# WSGI spec doesn't support unix sockets, we are using it to create
# viable SERVER_* if possible.
if isinstance(server, string_types):
server = server.split(":")
if len(server) == 1:
# unix socket
            if host:
server = host.split(':')
if len(server) == 1:
if url_scheme == "http":
                        server.append(80)
elif url_scheme == "https":
server.append(443)
else:
server.append('')
else:
# no host header given which means that we are not behind a
# proxy, so append an empty port.
server.append('')
environ['SERVER_NAME'] = server[0]
environ['SERVER_PORT'] = str(server[1])
# set the path and script name
path_info = req.path
if script_name:
path_info = path_info.split(script_name, 1)[1]
environ['PATH_INFO'] = unquote_to_wsgi_str(path_info)
environ['SCRIPT_NAME'] = script_name
# override the environ with the correct remote and server address if
# we are behind a proxy using the proxy protocol.
environ.update(proxy_environ(req))
return resp, environ
class Response(object):
def __init__(self, req, sock, cfg):
self.req = req
self.sock = sock
self.version = SERVER_SOFTWARE
self.status = None
self.chunked = False
self.must_close = False
self.headers = []
self.headers_sent = False
self.response_length = None
self.sent = 0
self.upgrade = False
self.cfg = cfg
def force_close(self):
self.must_close = True
def should_close(self):
if self.must_close or self.req.should_close():
return True
if self.response_length is not None or self.chunked:
return False
if self.status_code < 200 or self.status_code in (204, 304):
return False
return True
def start_response(self, status, headers, exc_info=None):
if exc_info:
try:
if self.status and self.headers_sent:
reraise(exc_info[0], exc_info[1], exc_info[2])
finally:
exc_info = None
elif self.status is not None:
raise AssertionError("Response headers already set!")
self.status = status
# get the status code from the response here so we can use it to check
# the need for the connection header later without parsing the string
# each time.
try:
self.status_code = int(self.status.split()[0])
except ValueError:
self.status_code = None
self.process_headers(headers)
self.chunked = self.is_chunked()
return self.write
def process_headers(self, headers):
for name, value in headers:
if not isinstance(name, string_types):
raise TypeError('%r is not a string' % name)
value = str(value).strip()
lname = name.lower().strip()
if lname == "content-length":
self.response_length = int(value)
elif util.is_hoppish(name):
if lname == "connection":
# handle websocket
if value.lower().strip() == "upgrade":
self.upgrade = True
elif lname == "upgrade":
if value.lower().strip() == "websocket":
self.headers.append((name.strip(), value))
# ignore hopbyhop headers
continue
self.headers.append((name.strip(), value))
def is_chunked(self):
# Only use chunked responses when the client is
# speaking HTTP/1.1 or newer and there was
# no Content-Length header set.
if self.response_length is not None:
return False
elif self.req.version <= (1, 0):
return False
elif self.status_code in (204, 304):
# Do not use chunked responses when the response is guaranteed to
# not have a response body.
return False
return True
def default_headers(self):
# set the connection header
if self.upgrade:
connection = "upgrade"
elif self.should_close():
connection = "close"
else:
connection = "keep-alive"
headers = [
"HTTP/%s.%s %s\r\n" % (self.req.version[0],
self.req.version[1], self.status),
"Server: %s\r\n" % self.version,
"Date: %s\r\n" % util.http_date(),
"Connection: %s\r\n" % connection
]
if self.chunked:
headers.append("Transfer-Encoding: chunked\r\n")
return headers
def send_headers(self):
if self.headers_sent:
return
tosend = self.default_headers()
tosend.extend(["%s: %s\r\n" % (k, v) for k, v in self.headers])
header_str = "%s\r\n" % "".join(tosend)
util.write(self.sock, util.to_bytestring(header_str))
self.headers_sent = True
def write(self, arg):
self.send_headers()
if not isinstance(arg, binary_type):
            raise TypeError('%r is not a bytestring' % arg)
arglen = len(arg)
tosend = arglen
if self.response_length is not None:
if self.sent >= self.response_length:
# Never write more than self.response_length bytes
return
tosend = min(self.response_length - self.sent, tosend)
if tosend < arglen:
arg = arg[:tosend]
# Sending an empty chunk signals the end of the
# response and prematurely closes the response
if self.chunked and tosend == 0:
return
self.sent += tosend
util.write(self.sock, arg, self.chunked)
def can_sendfile(self):
return (self.cfg.sendfile and (sendfile is not None))
def sendfile_all(self, fileno, sockno, offset, nbytes):
# Send file in at most 1GB blocks as some operating
# systems can have problems with sending files in blocks
# over 2GB.
BLKSIZE = 0x3FFFFFFF
if nbytes > BLKSIZE:
for m in range(0, nbytes, BLKSIZE):
self.sendfile_all(fileno, sockno, offset, min(nbytes, BLKSIZE))
offset += BLKSIZE
nbytes -= BLKSIZE
else:
sent = 0
sent += sendfile(sockno, fileno, offset + sent, nbytes - sent)
while sent != nbytes:
sent += sendfile(sockno, fileno, offset + sent, nbytes - sent)
def sendfile_use_send(self, fileno, fo_offset, nbytes):
# send file in blocks of 8182 bytes
BLKSIZE = 8192
sent = 0
while sent != nbytes:
data = os.read(fileno, BLKSIZE)
if not data:
break
sent += len(data)
if sent > nbytes:
data = data[:nbytes - sent]
util.write(self.sock, data, self.chunked)
def write_file(self, respiter):
if self.can_sendfile() and util.is_fileobject(respiter.filelike):
# sometimes the fileno isn't a callable
if six.callable(respiter.filelike.fileno):
fileno = respiter.filelike.fileno()
else:
fileno = respiter.filelike.fileno
fd_offset = os.lseek(fileno, 0, os.SEEK_CUR)
fo_offset = respiter.filelike.tell()
nbytes = max(os.fstat(fileno).st_size - fo_offset, 0)
if self.response_length:
nbytes = min(nbytes, self.response_length)
if nbytes == 0:
return
self.send_headers()
if self.cfg.is_ssl:
self.sendfile_use_send(fileno, fo_offset, nbytes)
else:
if self.is_chunked():
chunk_size = "%X\r\n" % nbytes
self.sock.sendall(chunk_size.encode('utf-8'))
self.sendfile_all(fileno, self.sock.fileno(), fo_offset, nbytes)
if self.is_chunked():
self.sock.sendall(b"\r\n")
os.lseek(fileno, fd_offset, os.SEEK_SET)
else:
for item in respiter:
self.write(item)
def close(self):
if not self.headers_sent:
self.send_headers()
if self.chunked:
util.write_chunk(self.sock, b"")
| mit |
TeamEOS/kernel_oppo_find7 | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
quang-ha/lammps | tools/i-pi/ipi/utils/io/io_xml.py | 33 | 15954 | """Contains the functions used to read the input file and print the checkpoint
files with xml formatting.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http.//www.gnu.org/licenses/>.
Functions:
xml_node: Class to handle a particular xml tag.
xml_handler: Class giving general xml data reading methods.
xml_parse_string: Parses a string made from a section of a xml input file.
xml_parse_file: Parses an entire xml input file.
read_type: Reads a string and outputs data of a specified type.
read_float: Reads a string and outputs a float.
read_int: Reads a string and outputs an integer.
read_bool: Reads a string and outputs a boolean.
read_list: Reads a string and outputs a list.
read_array: Reads a string and outputs an array.
read_tuple: Reads a string and outputs a tuple.
read_dict: Reads a string and outputs a dictionary.
write_type: Writes a string from data of a specified type.
write_list: Writes a string from a list.
write_tuple: Writes a string from a tuple.
write_float: Writes a string from a float.
write_bool: Writes a string from a boolean.
write_dict: Writes a string from a dictionary.
"""
__all__ = ['xml_node', 'xml_handler', 'xml_parse_string', 'xml_parse_file',
'read_type', 'read_float', 'read_int', 'read_bool', 'read_list',
'read_array', 'read_tuple', 'read_dict', 'write_type', 'write_list',
'write_tuple', 'write_float', 'write_bool', 'write_dict']
from xml.sax import parseString, parse
from xml.sax.handler import ContentHandler
import numpy as np
import string
class xml_node(object):
"""Class to handle a particular xml tag.
Tags are generally written in the form
<tag_name attribs="attrib_data"> main_data </tag_name>. This class holds
tag_name, attrib_data and main_data separately so they can be used to
create the objects with the appropriate names and data.
Attributes:
attribs: The attribute data for the tag.
fields: The rest of the data.
name: The tag name.
"""
def __init__(self, attribs=None, name="", fields=None):
"""Initialises xml_node.
Args:
attribs: An optional dictionary giving attribute data. Defaults to {}.
fields: An optional dictionary holding all the data between the start
and end tags, including information about other nodes.
Defaults to {}.
name: An optional string giving the tag name. Defaults to ''.
"""
if attribs is None:
attribs = {}
if fields is None:
fields = []
self.attribs = attribs
self.name = name
self.fields = fields
class xml_handler(ContentHandler):
"""Class giving general xml_reading methods.
Uses the standard python xml_reader to read the different kinds of data.
   Keeps track of the hierarchical nature of an xml file by recording the level
of nesting, so that the correct data and attributes can be associated with
the correct tag name.
Attributes:
root: An xml_node object for the root node.
open: The list of the tags that the parser is currently between the start
and end tags of.
level: The level of nesting that the parser is currently at.
buffer: A list of the data found between the tags at the different levels
of nesting.
"""
def __init__(self):
"""Initialises xml_handler."""
#root xml node with all the data
self.root = xml_node(name="root", fields=[])
self.open = [self.root]
#current level of the hierarchy
self.level = 0
#Holds all the data between each of the tags.
#If level = 1, then buffer[0] holds all the data collected between the
#root tags, and buffer[1] holds all the data collected between the
#first child tag.
self.buffer = [[""]]
def startElement(self, name, attrs):
"""Reads an opening tag.
Adds the opening tag to the list of open tags, adds a new space in the
buffer, reads the appropriate attributes and adds a new level to the
      hierarchy.
Args:
name: The tag_name.
attrs: The attribute data.
"""
#creates a new node
newnode = xml_node(attribs=dict((k,attrs[k]) for k in attrs.keys()), name=name, fields=[])
#adds it to the list of open nodes
self.open.append(newnode)
#adds it to the list of fields of the parent tag
self.open[self.level].fields.append((name,newnode))
#gets ready to read new data
self.buffer.append([""])
self.level += 1
def characters(self, data):
"""Reads data.
      Adds the data to the buffer of the current level of the hierarchy.
Data is read as a string, and needs to be converted to the required
type later.
Args:
data: The data to be read.
"""
self.buffer[self.level].append(data)
def endElement(self, name):
"""Reads a closing tag.
Once all the data has been read, and the closing tag found, the buffer
is read into the appropriate field.
Args:
name: The tag_name.
"""
#all the text found between the tags stored in the appropriate xml_node
#object
self.buffer[self.level] = ''.join(self.buffer[self.level])
self.open[self.level].fields.append(("_text" , self.buffer[self.level]))
#'closes' the xml_node object, as we are no longer within its tags, so
#there is no more data to be added to it.
      #Note that the xml_node is still held within its parent's fields, so no
      #information is lost when we drop our direct reference to it here.
self.buffer.pop(self.level)
self.open.pop(self.level)
self.level -= 1
def xml_parse_string(buf):
"""Parses a string made from a section of a xml input file.
Args:
buf: A string in correct xml format.
Returns:
A xml_node for the root node of the file.
"""
myhandle = xml_handler()
parseString(buf, myhandle)
return myhandle.root
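# Minimal usage sketch (illustrative only):
#   root = xml_parse_string("<simulation><total_steps>100</total_steps></simulation>")
#   name, node = root.fields[0]   # ('simulation', <xml_node instance>)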
def xml_parse_file(stream):
"""Parses an entire xml input file.
Args:
stream: A string describing a xml formatted file.
Returns:
A xml_node for the root node of the file.
"""
myhandle = xml_handler()
parse(stream, myhandle)
return myhandle.root
def read_type(type, data):
"""Reads a string and outputs data of a specified type.
Args:
type: The data type of the target container.
data: The string to be read in.
Raises:
TypeError: Raised if it tries to read into a data type that has not been
implemented.
Returns:
An object of type type.
"""
if not type in readtype_funcs:
raise TypeError("Conversion not available for given type")
return type(readtype_funcs[type](data))
def read_float(data):
"""Reads a string and outputs a float.
Args:
data: The string to be read in.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
A float.
"""
return float(data)
def read_int(data):
"""Reads a string and outputs a integer.
Args:
data: The string to be read in.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
An integer.
"""
return int(data)
def read_bool(data):
"""Reads a string and outputs a boolean.
Takes a string of the form 'true' or 'false', and returns the appropriate
boolean.
Args:
data: The string to be read in.
Raises:
ValueError: Raised if the string is not 'true' or 'false'.
Returns:
A boolean.
"""
if data.strip().upper() == "TRUE":
return True
elif data.strip().upper() == "FALSE":
return False
else:
raise ValueError(data + " does not represent a bool value")
def read_list(data, delims="[]", split=",", strip=" \n\t'"):
"""Reads a formatted string and outputs a list.
The string must be formatted in the correct way.
The start character must be delimiters[0], the end character
must be delimiters[1] and each element must be split along
the character split. Characters at the beginning or
end of each element in strip are ignored. The standard list format is of the
form '[array[0], array[1],..., array[n]]', which is used for actual lists.
Other formats are used for tuples and dictionaries.
Args:
      data: The string to be read in.
      delims: A string of two characters giving the first and last character of
         the list format. '[]' by default.
      split: The character between different elements of the list format.
         ',' by default.
      strip: Characters to be removed from the beginning and end of each
         element. " \n\t'" by default.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
A list of strings.
"""
try:
begin = data.index(delims[0])
end = data.index(delims[1])
except ValueError:
raise ValueError("Error in list syntax: could not locate delimiters")
rlist = data[begin+1:end].split(split)
for i in range(len(rlist)):
rlist[i] = rlist[i].strip(strip)
# handles empty lists correctly
if len(rlist) == 1 and rlist[0] == "":
rlist = []
return rlist
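# A few illustrative examples (not part of the original module):
#   read_list("[1, 2, 3]")       -> ['1', '2', '3']
#   read_list("(a, b)", "()")    -> ['a', 'b']
#   read_list("[]")              -> []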
def read_array(dtype, data):
"""Reads a formatted string and outputs an array.
The format is as for standard python arrays, which is
[array[0], array[1], ... , array[n]]. Note the use of comma separators, and
the use of square brackets.
Args:
data: The string to be read in.
dtype: The data type of the elements of the target array.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
An array of data type dtype.
"""
rlist = read_list(data)
for i in range(len(rlist)):
rlist[i] = read_type(dtype,rlist[i])
return np.array(rlist, dtype)
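# e.g. (illustrative): read_array(float, "[1.0, 2.5]") -> array([ 1. ,  2.5])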
def read_tuple(data, delims="()", split=",", strip=" \n\t'", arg_type=int):
"""Reads a formatted string and outputs a tuple.
The format is as for standard python tuples, which is
(tuple[0], tuple[1], ... , tuple[n]). Note the comma
separators, and the use of brackets.
Args:
      data: The string to be read in.
      delims: A string of two characters giving the first and last character of
         the tuple format. '()' by default.
      split: The character between different elements of the tuple format.
         ',' by default.
      strip: Characters to be removed from the beginning and end of each
         element. " \n\t'" by default.
      arg_type: The type to which each element is converted; a tuple of
         arg_type will be returned.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
A tuple of elements of the specified data type.
"""
rlist = read_list(data, delims=delims, split=split, strip=strip)
return tuple([arg_type(i) for i in rlist])
def read_dict(data, delims="{}", split=",", key_split=":", strip=" \n\t"):
"""Reads a formatted string and outputs a dictionary.
The format is as for standard python dictionaries, which is
{keyword[0]: arg[0], keyword[1]: arg[1], ... , keyword[n]: arg[n]}. Note the
comma separators, and the use of curly brackets.
Args:
      data: The string to be read in.
      delims: A string of two characters giving the first and last character of
         the dictionary format. '{}' by default.
      split: The character between different elements of the dictionary format.
         ',' by default.
      key_split: The character between the key word and the value. ':' by
         default.
      strip: Characters to be removed from the beginning and end of each
         element. ' \n\t' by default.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
A dictionary of strings.
"""
rlist = read_list(data, delims=delims, split=split, strip=strip)
def mystrip(data):
return data.strip(strip)
rdict = {}
for s in rlist:
rtuple = map(mystrip,s.split(key_split))
if not len(rtuple) == 2:
raise ValueError("Format for a key:value format is wrong for item " + s)
rdict[rtuple[0]] = rtuple[1]
return rdict
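# e.g. (illustrative): read_dict("{a: 1, b: 2}") -> {'a': '1', 'b': '2'}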
readtype_funcs = {np.ndarray: read_array, dict: read_dict, float: read_float, int: read_int, bool: read_bool, str: string.strip, tuple: read_tuple, np.uint : read_int}
def write_type(type, data):
"""Writes a formatted string from a value of a specified type.
Args:
type: The data type of the value.
data: The value to be read in.
Raises:
TypeError: Raised if it tries to write from a data type that has not been
implemented.
Returns:
A formatted string.
"""
if not type in writetype_funcs:
raise TypeError("Conversion not available for given type")
return writetype_funcs[type](data)
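# A minimal usage sketch (hypothetical values), dispatching through the
# writetype_funcs table defined below:
#   write_type(bool, True)   # -> ' True'
#   write_type(int, 42)      # -> '42'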
def write_list(data, delims="[]"):
"""Writes a formatted string from a list.
The format of the output is as for a standard python list,
[list[0], list[1],..., list[n]]. Note the space after the commas, and the
use of square brackets.
Args:
data: The value to be read in.
delims: An optional string of two characters giving the first and last
character to be printed. Defaults to "[]".
Returns:
A formatted string.
"""
rstr = delims[0]
for v in data:
rstr += str(v) + ", "
rstr = rstr.rstrip(", ")
rstr += delims[1]
return rstr
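# A minimal usage sketch (hypothetical values):
#   write_list([1, 2, 3])                 # -> '[1, 2, 3]'
#   write_list(["a", "b"], delims="()")   # -> '(a, b)'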
def write_tuple(data):
"""Writes a formatted string from a tuple.
The format of the output is as for a standard python tuple,
(tuple[0], tuple[1],..., tuple[n]). Note the space after the commas, and the
use of brackets.
Args:
data: The value to be read in.
Returns:
A formatted string.
"""
return write_list(data, delims="()")
def write_float(data):
"""Writes a formatted string from a float.
Floats are printed out in exponential format, to 8 decimal places,
right-justified and padded with spaces to a total width of 16 characters.
For example 1.0 --> '  1.00000000e+00'
Args:
data: The value to be read in.
Returns:
A formatted string.
"""
return "%16.8e" % (data)
def write_bool(data):
"""Writes a formatted string from a float.
Booleans are printed as a string of either ' true' or 'false'. Note that
both are printed out as exactly 5 characters.
Args:
data: The value to be read in.
Returns:
A formatted string.
"""
return "%5.5s" % (str(data))
def write_dict(data, delims="{}"):
"""Writes a formatted string from a dictionary.
The format of the output is as for a standard python dictionary,
{keyword[0]: arg[0], keyword[1]: arg[1],..., keyword[n]: arg[n]}. Note the
space after the commas, and the use of curly brackets.
Args:
data: The value to be read in.
delims: An optional string of two characters giving the first and last
character to be printed. Defaults to "{}".
Returns:
A formatted string.
"""
rstr = delims[0]
for v in data:
rstr += str(v) + ": " + str(data[v]) + ", "
rstr = rstr.rstrip(", ")
rstr += delims[1]
return rstr
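# A minimal usage sketch (a hypothetical single-key dictionary, which
# avoids any dependence on dict iteration order):
#   write_dict({"steps": 100})   # -> '{steps: 100}'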
writetype_funcs = {float: write_float, dict: write_dict, int: str, bool: write_bool, str: string.strip, tuple: write_tuple, np.uint : str}
| gpl-2.0 |
youprofit/zato | code/zato-server/src/zato/server/service/internal/kvdb/__init__.py | 6 | 4387 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from logging import DEBUG, getLogger
from traceback import format_exc
# gevent
from gevent import sleep
# Redis
from redis.sentinel import MasterNotFoundError
# Zato
from zato.common import ZatoException
from zato.common.kvdb import redis_grammar
from zato.common.util import has_redis_sentinels
from zato.server.service.internal import AdminService, AdminSIO
kvdb_logger = getLogger('zato_kvdb')
class ExecuteCommand(AdminService):
""" Executes a command against the key/value DB.
"""
name = 'zato.kvdb.remote-command.execute'
class SimpleIO(AdminSIO):
request_elem = 'zato_kvdb_remote_command_execute_request'
response_elem = 'zato_kvdb_remote_command_execute_response'
input_required = ('command',)
output_required = ('result',)
def _fixup_parameters(self, parameters):
""" Fix up quotes so stuff like [SISMEMBER key member] and [SISMEMBER key "member"] is treated the same
(brackets used here for clarity only to separate commands).
"""
if parameters:
has_one = len(parameters) == 1
first_elem_idx = 0 if has_one else 1
if parameters[first_elem_idx][0] == '"' and parameters[-1][-1] == '"':
parameters[first_elem_idx] = parameters[first_elem_idx][1:]
parameters[-1] = parameters[-1][:-1]
return parameters
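# A minimal sketch of the fix-up (hypothetical parameter lists): quoted
# and unquoted arguments normalise to the same thing, e.g.
#   ['key', 'member']     -> ['key', 'member']
#   ['key', '"member"']   -> ['key', 'member']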
def handle(self):
input_command = self.request.input.command or ''
if not input_command:
msg = 'No command sent'
raise ZatoException(self.cid, msg)
try:
parse_result = redis_grammar.parseString(input_command)
options = {}
command = parse_result.command
parameters = parse_result.parameters if parse_result.parameters else []
parameters = self._fixup_parameters(parameters)
if command == 'CONFIG':
options['parse'] = parameters[0]
elif command == 'OBJECT':
options['infotype'] = parameters[0]
response = self.server.kvdb.conn.execute_command(command, *parameters, **options) or ''
if response and command in('KEYS', 'HKEYS', 'HVALS'):
response = unicode(response).encode('utf-8')
elif command in('HLEN', 'LLEN', 'LRANGE', 'SMEMBERS', 'HGETALL'):
response = str(response)
self.response.payload.result = response or '(None)'
except Exception, e:
msg = 'Command parsing error, command:[{}], e:[{}]'.format(input_command, format_exc(e))
self.logger.error(msg)
raise ZatoException(self.cid, msg)
class LogConnectionInfo(AdminService):
""" Writes outs to logs information regarding current connections to KVDB.
"""
def handle(self):
config = self.server.fs_server_config.kvdb
sleep_time = float(config.log_connection_info_sleep_time)
has_sentinels = has_redis_sentinels(config)
if kvdb_logger.isEnabledFor(DEBUG):
while True:
if has_sentinels:
try:
master_address = self.kvdb.conn.connection_pool.connection_kwargs['connection_pool'].get_master_address()
kvdb_logger.debug(
'Uses sentinels: `%s %r`, master: `%r`', has_sentinels, config.redis_sentinels, master_address)
except MasterNotFoundError, e:
self.logger.warn(format_exc(e))
kvdb_logger.warn(format_exc(e))
else:
kvdb_logger.debug(
'Uses sentinels: `%s`, conn:`%r`', has_sentinels, self.kvdb.conn)
sleep(sleep_time)
# The data browser will most likely be implemented in a future version
'''
class GetList(AdminService):
""" Returns a list of keys, optionally including their values.
"""
# KEYS, then
# HGETALL
# GET
# LRANGE
# SMEMBERS
'''
| gpl-3.0 |
mdublin/Brightcove-Dynamic-Ingest-App | ENV/lib/python2.7/site-packages/sqlalchemy/testing/suite/test_types.py | 12 | 11192 | # coding: utf-8
from .. import fixtures, config
from ..assertions import eq_
from ..config import requirements
from sqlalchemy import Integer, Unicode, UnicodeText, select
from sqlalchemy import Date, DateTime, Time, MetaData, String, \
Text, Numeric, Float
from ..schema import Table, Column
from ... import testing
import decimal
import datetime
class _UnicodeFixture(object):
__requires__ = 'unicode_data',
data = u"Alors vous imaginez ma surprise, au lever du jour, "\
u"quand une drôle de petite voix m’a réveillé. Elle "\
u"disait: « S’il vous plaît… dessine-moi un mouton! »"
@classmethod
def define_tables(cls, metadata):
Table('unicode_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('unicode_data', cls.datatype),
)
def test_round_trip(self):
unicode_table = self.tables.unicode_table
config.db.execute(
unicode_table.insert(),
{
'unicode_data': self.data,
}
)
row = config.db.execute(
select([
unicode_table.c.unicode_data,
])
).first()
eq_(
row,
(self.data, )
)
assert isinstance(row[0], unicode)
def test_round_trip_executemany(self):
unicode_table = self.tables.unicode_table
config.db.execute(
unicode_table.insert(),
[
{
'unicode_data': self.data,
}
for i in xrange(3)
]
)
rows = config.db.execute(
select([
unicode_table.c.unicode_data,
])
).fetchall()
eq_(
rows,
[(self.data, ) for i in xrange(3)]
)
for row in rows:
assert isinstance(row[0], unicode)
def _test_empty_strings(self):
unicode_table = self.tables.unicode_table
config.db.execute(
unicode_table.insert(),
{"unicode_data": u''}
)
row = config.db.execute(
select([unicode_table.c.unicode_data])
).first()
eq_(row, (u'',))
class UnicodeVarcharTest(_UnicodeFixture, fixtures.TablesTest):
__requires__ = 'unicode_data',
datatype = Unicode(255)
@requirements.empty_strings_varchar
def test_empty_strings_varchar(self):
self._test_empty_strings()
class UnicodeTextTest(_UnicodeFixture, fixtures.TablesTest):
__requires__ = 'unicode_data', 'text_type'
datatype = UnicodeText()
@requirements.empty_strings_text
def test_empty_strings_text(self):
self._test_empty_strings()
class TextTest(fixtures.TablesTest):
@classmethod
def define_tables(cls, metadata):
Table('text_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('text_data', Text),
)
def test_text_roundtrip(self):
text_table = self.tables.text_table
config.db.execute(
text_table.insert(),
{"text_data": 'some text'}
)
row = config.db.execute(
select([text_table.c.text_data])
).first()
eq_(row, ('some text',))
def test_text_empty_strings(self):
text_table = self.tables.text_table
config.db.execute(
text_table.insert(),
{"text_data": ''}
)
row = config.db.execute(
select([text_table.c.text_data])
).first()
eq_(row, ('',))
class StringTest(fixtures.TestBase):
@requirements.unbounded_varchar
def test_nolength_string(self):
metadata = MetaData()
foo = Table('foo', metadata,
Column('one', String)
)
foo.create(config.db)
foo.drop(config.db)
class _DateFixture(object):
compare = None
@classmethod
def define_tables(cls, metadata):
Table('date_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('date_data', cls.datatype),
)
def test_round_trip(self):
date_table = self.tables.date_table
config.db.execute(
date_table.insert(),
{'date_data': self.data}
)
row = config.db.execute(
select([
date_table.c.date_data,
])
).first()
compare = self.compare or self.data
eq_(row,
(compare, ))
assert isinstance(row[0], type(compare))
def test_null(self):
date_table = self.tables.date_table
config.db.execute(
date_table.insert(),
{'date_data': None}
)
row = config.db.execute(
select([
date_table.c.date_data,
])
).first()
eq_(row, (None,))
class DateTimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'datetime',
datatype = DateTime
data = datetime.datetime(2012, 10, 15, 12, 57, 18)
class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'datetime_microseconds',
datatype = DateTime
data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396)
class TimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'time',
datatype = Time
data = datetime.time(12, 57, 18)
class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'time_microseconds',
datatype = Time
data = datetime.time(12, 57, 18, 396)
class DateTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'date',
datatype = Date
data = datetime.date(2012, 10, 15)
class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'date',
datatype = Date
data = datetime.datetime(2012, 10, 15, 12, 57, 18)
compare = datetime.date(2012, 10, 15)
class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'datetime_historic',
datatype = DateTime
data = datetime.datetime(1850, 11, 10, 11, 52, 35)
class DateHistoricTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'date_historic',
datatype = Date
data = datetime.date(1727, 4, 1)
class NumericTest(fixtures.TestBase):
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
@testing.provide_metadata
def _do_test(self, type_, input_, output, filter_=None, check_scale=False):
metadata = self.metadata
t = Table('t', metadata, Column('x', type_))
t.create()
t.insert().execute([{'x':x} for x in input_])
result = set([row[0] for row in t.select().execute()])
output = set(output)
if filter_:
result = set(filter_(x) for x in result)
output = set(filter_(x) for x in output)
eq_(result, output)
if check_scale:
eq_(
[str(x) for x in result],
[str(x) for x in output],
)
def test_numeric_as_decimal(self):
self._do_test(
Numeric(precision=8, scale=4),
[15.7563, decimal.Decimal("15.7563"), None],
[decimal.Decimal("15.7563"), None],
)
def test_numeric_as_float(self):
self._do_test(
Numeric(precision=8, scale=4, asdecimal=False),
[15.7563, decimal.Decimal("15.7563"), None],
[15.7563, None],
)
def test_float_as_decimal(self):
self._do_test(
Float(precision=8, asdecimal=True),
[15.7563, decimal.Decimal("15.7563"), None],
[decimal.Decimal("15.7563"), None],
)
def test_float_as_float(self):
self._do_test(
Float(precision=8),
[15.7563, decimal.Decimal("15.7563")],
[15.7563],
filter_=lambda n: n is not None and round(n, 5) or None
)
@testing.requires.precision_numerics_general
def test_precision_decimal(self):
numbers = set([
decimal.Decimal("54.234246451650"),
decimal.Decimal("0.004354"),
decimal.Decimal("900.0"),
])
self._do_test(
Numeric(precision=18, scale=12),
numbers,
numbers,
)
@testing.requires.precision_numerics_enotation_large
def test_enotation_decimal(self):
"""test exceedingly small decimals.
Decimal reports values with E notation when the exponent
is greater than 6.
"""
numbers = set([
decimal.Decimal('1E-2'),
decimal.Decimal('1E-3'),
decimal.Decimal('1E-4'),
decimal.Decimal('1E-5'),
decimal.Decimal('1E-6'),
decimal.Decimal('1E-7'),
decimal.Decimal('1E-8'),
decimal.Decimal("0.01000005940696"),
decimal.Decimal("0.00000005940696"),
decimal.Decimal("0.00000000000696"),
decimal.Decimal("0.70000000000696"),
decimal.Decimal("696E-12"),
])
self._do_test(
Numeric(precision=18, scale=14),
numbers,
numbers
)
@testing.requires.precision_numerics_enotation_large
def test_enotation_decimal_large(self):
"""test exceedingly large decimals.
"""
numbers = set([
decimal.Decimal('4E+8'),
decimal.Decimal("5748E+15"),
decimal.Decimal('1.521E+15'),
decimal.Decimal('00000000000000.1E+12'),
])
self._do_test(
Numeric(precision=25, scale=2),
numbers,
numbers
)
@testing.requires.precision_numerics_many_significant_digits
def test_many_significant_digits(self):
numbers = set([
decimal.Decimal("31943874831932418390.01"),
decimal.Decimal("319438950232418390.273596"),
decimal.Decimal("87673.594069654243"),
])
self._do_test(
Numeric(precision=38, scale=12),
numbers,
numbers
)
@testing.requires.precision_numerics_retains_significant_digits
def test_numeric_no_decimal(self):
numbers = set([
decimal.Decimal("1.000")
])
self._do_test(
Numeric(precision=5, scale=3),
numbers,
numbers,
check_scale=True
)
__all__ = ('UnicodeVarcharTest', 'UnicodeTextTest',
'DateTest', 'DateTimeTest', 'TextTest',
'NumericTest',
'DateTimeHistoricTest', 'DateTimeCoercedToDateTimeTest',
'TimeMicrosecondsTest', 'TimeTest', 'DateTimeMicrosecondsTest',
'DateHistoricTest', 'StringTest')
| mit |
agconti/njode | env/lib/python2.7/site-packages/django/contrib/gis/gdal/srs.py | 35 | 11986 | """
The Spatial Reference class, which represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input=''):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
The input may be string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
srs_type = 'user'
if isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr:
capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
>>> print(srs['UNIT|AUTHORITY']) # For the units authority, you have to use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
and will automatically determine whether to return the linear
or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = name.decode()
return (units, name)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr:
capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
| bsd-3-clause |
tumbl3w33d/ansible | lib/ansible/modules/cloud/vmware/vmware_guest_disk_info.py | 10 | 13626 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, NAER William Leemans (@bushvin) <[email protected]>
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_disk_info
short_description: Gather info about disks of given virtual machine
description:
- This module can be used to gather information about disks belonging to given virtual machine.
- All parameters and VMware object names are case sensitive.
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde) <[email protected]>
notes:
- Tested on vSphere 6.0 and 6.5.
- Disk UUID information is added in version 2.8.
- Additional information about guest disk backings added in version 2.8.
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the virtual machine.
- This is required parameter, if parameter C(uuid) or C(moid) is not supplied.
type: str
uuid:
description:
- UUID of the instance to gather information if known, this is VMware's unique identifier.
- This is required parameter, if parameter C(name) or C(moid) is not supplied.
type: str
moid:
description:
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
type: str
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
folder:
description:
- Destination folder, absolute or relative path to find an existing guest.
- This is required parameter, only if multiple VMs are found with same name.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
datacenter:
description:
- The datacenter name to which virtual machine belongs to.
required: True
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather disk info from virtual machine using UUID
vmware_guest_disk_info:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: ha-datacenter
validate_certs: no
uuid: 421e4592-c069-924d-ce20-7e7533fab926
delegate_to: localhost
register: disk_info
- name: Gather disk info from virtual machine using name
vmware_guest_disk_info:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: ha-datacenter
validate_certs: no
name: VM_225
delegate_to: localhost
register: disk_info
- name: Gather disk info from virtual machine using moid
vmware_guest_disk_info:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: ha-datacenter
validate_certs: no
moid: vm-42
delegate_to: localhost
register: disk_info
'''
RETURN = """
guest_disk_info:
description: metadata about the virtual machine's disks
returned: always
type: dict
sample: {
"0": {
"backing_datastore": "datastore2",
"backing_disk_mode": "persistent",
"backing_eagerlyscrub": false,
"backing_filename": "[datastore2] VM_225/VM_225.vmdk",
"backing_thinprovisioned": false,
"backing_type": "FlatVer2",
"backing_writethrough": false,
"backing_uuid": "200C3A00-f82a-97af-02ff-62a595f0020a",
"capacity_in_bytes": 10485760,
"capacity_in_kb": 10240,
"controller_bus_number": 0,
"controller_key": 1000,
"controller_type": "paravirtual",
"key": 2000,
"label": "Hard disk 1",
"summary": "10,240 KB",
"unit_number": 0
},
"1": {
"backing_datastore": "datastore3",
"backing_devicename": "vml.012345678901234567890123456789012345678901234567890123",
"backing_disk_mode": "independent_persistent",
"backing_filename": "[datastore3] VM_226/VM_226.vmdk",
"backing_lunuuid": "012345678901234567890123456789012345678901234567890123",
"backing_type": "RawDiskMappingVer1",
"backing_uuid": null,
"capacity_in_bytes": 15728640,
"capacity_in_kb": 15360,
"controller_bus_number": 0,
"controller_key": 1000,
"controller_type": "paravirtual",
"key": 2001,
"label": "Hard disk 3",
"summary": "15,360 KB",
"unit_number": 1
},
}
"""
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
def gather_disk_info(self, vm_obj):
"""
Gather information about VM's disks
Args:
vm_obj: Managed object of virtual machine
Returns: A list of dict containing disks information
"""
controller_info = dict()
disks_info = dict()
if vm_obj is None:
return disks_info
controller_types = {
vim.vm.device.VirtualLsiLogicController: 'lsilogic',
vim.vm.device.ParaVirtualSCSIController: 'paravirtual',
vim.vm.device.VirtualBusLogicController: 'buslogic',
vim.vm.device.VirtualLsiLogicSASController: 'lsilogicsas',
vim.vm.device.VirtualIDEController: 'ide'
}
controller_index = 0
for controller in vm_obj.config.hardware.device:
if isinstance(controller, tuple(controller_types.keys())):
controller_info[controller_index] = dict(
key=controller.key,
controller_type=controller_types[type(controller)],
bus_number=controller.busNumber,
devices=controller.device
)
controller_index += 1
disk_index = 0
for disk in vm_obj.config.hardware.device:
if isinstance(disk, vim.vm.device.VirtualDisk):
disks_info[disk_index] = dict(
key=disk.key,
label=disk.deviceInfo.label,
summary=disk.deviceInfo.summary,
backing_filename=disk.backing.fileName,
backing_datastore=disk.backing.datastore.name,
controller_key=disk.controllerKey,
unit_number=disk.unitNumber,
capacity_in_kb=disk.capacityInKB,
capacity_in_bytes=disk.capacityInBytes,
)
if isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer1BackingInfo):
disks_info[disk_index]['backing_type'] = 'FlatVer1'
disks_info[disk_index]['backing_writethrough'] = disk.backing.writeThrough
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo):
disks_info[disk_index]['backing_type'] = 'FlatVer2'
disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
disks_info[disk_index]['backing_thinprovisioned'] = bool(disk.backing.thinProvisioned)
disks_info[disk_index]['backing_eagerlyscrub'] = bool(disk.backing.eagerlyScrub)
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.LocalPMemBackingInfo):
disks_info[disk_index]['backing_type'] = 'LocalPMem'
disks_info[disk_index]['backing_volumeuuid'] = disk.backing.volumeUUID
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.PartitionedRawDiskVer2BackingInfo):
disks_info[disk_index]['backing_type'] = 'PartitionedRawDiskVer2'
disks_info[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo):
disks_info[disk_index]['backing_type'] = 'RawDiskMappingVer1'
disks_info[disk_index]['backing_devicename'] = disk.backing.deviceName
disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
disks_info[disk_index]['backing_lunuuid'] = disk.backing.lunUuid
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskVer2BackingInfo):
disks_info[disk_index]['backing_type'] = 'RawDiskVer2'
disks_info[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SeSparseBackingInfo):
disks_info[disk_index]['backing_type'] = 'SeSparse'
disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer1BackingInfo):
disks_info[disk_index]['backing_type'] = 'SparseVer1'
disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
disks_info[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB
disks_info[disk_index]['backing_split'] = bool(disk.backing.split)
disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer2BackingInfo):
disks_info[disk_index]['backing_type'] = 'SparseVer2'
disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode
disks_info[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB
disks_info[disk_index]['backing_split'] = bool(disk.backing.split)
disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough)
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid
for controller_index in range(len(controller_info)):
if controller_info[controller_index]['key'] == disks_info[disk_index]['controller_key']:
disks_info[disk_index]['controller_bus_number'] = controller_info[controller_index]['bus_number']
disks_info[disk_index]['controller_type'] = controller_info[controller_index]['controller_type']
disk_index += 1
return disks_info
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
name=dict(type='str'),
uuid=dict(type='str'),
moid=dict(type='str'),
use_instance_uuid=dict(type='bool', default=False),
folder=dict(type='str'),
datacenter=dict(type='str', required=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['name', 'uuid', 'moid']
],
supports_check_mode=True,
)
if module.params['folder']:
# FindByInventoryPath() does not require an absolute path
# so we should leave the input folder path unmodified
module.params['folder'] = module.params['folder'].rstrip('/')
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
if vm:
# VM exists
try:
module.exit_json(guest_disk_info=pyv.gather_disk_info(vm))
except Exception as exc:
module.fail_json(msg="Failed to gather information with exception : %s" % to_text(exc))
else:
# We were unable to find the virtual machine the user specified
# Bail out
vm_id = (module.params.get('uuid') or module.params.get('moid') or module.params.get('name'))
module.fail_json(msg="Unable to gather disk information for non-existing VM %s" % vm_id)
if __name__ == '__main__':
main()
| gpl-3.0 |
pramsey/mapnik | scons/scons-local-2.3.4/SCons/Tool/midl.py | 9 | 3034 | """SCons.Tool.midl
Tool-specific initialization for midl (Microsoft IDL compiler).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/midl.py 2014/09/27 12:51:43 garyo"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner.IDL
import SCons.Util
from MSCommon import msvc_exists
def midl_emitter(target, source, env):
"""Produces a list of outputs from the MIDL compiler"""
base, ext = SCons.Util.splitext(str(target[0]))
tlb = target[0]
incl = base + '.h'
interface = base + '_i.c'
t = [tlb, incl, interface]
midlcom = env['MIDLCOM']
if midlcom.find('/proxy') != -1:
proxy = base + '_p.c'
t.append(proxy)
if midlcom.find('/dlldata') != -1:
dlldata = base + '_data.c'
t.append(dlldata)
return (t,source)
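# A minimal sketch of the emitter's effect (hypothetical target name):
# building foo.tlb from foo.idl with a MIDLCOM containing both /proxy and
# /dlldata, as set up in generate() below, yields the targets
#   [foo.tlb, foo.h, foo_i.c, foo_p.c, foo_data.c]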
idl_scanner = SCons.Scanner.IDL.IDLScan()
midl_action = SCons.Action.Action('$MIDLCOM', '$MIDLCOMSTR')
midl_builder = SCons.Builder.Builder(action = midl_action,
src_suffix = '.idl',
suffix='.tlb',
emitter = midl_emitter,
source_scanner = idl_scanner)
def generate(env):
"""Add Builders and construction variables for midl to an Environment."""
env['MIDL'] = 'MIDL.EXE'
env['MIDLFLAGS'] = SCons.Util.CLVar('/nologo')
env['MIDLCOM'] = '$MIDL $MIDLFLAGS /tlb ${TARGETS[0]} /h ${TARGETS[1]} /iid ${TARGETS[2]} /proxy ${TARGETS[3]} /dlldata ${TARGETS[4]} $SOURCE 2> NUL'
env['BUILDERS']['TypeLibrary'] = midl_builder
def exists(env):
return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
Tesla-Redux-Devices/kernel_lge_g3 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
blue236/ardupilot | Tools/scripts/magfit_flashlog.py | 278 | 4744 | #!/usr/bin/env python
''' Fit a best estimate of magnetometer offsets from an ArduCopter flash log,
using the algorithm from Bill Premerlani.
'''
import sys, time, os, math
# command line option handling
from optparse import OptionParser
parser = OptionParser("magfit_flashlog.py [options]")
parser.add_option("--verbose", action='store_true', default=False, help="verbose offset output")
parser.add_option("--gain", type='float', default=0.01, help="algorithm gain")
parser.add_option("--noise", type='float', default=0, help="noise to add")
parser.add_option("--max-change", type='float', default=10, help="max step change")
parser.add_option("--min-diff", type='float', default=50, help="min mag vector delta")
parser.add_option("--history", type='int', default=20, help="how many points to keep")
parser.add_option("--repeat", type='int', default=1, help="number of repeats through the data")
(opts, args) = parser.parse_args()
from rotmat import Vector3, Matrix3
if len(args) < 1:
print("Usage: magfit_flashlog.py [options] <LOGFILE...>")
sys.exit(1)
def noise():
'''a noise vector'''
from random import gauss
v = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
v.normalize()
return v * opts.noise
def find_offsets(data, ofs):
'''find mag offsets by applying Bills "offsets revisited" algorithm
on the data
This is an implementation of the algorithm from:
http://gentlenav.googlecode.com/files/MagnetometerOffsetNullingRevisited.pdf
'''
# a limit on the maximum change in each step
max_change = opts.max_change
# the gain factor for the algorithm
gain = opts.gain
data2 = []
for d in data:
d = d.copy() + noise()
d.x = float(int(d.x + 0.5))
d.y = float(int(d.y + 0.5))
d.z = float(int(d.z + 0.5))
data2.append(d)
data = data2
history_idx = 0
mag_history = data[0:opts.history]
for i in range(opts.history, len(data)):
B1 = mag_history[history_idx] + ofs
B2 = data[i] + ofs
diff = B2 - B1
diff_length = diff.length()
if diff_length <= opts.min_diff:
# the mag vector hasn't changed enough - we don't get any
# information from this
history_idx = (history_idx+1) % opts.history
continue
mag_history[history_idx] = data[i]
history_idx = (history_idx+1) % opts.history
# equation 6 of Bills paper
delta = diff * (gain * (B2.length() - B1.length()) / diff_length)
# limit the change from any one reading. This is to prevent
# single crazy readings from throwing off the offsets for a long
# time
delta_length = delta.length()
if max_change != 0 and delta_length > max_change:
delta *= max_change / delta_length
# set the new offsets
ofs = ofs - delta
if opts.verbose:
print ofs
return ofs
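# A minimal usage sketch (hypothetical, synthetic data): readings lying on
# a sphere centred at (10, -5, 3) should drive the estimate towards the
# negated centre, i.e. offsets near (-10, 5, -3):
#   ofs = find_offsets(data, Vector3(0, 0, 0))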
def plot_corrected_field(filename, data, offsets):
f = open(filename, mode='w')
for d in data:
corrected = d + offsets
f.write("%.1f\n" % corrected.length())
f.close()
def magfit(logfile):
'''find best magnetometer offset fit to a log file'''
print("Processing log %s" % filename)
# open the log file
flog = open(filename, mode='r')
data = []
data_no_motors = []
mag = None
offsets = None
# now gather all the data
for line in flog:
if not line.startswith('COMPASS,'):
continue
line = line.rstrip()
line = line.replace(' ', '')
a = line.split(',')
ofs = Vector3(float(a[4]), float(a[5]), float(a[6]))
if offsets is None:
initial_offsets = ofs
offsets = ofs
motor_ofs = Vector3(float(a[7]), float(a[8]), float(a[9]))
mag = Vector3(float(a[1]), float(a[2]), float(a[3]))
mag = mag - offsets
data.append(mag)
data_no_motors.append(mag - motor_ofs)
print("Extracted %u data points" % len(data))
print("Current offsets: %s" % initial_offsets)
# run the fitting algorithm
ofs = initial_offsets
for r in range(opts.repeat):
ofs = find_offsets(data, ofs)
plot_corrected_field('plot.dat', data, ofs)
plot_corrected_field('initial.dat', data, initial_offsets)
plot_corrected_field('zero.dat', data, Vector3(0,0,0))
plot_corrected_field('hand.dat', data, Vector3(-25,-8,-2))
plot_corrected_field('zero-no-motors.dat', data_no_motors, Vector3(0,0,0))
print('Loop %u offsets %s' % (r, ofs))
sys.stdout.flush()
print("New offsets: %s" % ofs)
total = 0.0
for filename in args:
magfit(filename)
| gpl-3.0 |
marcoarruda/MissionPlanner | Lib/site-packages/numpy/testing/utils.py | 53 | 47161 | """
Utility functions to facilitate testing.
"""
import os
import sys
import re
import operator
import types
import warnings
from nosetester import import_nose
__all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp',
'assert_array_max_ulp', 'assert_warns', 'assert_allclose']
verbose = 0
def assert_(val, msg='') :
"""
Assert that works in release mode.
The Python built-in ``assert`` does not work when executing code in
optimized mode (the ``-O`` flag) - no byte-code is generated for it.
For documentation on usage, refer to the Python documentation.
"""
if not val :
raise AssertionError(msg)
def gisnan(x):
"""like isnan, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isnan and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isnan
st = isnan(x)
if isinstance(st, types.NotImplementedType):
raise TypeError("isnan not supported for this type")
return st
def gisfinite(x):
"""like isfinite, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isfinite and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isfinite, seterr
err = seterr(invalid='ignore')
try:
st = isfinite(x)
if isinstance(st, types.NotImplementedType):
raise TypeError("isfinite not supported for this type")
finally:
seterr(**err)
return st
def gisinf(x):
"""like isinf, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
isinf and other ufunc sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isinf, seterr
err = seterr(invalid='ignore')
try:
st = isinf(x)
if isinstance(st, types.NotImplementedType):
raise TypeError("isinf not supported for this type")
finally:
seterr(**err)
return st
def rand(*args):
"""Returns an array of random numbers with the given shape.
This only uses the standard library, so it is useful for testing purposes.
"""
import random
from numpy.core import zeros, float64
results = zeros(args, float64)
f = results.flat
for i in range(len(f)):
f[i] = random.random()
return results
if sys.platform[:5]=='linux':
def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),
_load_time=[]):
""" Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc. """
import time
if not _load_time:
_load_time.append(time.time())
try:
f=open(_proc_pid_stat,'r')
l = f.readline().split(' ')
f.close()
return int(l[13])
except:
return int(100*(time.time()-_load_time[0]))
def memusage(_proc_pid_stat = '/proc/%s/stat'%(os.getpid())):
""" Return virtual memory size in bytes of the running python.
"""
try:
f=open(_proc_pid_stat,'r')
l = f.readline().split(' ')
f.close()
return int(l[22])
except:
return
else:
# os.getpid is not in all platforms available.
# Using time is safe but inaccurate, especially when process
# was suspended or sleeping.
def jiffies(_load_time=[]):
""" Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. [Emulation with time.time]. """
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
def memusage():
""" Return memory usage of running python. [Not implemented]"""
raise NotImplementedError
if os.name=='nt' and sys.version[:3] > '2.3':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance = None,
inum=-1, format = None, machine=None):
# NOTE: Many counters require 2 samples to give accurate results,
# including "% Processor Time" (as by definition, at any instant, a
# thread's CPU usage is either 0 or 100). To read counters like this,
# you should copy this function, but keep the counter open, and call
# CollectQueryData() each time you need to know.
# See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
# My older explanation for this was that the "AddCounter" process forced
# the CPU to 100%, but the above makes more sense :)
import win32pdh
if format is None: format = win32pdh.PDH_FMT_LONG
path = win32pdh.MakeCounterPath( (machine,object,instance, None, inum,counter) )
hq = win32pdh.OpenQuery()
try:
hc = win32pdh.AddCounter(hq, path)
try:
win32pdh.CollectQueryData(hq)
type, val = win32pdh.GetFormattedCounterValue(hc, format)
return val
finally:
win32pdh.RemoveCounter(hc)
finally:
win32pdh.CloseQuery(hq)
def memusage(processName="python", instance=0):
# from win32pdhutil, part of the win32all package
import win32pdh
return GetPerformanceAttributes("Process", "Virtual Bytes",
processName, instance,
win32pdh.PDH_FMT_LONG, None)
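# A hedged sketch of the two-sample pattern described above (hypothetical,
# not part of this module): keep the counter open, collect twice, then read.
#   hq = win32pdh.OpenQuery()
#   hc = win32pdh.AddCounter(hq, path)
#   win32pdh.CollectQueryData(hq) # first sample
#   time.sleep(1.0)
#   win32pdh.CollectQueryData(hq) # second sample
#   _, val = win32pdh.GetFormattedCounterValue(hc, win32pdh.PDH_FMT_DOUBLE)
#   win32pdh.RemoveCounter(hc); win32pdh.CloseQuery(hq)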
def build_err_msg(arrays, err_msg, header='Items are not equal:',
verbose=True,
names=('ACTUAL', 'DESIRED')):
msg = ['\n' + header]
if err_msg:
if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
msg = [msg[0] + ' ' + err_msg]
else:
msg.append(err_msg)
if verbose:
for i, a in enumerate(arrays):
try:
r = repr(a)
except:
r = '[repr failed]'
if r.count('\n') > 3:
r = '\n'.join(r.splitlines()[:3])
r += '...'
msg.append(' %s: %s' % (names[i], r))
return '\n'.join(msg)
def assert_equal(actual,desired,err_msg='',verbose=True):
"""
Raise an assertion if two objects are not equal.
Given two objects (lists, tuples, dictionaries or numpy arrays), check
that all elements of these objects are equal. An exception is raised at
the first conflicting values.
Parameters
----------
actual : list, tuple, dict or ndarray
The object to check.
desired : list, tuple, dict or ndarray
The expected object.
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal.
Examples
--------
>>> np.testing.assert_equal([4,5], [4,6])
...
<type 'exceptions.AssertionError'>:
Items are not equal:
item=1
ACTUAL: 5
DESIRED: 6
"""
if isinstance(desired, dict):
if not isinstance(actual, dict) :
raise AssertionError(repr(type(actual)))
assert_equal(len(actual),len(desired),err_msg,verbose)
for k,i in desired.items():
if k not in actual :
raise AssertionError(repr(k))
assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k,err_msg), verbose)
return
if isinstance(desired, (list,tuple)) and isinstance(actual, (list,tuple)):
assert_equal(len(actual),len(desired),err_msg,verbose)
for k in range(len(desired)):
assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k,err_msg), verbose)
return
from numpy.core import ndarray, isscalar, signbit
from numpy.lib import iscomplexobj, real, imag
if isinstance(actual, ndarray) or isinstance(desired, ndarray):
return assert_array_equal(actual, desired, err_msg, verbose)
msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except ValueError:
usecomplex = False
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_equal(actualr, desiredr)
assert_equal(actuali, desiredi)
except AssertionError:
raise AssertionError(msg)
# Inf/nan/negative zero handling
try:
# isscalar test to check cases such as [np.nan] != np.nan
if isscalar(desired) != isscalar(actual):
raise AssertionError(msg)
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
isdesnan = gisnan(desired)
isactnan = gisnan(actual)
if isdesnan or isactnan:
if not (isdesnan and isactnan):
raise AssertionError(msg)
else:
if not desired == actual:
raise AssertionError(msg)
return
elif desired == 0 and actual == 0:
if not signbit(desired) == signbit(actual):
raise AssertionError(msg)
# If TypeError or ValueError raised while using isnan and co, just handle
# as before
except (TypeError, ValueError, NotImplementedError):
pass
if desired != actual :
raise AssertionError(msg)
def print_assert_equal(test_string,actual,desired):
"""
Test if two objects are equal, and print an error message if test fails.
The test is performed with ``actual == desired``.
Parameters
----------
test_string : str
The message supplied to AssertionError.
actual : object
The object to test for equality against `desired`.
desired : object
The expected result.
Examples
--------
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
Traceback (most recent call last):
...
AssertionError: Test XYZ of func xyz failed
ACTUAL:
[0, 1]
DESIRED:
[0, 2]
"""
import pprint
try:
assert(actual == desired)
except AssertionError:
import cStringIO
msg = cStringIO.StringIO()
msg.write(test_string)
msg.write(' failed\nACTUAL: \n')
pprint.pprint(actual,msg)
msg.write('DESIRED: \n')
pprint.pprint(desired,msg)
raise AssertionError(msg.getvalue())
def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
"""
Raise an assertion if two items are not equal up to desired precision.
The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal)
Given two objects (numbers or ndarrays), check that all elements of these
objects are almost equal. An exception is raised at conflicting values.
For ndarrays this delegates to assert_array_almost_equal
Parameters
----------
actual : number or ndarray
The object to check.
desired : number or ndarray
The expected object.
decimal : integer (decimal=7)
desired precision
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_array_almost_equal: compares array_like objects
assert_equal: tests objects for equality
Examples
--------
>>> import numpy.testing as npt
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
...
<type 'exceptions.AssertionError'>:
Items are not equal:
ACTUAL: 2.3333333333333002
DESIRED: 2.3333333399999998
>>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
\t\t\tnp.array([1.0,2.33333334]), decimal=9)
...
<type 'exceptions.AssertionError'>:
Arrays are not almost equal
<BLANKLINE>
(mismatch 50.0%)
x: array([ 1. , 2.33333333])
y: array([ 1. , 2.33333334])
"""
from numpy.core import ndarray
from numpy.lib import iscomplexobj, real, imag
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except ValueError:
usecomplex = False
msg = build_err_msg([actual, desired], err_msg, verbose=verbose,
header='Arrays are not almost equal')
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_almost_equal(actualr, desiredr, decimal=decimal)
assert_almost_equal(actuali, desiredi, decimal=decimal)
except AssertionError:
raise AssertionError(msg)
if isinstance(actual, (ndarray, tuple, list)) \
or isinstance(desired, (ndarray, tuple, list)):
return assert_array_almost_equal(actual, desired, decimal, err_msg)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
if gisnan(desired) or gisnan(actual):
if not (gisnan(desired) and gisnan(actual)):
raise AssertionError(msg)
else:
if not desired == actual:
raise AssertionError(msg)
return
except (NotImplementedError, TypeError):
pass
if round(abs(desired - actual),decimal) != 0 :
raise AssertionError(msg)
def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
"""
Raise an assertion if two items are not equal up to significant digits.
Given two numbers, check that they are approximately equal.
Approximately equal is defined as the number of significant digits
that agree.
Parameters
----------
actual : number
The object to check.
desired : number
The expected object.
significant : integer (significant=7)
desired precision
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_almost_equal: compares objects by decimals
assert_array_almost_equal: compares array_like objects by decimals
assert_equal: tests objects for equality
Examples
--------
>>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
    ...                                significant=8)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
    ...                                significant=8)
...
<type 'exceptions.AssertionError'>:
Items are not equal to 8 significant digits:
ACTUAL: 1.234567e-021
DESIRED: 1.2345672000000001e-021
the evaluated condition that raises the exception is
>>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
True
"""
import numpy as np
actual, desired = map(float, (actual, desired))
if desired==actual:
return
    # Normalize the numbers to the range (-10.0, 10.0)
# scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
err = np.seterr(invalid='ignore')
try:
scale = 0.5*(np.abs(desired) + np.abs(actual))
scale = np.power(10,np.floor(np.log10(scale)))
finally:
np.seterr(**err)
try:
sc_desired = desired/scale
except ZeroDivisionError:
sc_desired = 0.0
try:
sc_actual = actual/scale
except ZeroDivisionError:
sc_actual = 0.0
msg = build_err_msg([actual, desired], err_msg,
header='Items are not equal to %d significant digits:' %
significant,
verbose=verbose)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
if gisnan(desired) or gisnan(actual):
if not (gisnan(desired) and gisnan(actual)):
raise AssertionError(msg)
else:
if not desired == actual:
raise AssertionError(msg)
return
except (TypeError, NotImplementedError):
pass
if np.abs(sc_desired - sc_actual) >= np.power(10.,-(significant-1)) :
raise AssertionError(msg)
def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
header=''):
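    # Shared workhorse for the assert_array_* helpers: it verifies shapes,
    # masks NaNs that occur at the same locations in x and y, applies
    # `comparison` elementwise, and reports a mismatch percentage on failure.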
from numpy.core import array, isnan, any
x = array(x, copy=False, subok=True)
y = array(y, copy=False, subok=True)
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPfdgFDG'
try:
cond = (x.shape==() or y.shape==()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ '\n(shapes %s, %s mismatch)' % (x.shape,
y.shape),
verbose=verbose, header=header,
names=('x', 'y'))
if not cond :
raise AssertionError(msg)
if (isnumber(x) and isnumber(y)) and (any(isnan(x)) or any(isnan(y))):
# Handling nan: we first check that x and y have the nan at the
# same locations, and then we mask the nan and do the comparison as
# usual.
xnanid = isnan(x)
ynanid = isnan(y)
try:
assert_array_equal(xnanid, ynanid)
except AssertionError:
msg = build_err_msg([x, y],
err_msg
+ '\n(x and y nan location mismatch %s, ' \
'%s mismatch)' % (xnanid, ynanid),
verbose=verbose, header=header,
names=('x', 'y'))
raise AssertionError(msg)
# If only one item, it was a nan, so just return
if x.size == y.size == 1:
return
val = comparison(x[~xnanid], y[~ynanid])
else:
val = comparison(x,y)
if isinstance(val, bool):
cond = val
reduced = [0]
else:
reduced = val.ravel()
cond = reduced.all()
reduced = reduced.tolist()
if not cond:
match = 100-100.0*reduced.count(1)/len(reduced)
msg = build_err_msg([x, y],
err_msg
+ '\n(mismatch %s%%)' % (match,),
verbose=verbose, header=header,
names=('x', 'y'))
if not cond :
raise AssertionError(msg)
except ValueError:
msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
names=('x', 'y'))
raise ValueError(msg)
def assert_array_equal(x, y, err_msg='', verbose=True):
"""
Raise an assertion if two array_like objects are not equal.
Given two array_like objects, check that the shape is equal and all
elements of these objects are equal. An exception is raised at
shape mismatch or conflicting values. In contrast to the standard usage
    in numpy, NaNs are compared like numbers; no assertion is raised if
both objects have NaNs in the same positions.
The usual caution for verifying equality with floating point numbers is
advised.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_array_almost_equal: test objects for equality up to precision
assert_equal: tests objects for equality
Examples
--------
the first assert does not raise an exception
>>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
    ...     [np.exp(0),2.33333, np.nan])
    the assert fails due to numerical imprecision with floats
>>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
    ...     [1, np.sqrt(np.pi)**2, np.nan])
...
<type 'exceptions.ValueError'>:
AssertionError:
Arrays are not equal
<BLANKLINE>
(mismatch 50.0%)
x: array([ 1. , 3.14159265, NaN])
y: array([ 1. , 3.14159265, NaN])
use assert_array_almost_equal for these cases instead
>>> np.testing.assert_array_almost_equal([1.0,np.pi,np.nan],
    ...     [1, np.sqrt(np.pi)**2, np.nan], decimal=15)
"""
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
verbose=verbose, header='Arrays are not equal')
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
Raise an assertion if two objects are not equal up to desired precision.
The test verifies identical shapes and verifies values with
abs(desired-actual) < 0.5 * 10**(-decimal)
Given two array_like objects, check that the shape is equal and all
elements of these objects are almost equal. An exception is raised at
shape mismatch or conflicting values. In contrast to the standard usage
    in numpy, NaNs are compared like numbers; no assertion is raised if
both objects have NaNs in the same positions.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
decimal : integer (decimal=6)
desired precision
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_almost_equal: simple version for comparing numbers
assert_array_equal: tests objects for equality
Examples
--------
the first assert does not raise an exception
>>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
    ...     [1.0,2.333,np.nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...     [1.0,2.33339,np.nan], decimal=5)
...
<type 'exceptions.AssertionError'>:
AssertionError:
Arrays are not almost equal
<BLANKLINE>
(mismatch 50.0%)
x: array([ 1. , 2.33333, NaN])
y: array([ 1. , 2.33339, NaN])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...     [1.0,2.33333, 5], decimal=5)
<type 'exceptions.ValueError'>:
ValueError:
Arrays are not almost equal
x: array([ 1. , 2.33333, NaN])
y: array([ 1. , 2.33333, 5. ])
"""
from numpy.core import around, number, float_
from numpy.core.numerictypes import issubdtype
from numpy.core.fromnumeric import any as npany
def compare(x, y):
try:
if npany(gisinf(x)) or npany( gisinf(y)):
xinfid = gisinf(x)
yinfid = gisinf(y)
                if not (xinfid == yinfid).all():
                    return False
                # if only one item, x and y are both +-inf
if x.size == y.size == 1:
return x == y
x = x[~xinfid]
y = y[~yinfid]
except (TypeError, NotImplementedError):
pass
z = abs(x-y)
if not issubdtype(z.dtype, number):
z = z.astype(float_) # handle object arrays
return around(z, decimal) <= 10.0**(-decimal)
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header='Arrays are not almost equal')
def assert_array_less(x, y, err_msg='', verbose=True):
"""
Raise an assertion if two array_like objects are not ordered by less than.
Given two array_like objects, check that the shape is equal and all
elements of the first object are strictly smaller than those of the
second object. An exception is raised at shape mismatch or incorrectly
ordered values. Shape mismatch does not raise if an object has zero
dimension. In contrast to the standard usage in numpy, NaNs are
    compared; no assertion is raised if both objects have NaNs in the same
positions.
Parameters
----------
x : array_like
The smaller object to check.
y : array_like
The larger object to compare.
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_array_equal: tests objects for equality
assert_array_almost_equal: test objects for equality up to precision
Examples
--------
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
...
<type 'exceptions.ValueError'>:
Arrays are not less-ordered
(mismatch 50.0%)
x: array([ 1., 1., NaN])
y: array([ 1., 2., NaN])
>>> np.testing.assert_array_less([1.0, 4.0], 3)
...
<type 'exceptions.ValueError'>:
Arrays are not less-ordered
(mismatch 50.0%)
x: array([ 1., 4.])
y: array(3)
>>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
...
<type 'exceptions.ValueError'>:
Arrays are not less-ordered
(shapes (3,), (1,) mismatch)
x: array([ 1., 2., 3.])
y: array([4])
"""
assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
verbose=verbose,
header='Arrays are not less-ordered')
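# Helper: execute a code string in the supplied namespace (uses the
# Python 2 'exec' statement syntax).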
def runstring(astr, dict):
exec astr in dict
def assert_string_equal(actual, desired):
"""
Test if two strings are equal.
If the given strings are equal, `assert_string_equal` does nothing.
If they are not equal, an AssertionError is raised, and the diff
between the strings is shown.
Parameters
----------
actual : str
The string to test for equality against the expected string.
desired : str
The expected string.
Examples
--------
>>> np.testing.assert_string_equal('abc', 'abc')
>>> np.testing.assert_string_equal('abc', 'abcd')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
...
AssertionError: Differences in strings:
- abc+ abcd? +
"""
# delay import of difflib to reduce startup time
import difflib
if not isinstance(actual, str) :
raise AssertionError(`type(actual)`)
if not isinstance(desired, str):
raise AssertionError(`type(desired)`)
if re.match(r'\A'+desired+r'\Z', actual, re.M): return
diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
diff_list = []
while diff:
d1 = diff.pop(0)
if d1.startswith(' '):
continue
if d1.startswith('- '):
l = [d1]
d2 = diff.pop(0)
if d2.startswith('? '):
l.append(d2)
d2 = diff.pop(0)
if not d2.startswith('+ ') :
raise AssertionError(`d2`)
l.append(d2)
d3 = diff.pop(0)
if d3.startswith('? '):
l.append(d3)
else:
diff.insert(0, d3)
if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
continue
diff_list.extend(l)
continue
raise AssertionError(`d1`)
if not diff_list:
return
msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
if actual != desired :
raise AssertionError(msg)
def rundocs(filename=None, raise_on_error=True):
"""
Run doctests found in the given file.
By default `rundocs` raises an AssertionError on failure.
Parameters
----------
filename : str
The path to the file for which the doctests are run.
raise_on_error : bool
Whether to raise an AssertionError when a doctest fails. Default is
True.
Notes
-----
The doctests can be run by the user/developer by adding the ``doctests``
argument to the ``test()`` call. For example, to run all tests (including
doctests) for `numpy.lib`::
>>> np.lib.test(doctests=True)
"""
import doctest, imp
if filename is None:
f = sys._getframe(1)
filename = f.f_globals['__file__']
name = os.path.splitext(os.path.basename(filename))[0]
path = [os.path.dirname(filename)]
file, pathname, description = imp.find_module(name, path)
try:
m = imp.load_module(name, file, pathname, description)
finally:
file.close()
tests = doctest.DocTestFinder().find(m)
runner = doctest.DocTestRunner(verbose=False)
msg = []
if raise_on_error:
out = lambda s: msg.append(s)
else:
out = None
for test in tests:
runner.run(test, out=out)
if runner.failures > 0 and raise_on_error:
raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
def raises(*args,**kwargs):
nose = import_nose()
return nose.tools.raises(*args,**kwargs)
def assert_raises(*args,**kwargs):
"""
assert_raises(exception_class, callable, *args, **kwargs)
Fail unless an exception of class exception_class is thrown
by callable when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
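    Examples
    --------
    A minimal sketch (this simply delegates to ``nose.tools.assert_raises``):
    >>> def div(a, b):
    ...     return a / b
    >>> assert_raises(ZeroDivisionError, div, 1, 0)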
"""
nose = import_nose()
return nose.tools.assert_raises(*args,**kwargs)
def decorate_methods(cls, decorator, testmatch=None):
"""
Apply a decorator to all methods in a class matching a regular expression.
The given decorator is applied to all public methods of `cls` that are
matched by the regular expression `testmatch`
(``testmatch.search(methodname)``). Methods that are private, i.e. start
with an underscore, are ignored.
Parameters
----------
cls : class
Class whose methods to decorate.
decorator : function
Decorator to apply to methods
testmatch : compiled regexp or str, optional
The regular expression. Default value is None, in which case the
nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
is used.
If `testmatch` is a string, it is compiled to a regular expression
first.
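    Examples
    --------
    A sketch with a hypothetical decorator and class (names here are
    illustrative only):
    >>> def tag(f):
    ...     f.tagged = True
    ...     return f
    >>> class Spam:
    ...     def test_eggs(self):
    ...         pass
    >>> decorate_methods(Spam, tag)
    >>> Spam.__dict__['test_eggs'].tagged
    True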
"""
if testmatch is None:
testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
else:
testmatch = re.compile(testmatch)
cls_attr = cls.__dict__
# delayed import to reduce startup time
from inspect import isfunction
methods = filter(isfunction, cls_attr.values())
for function in methods:
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
continue
if testmatch.search(funcname) and not funcname.startswith('_'):
setattr(cls, funcname, decorator(function))
return
def measure(code_str,times=1,label=None):
"""
Return elapsed time for executing code in the namespace of the caller.
The supplied code string is compiled with the Python builtin ``compile``.
The precision of the timing is 10 milli-seconds. If the code will execute
fast on this timescale, it can be executed many times to get reasonable
timing accuracy.
Parameters
----------
code_str : str
The code to be timed.
times : int, optional
The number of times the code is executed. Default is 1. The code is
only compiled once.
label : str, optional
A label to identify `code_str` with. This is passed into ``compile``
as the second argument (for run-time error messages).
Returns
-------
elapsed : float
Total elapsed time in seconds for executing `code_str` `times` times.
Examples
--------
    >>> times = 10
    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
    ...             times=times)
>>> print "Time for a single execution : ", etime / times, "s"
Time for a single execution : 0.005 s
"""
frame = sys._getframe(1)
locs,globs = frame.f_locals,frame.f_globals
code = compile(code_str,
'Test name: %s ' % label,
'exec')
i = 0
elapsed = jiffies()
while i < times:
i += 1
exec code in globs,locs
elapsed = jiffies() - elapsed
return 0.01*elapsed
def _assert_valid_refcount(op):
"""
Check that ufuncs don't mishandle refcount of object `1`.
Used in a few regression tests.
"""
import numpy as np
a = np.arange(100 * 100)
b = np.arange(100*100).reshape(100, 100)
c = b
i = 1
rc = sys.getrefcount(i)
for j in range(15):
d = op(b,c)
assert(sys.getrefcount(i) >= rc)
def assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
"""
Raise an assertion if two objects are not equal up to desired tolerance.
The test is equivalent to ``allclose(actual, desired, rtol, atol)``
Parameters
----------
actual : array_like
Array obtained.
desired : array_like
        Array desired.
rtol : float, optional
Relative tolerance
atol : float, optional
Absolute tolerance
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
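    Examples
    --------
    An illustrative use (the tolerances here are arbitrary):
    >>> x = [1e-5, 1e-3, 1e-1]
    >>> y = np.arccos(np.cos(x))
    >>> assert_allclose(x, y, rtol=1e-5, atol=0)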
"""
import numpy as np
def compare(x, y):
return np.allclose(x, y, rtol=rtol, atol=atol)
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
verbose=verbose, header=header)
def assert_array_almost_equal_nulp(x, y, nulp=1):
"""
    Compare two arrays relative to their spacing.
This is a relatively robust method to compare two arrays whose amplitude
is variable.
Parameters
----------
x, y : array_like
Input arrays.
nulp : int, optional
The maximum number of unit in the last place for tolerance (see Notes).
Default is 1.
Returns
-------
None
Raises
------
AssertionError
If the spacing between `x` and `y` for one or more elements is larger
than `nulp`.
See Also
--------
assert_array_max_ulp : Check that all items of arrays differ in at most
N Units in the Last Place.
spacing : Return the distance between x and the nearest adjacent number.
Notes
-----
An assertion is raised if the following condition is not met::
        abs(x - y) <= nulp * spacing(max(abs(x), abs(y)))
Examples
--------
>>> x = np.array([1., 1e-10, 1e-20])
>>> eps = np.finfo(x.dtype).eps
>>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
>>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
------------------------------------------------------------
Traceback (most recent call last):
...
AssertionError: X and Y are not equal to 1 ULP (max is 2)
"""
import numpy as np
ax = np.abs(x)
ay = np.abs(y)
ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
if not np.all(np.abs(x-y) <= ref):
if np.iscomplexobj(x) or np.iscomplexobj(y):
msg = "X and Y are not equal to %d ULP" % nulp
else:
max_nulp = np.max(nulp_diff(x, y))
msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
raise AssertionError(msg)
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
"""
Check that all items of arrays differ in at most N Units in the Last Place.
Parameters
----------
a, b : array_like
Input arrays to be compared.
maxulp : int, optional
The maximum number of units in the last place that elements of `a` and
`b` can differ. Default is 1.
dtype : dtype, optional
Data-type to convert `a` and `b` to if given. Default is None.
Returns
-------
ret : ndarray
Array containing number of representable floating point numbers between
items in `a` and `b`.
Raises
------
AssertionError
If one or more elements differ by more than `maxulp`.
See Also
--------
    assert_array_almost_equal_nulp : Compare two arrays relative to their
spacing.
Examples
--------
>>> a = np.linspace(0., 1., 100)
>>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
"""
import numpy as np
ret = nulp_diff(a, b, dtype)
if not np.all(ret <= maxulp):
raise AssertionError("Arrays are not almost equal up to %g ULP" % \
maxulp)
return ret
def nulp_diff(x, y, dtype=None):
"""For each item in x and y, eeturn the number of representable floating
points between them.
Parameters
----------
x : array_like
first input array
y : array_like
second input array
Returns
-------
nulp: array_like
number of representable floating point numbers between each item in x
and y.
Examples
--------
# By definition, epsilon is the smallest number such as 1 + eps != 1, so
# there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
1.0
"""
import numpy as np
if dtype:
x = np.array(x, dtype=dtype)
y = np.array(y, dtype=dtype)
else:
x = np.array(x)
y = np.array(y)
t = np.common_type(x, y)
if np.iscomplexobj(x) or np.iscomplexobj(y):
raise NotImplementedError("_nulp not implemented for complex array")
x = np.array(x, dtype=t)
y = np.array(y, dtype=t)
if not x.shape == y.shape:
raise ValueError("x and y do not have the same shape: %s - %s" % \
(x.shape, y.shape))
def _diff(rx, ry, vdt):
diff = np.array(rx-ry, dtype=vdt)
return np.abs(diff)
rx = integer_repr(x)
ry = integer_repr(y)
return _diff(rx, ry, t)
def _integer_repr(x, vdt, comp):
    # Reinterpret the binary representation of the float as sign-magnitude:
    # take the two's-complement representation into account
# See also
# http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
rx = x.view(vdt)
if not (rx.size == 1):
rx[rx < 0] = comp - rx[rx<0]
else:
if rx < 0:
rx = comp - rx
return rx
def integer_repr(x):
"""Return the signed-magnitude interpretation of the binary representation of
x."""
import numpy as np
if x.dtype == np.float32:
return _integer_repr(x, np.int32, np.int32(-2**31))
elif x.dtype == np.float64:
return _integer_repr(x, np.int64, np.int64(-2**63))
else:
raise ValueError("Unsupported dtype %s" % x.dtype)
# The following two classes are copied from python 2.6 warnings module (context
# manager)
class WarningMessage(object):
"""
Holds the result of a single showwarning() call.
Notes
-----
`WarningMessage` is copied from the Python 2.6 warnings module,
so it can be used in NumPy with older Python versions.
"""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
if category:
self._category_name = category.__name__
else:
self._category_name = None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class WarningManager:
"""
A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of ``warnings.showwarning()`` and be appended to a
list returned by the context manager. Otherwise None is returned by the
context manager. The objects appended to the list are arguments whose
attributes mirror the arguments to ``showwarning()``.
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
Notes
-----
`WarningManager` is a copy of the ``catch_warnings`` context manager
from the Python 2.6 warnings module, with slight modifications.
It is copied so it can be used in NumPy with older Python versions.
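    Examples
    --------
    A usage sketch (assumes the module-level ``warnings`` import; recording
    mirrors ``catch_warnings(record=True)``):
    >>> with WarningManager(record=True) as log:
    ...     warnings.simplefilter('always')
    ...     warnings.warn("illustrative warning")
    >>> log[0].category is UserWarning
    True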
"""
def __init__(self, record=False, module=None):
self._record = record
if module is None:
self._module = sys.modules['warnings']
else:
self._module = module
self._entered = False
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
    def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
def assert_warns(warning_class, func, *args, **kw):
"""
Fail unless the given callable throws the specified warning.
A warning of class warning_class should be thrown by the callable when
invoked with arguments args and keyword arguments kwargs.
If a different type of warning is thrown, it will not be caught, and the
test case will be deemed to have suffered an error.
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable
The callable to test.
\\*args : Arguments
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
None
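    Examples
    --------
    A minimal sketch with a hypothetical function:
    >>> def deprecated_func():
    ...     warnings.warn("deprecated", DeprecationWarning)
    >>> assert_warns(DeprecationWarning, deprecated_func)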
"""
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
ctx = WarningManager(record=True)
l = ctx.__enter__()
warnings.simplefilter('always')
try:
func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
        if l[0].category is not warning_class:
            raise AssertionError("First warning for %s is not a "
                    "%s (is %s)" % (func.__name__, warning_class, l[0]))
finally:
ctx.__exit__()
| gpl-3.0 |
auready/django | tests/gis_tests/tests.py | 22 | 4106 | import unittest
from django.core.exceptions import ImproperlyConfigured
from django.db import ProgrammingError
try:
from django.contrib.gis.db.backends.postgis.operations import PostGISOperations
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False
except ImproperlyConfigured as e:
# If psycopg is installed but not geos, the import path hits
# django.contrib.gis.geometry.backend which will "helpfully" convert
# an ImportError into an ImproperlyConfigured.
# Here, we make sure we're only catching this specific case and not another
# ImproperlyConfigured one.
if e.args and e.args[0].startswith('Could not import user-defined GEOMETRY_BACKEND'):
HAS_POSTGRES = False
else:
raise
if HAS_POSTGRES:
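    # Minimal stand-ins: FakeConnection supplies just the settings dict the
    # backend touches, and FakePostGISOperations stubs the SQL probe so the
    # version-parsing logic can be exercised without a database.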
class FakeConnection:
def __init__(self):
self.settings_dict = {
'NAME': 'test',
}
class FakePostGISOperations(PostGISOperations):
def __init__(self, version=None):
self.version = version
self.connection = FakeConnection()
def _get_postgis_func(self, func):
if func == 'postgis_lib_version':
if self.version is None:
raise ProgrammingError
else:
return self.version
elif func == 'version':
pass
else:
raise NotImplementedError('This function was not expected to be called')
@unittest.skipUnless(HAS_POSTGRES, "The psycopg2 driver is needed for these tests")
class TestPostGISVersionCheck(unittest.TestCase):
"""
    The PostGIS version check parses version numbers correctly.
"""
def test_get_version(self):
expect = '1.0.0'
ops = FakePostGISOperations(expect)
actual = ops.postgis_lib_version()
self.assertEqual(expect, actual)
def test_version_classic_tuple(self):
expect = ('1.2.3', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_version_dev_tuple(self):
expect = ('1.2.3dev', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_valid_version_numbers(self):
versions = [
('1.3.0', 1, 3, 0),
('2.1.1', 2, 1, 1),
('2.2.0dev', 2, 2, 0),
]
for version in versions:
ops = FakePostGISOperations(version[0])
actual = ops.spatial_version
self.assertEqual(version[1:], actual)
def test_invalid_version_numbers(self):
versions = ['nope', '123']
for version in versions:
ops = FakePostGISOperations(version)
with self.assertRaises(Exception):
ops.spatial_version
def test_no_version_number(self):
ops = FakePostGISOperations()
with self.assertRaises(ImproperlyConfigured):
ops.spatial_version
def test_version_dependent_funcs(self):
"""
Resolve names of functions renamed and deprecated in PostGIS 2.2.0
depending on PostGIS version.
Remove when dropping support for PostGIS 2.1.
"""
ops = FakePostGISOperations('2.2.0')
self.assertEqual(ops.spatial_function_name('DistanceSphere'), 'ST_DistanceSphere')
self.assertEqual(ops.spatial_function_name('DistanceSpheroid'), 'ST_DistanceSpheroid')
self.assertEqual(ops.spatial_function_name('LengthSpheroid'), 'ST_LengthSpheroid')
self.assertEqual(ops.spatial_function_name('MemSize'), 'ST_MemSize')
ops = FakePostGISOperations('2.1.0')
self.assertEqual(ops.spatial_function_name('DistanceSphere'), 'ST_distance_sphere')
self.assertEqual(ops.spatial_function_name('DistanceSpheroid'), 'ST_distance_spheroid')
self.assertEqual(ops.spatial_function_name('LengthSpheroid'), 'ST_length_spheroid')
self.assertEqual(ops.spatial_function_name('MemSize'), 'ST_mem_size')
| bsd-3-clause |
Nosferatul/coala | tests/results/AbsolutePositionTest.py | 35 | 2511 | import unittest
from coalib.results.AbsolutePosition import AbsolutePosition, calc_line_col
from coalib.misc.Constants import COMPLEX_TEST_STRING
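# calc_line_col maps a 0-based absolute offset within a tuple of lines to a
# 1-based (line, column) pair; the tests below exercise newlines, unicode,
# raw strings, and boundary positions.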
class AbsolutePositionTest(unittest.TestCase):
def test_calc_line_col_newlines(self):
# no newlines
text = ("find position of 'z'",)
z_pos = text[0].find('z')
self.assertEqual(
calc_line_col(text, z_pos), (1, z_pos + 1))
# newline
text = ('find position of\n', "'z'",)
string_text = ''.join(text)
z_pos = string_text.find('z')
self.assertEqual(calc_line_col(text, z_pos), (2, 2))
def test_calc_line_col_unicode(self):
uni_pos = COMPLEX_TEST_STRING.find('↑')
self.assertEqual(
calc_line_col((COMPLEX_TEST_STRING,), uni_pos),
(1, uni_pos + 1))
def test_calc_line_col_rawstrings(self):
for raw in [(r'a\b',), (r'a\n',), ('a\\n',)]:
pos = raw[0].find(raw[0][-1])
self.assertEqual(calc_line_col(raw, pos), (1, 3))
def test_calc_line_col_extremes(self):
# End of Line
        text = ('First Line\n', 'End of second line z')
string_text = ''.join(text)
z_pos = string_text.find('z')
self.assertEqual(calc_line_col(text, z_pos),
(2, len(text[1])))
# Out of text
with self.assertRaises(ValueError):
text = ('Some line')
calc_line_col(text, 50)
# start of line
        text = ('First Line\n', 'zEnd of second line')
string_text = ''.join(text)
z_pos = string_text.find('z')
self.assertEqual(calc_line_col(text, z_pos), (2, 1))
def test_property(self):
uut = AbsolutePosition(('1', '2'), 1)
self.assertEqual(uut.position, 1)
self.assertEqual(uut.line, 2)
self.assertEqual(uut.column, 1)
uut = AbsolutePosition()
self.assertEqual(uut.position, None)
self.assertEqual(uut.line, None)
self.assertEqual(uut.column, None)
uut = AbsolutePosition(('a\n', 'b\n'), 0)
self.assertEqual(uut.position, 0)
self.assertEqual(uut.line, 1)
self.assertEqual(uut.column, 1)
def test_instantiation(self):
with self.assertRaises(ValueError):
uut = AbsolutePosition((), 0)
uut = AbsolutePosition(position=5)
self.assertEqual(uut.position, 5)
self.assertEqual(uut.line, None)
self.assertEqual(uut.column, None)
| agpl-3.0 |
40223225/2015-cdb_g3-40223225 | static/Brython3.1.1-20150328-091302/Lib/fnmatch.py | 894 | 3163 | """Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
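    A couple of illustrative matches:
    >>> fnmatch('foo.py', '*.py')
    True
    >>> fnmatch('foo.pyc', '*.py')
    False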
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
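    # Bytes patterns are round-tripped through ISO-8859-1: it maps the byte
    # values 0-255 one-to-one onto code points, so the str-based translate()
    # can be reused and its result re-encoded losslessly.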
if isinstance(pat, bytes):
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
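    A case-sensitive illustration:
    >>> fnmatchcase('foo.py', '*.py')
    True
    >>> fnmatchcase('foo.PY', '*.py')
    False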
"""
match = _compile_pattern(pat)
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
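    Illustrative translations (the exact regex text is specific to this
    implementation):
    >>> print(translate('*.txt'))
    .*\.txt\Z(?ms)
    >>> print(translate('[!x]'))
    [^x]\Z(?ms)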
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return res + '\Z(?ms)'
| gpl-3.0 |