repo_name | path | copies | size | content | license
---|---|---|---|---|---|
johankaito/fufuka | microblog/venv/lib/python2.7/site-packages/requests/packages/urllib3/packages/six.py | 2375 | 11628 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2011 Benjamin Peterson
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.2.0" # Revision 41c74fef2ded
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
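# Illustrative usage (added for clarity, not part of the original module):
#     d = {"a": 1, "b": 2}
#     for key, value in iteritems(d):
#         ...  # dispatches to d.iteritems() on Python 2 and d.items() on Python 3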
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
| apache-2.0 |
elemel/tics | lib/tics/image.py | 1 | 3627 | # Copyright (c) 2009 Mikael Lind
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import random, struct
from OpenGL.GL import *
from tics.triangle import Triangle
class Image(object):
def __init__(self, (width, height), triangles):
self.__width = width
self.__height = height
self.__triangles = tuple(triangles)
@property
def resolution(self):
return self.__width, self.__height
def draw(self):
glBegin(GL_TRIANGLES)
for triangle in self.__triangles:
triangle.draw()
glEnd()
@staticmethod
def generate(resolution, triangle_count):
triangles = [Triangle.generate() for _ in xrange(triangle_count)]
return Image(resolution, triangles)
@staticmethod
def read(f):
width, height, triangle_count = struct.unpack("!HHH", f.read(6))
triangles = [Triangle.read(f) for _ in xrange(triangle_count)]
return Image((width, height), triangles)
@staticmethod
def load(path):
f = open(path, "rb")
try:
return Image.read(f)
finally:
f.close()
def write(self, f):
f.write(struct.pack("!HHH", self.__width, self.__height,
len(self.__triangles)))
for triangle in self.__triangles:
triangle.write(f)
def save(self, path):
f = open(path, "wb")
try:
self.write(f)
finally:
f.close()
def mutate(self):
if random.random() < 0.5:
triangles = list(self.__triangles)
i = random.randrange(len(triangles))
triangles[i] = triangles[i].mutate()
return Image((self.__width, self.__height), triangles)
else:
mutate_func = random.choice([self.__move_triangle,
self.__replace_triangle])
return mutate_func()
def __move_triangle(self):
triangles = list(self.__triangles)
i = random.randrange(len(triangles))
j = random.randrange(len(triangles))
triangle = triangles.pop(i)
triangles.insert(j, triangle)
return Image((self.__width, self.__height), triangles)
def __replace_triangle(self):
triangles = list(self.__triangles)
i = random.randrange(len(triangles))
if random.random() < 0.5:
j = len(triangles)
else:
j = random.randrange(len(triangles))
triangles.pop(i)
triangles.insert(j, Triangle.generate())
return Image((self.__width, self.__height), triangles)
| mit |
yxwzaxns/cowry | server/core/utils.py | 1 | 4227 | """Helper functions."""
from ast import literal_eval
import base64
import os
import hashlib
import random
import uuid
import time
import shutil
import re
import socket
import _thread
import OpenSSL
import redis
def addAppPath(path):
"""Add a path to sys path."""
os.sys.path.append(path)
def getCwd():
"""pass."""
return os.getcwd()
def checkAbsPath(path):
"""pass."""
return os.path.isabs(path)
def prettySize(num, suffix='B'):
"""pass."""
num = int(num)
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "{:.3f} {}{}".format(num, unit, suffix)
num /= 1024.0
# values of 1024**8 and above fall through to the largest unit
return "{:.3f} {}{}".format(num, 'Yi', suffix)
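# Example (added for illustration): prettySize(1536) returns '1.500 KiB' and
# prettySize('1073741824') returns '1.000 GiB'; the argument is coerced with int().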
def getSizeByPath(filepath):
"""pass."""
return os.path.getsize(filepath)
def getBaseNameByPath(filepath):
"""pass."""
return os.path.basename(filepath)
def getDirNameByPath(filepath):
"""pass."""
return os.path.dirname(filepath)
def calculateHashCodeForFile(filepath):
"""pass."""
try:
with open(filepath, 'rb') as f:
fileHashCode = hashlib.md5(f.read()).hexdigest()
except Exception as e:
return (1, str(e))
return fileHashCode
def calculateHashCodeForString(string, method='md5'):
"""pass."""
return getattr(hashlib, method)(string.encode('utf8')).hexdigest()
# return hashlib.md5(str.encode('utf8')).hexdigest()
def calculateFingerprintForSSHKey(line):
key = base64.b64decode(line.strip().split()[1].encode('ascii'))
fp_plain = hashlib.md5(key).hexdigest()
return ':'.join(a+b for a,b in zip(fp_plain[::2], fp_plain[1::2]))
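# Example (added for illustration): given a line from an OpenSSH public key
# file such as "ssh-rsa AAAAB3Nza... user@host", the result is the MD5 digest
# of the decoded key formatted as 16 colon-separated hex byte pairs
# (the key text above is a placeholder, not a real key).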
def check_public_key(key):
# key = base64.b64decode(line.strip().split()[1].encode('ascii'))
# fp_plain = hashlib.md5(key).hexdigest()
return True
def generateRandomDigitFromRange(start, end):
"""pass."""
return random.randrange(start, end)
def rebuildDictFromBytes(bytestr):
"""pass."""
return literal_eval(bytestr.decode('utf8'))
def startNewThread(work, params=()):
"""pass."""
if params:
_thread.start_new_thread(work, params)
else:
_thread.start_new_thread(work, ())
def seperateFileName(filename):
"""pass."""
return os.path.splitext(filename)
def getFileContent(filepath, method=''):
"""pass."""
mode = 'r{}'.format(method)
with open(filepath, mode) as f:
content = f.read()
return content
def generateAuthToken():
"""pass."""
return uuid.uuid4().hex.upper()
def generateGUID():
return uuid.uuid1().hex.upper()
def getCurrentTime():
"""pass."""
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
def joinFilePath(*params):
"""pass."""
params = [x for x in params]
return os.path.join(*params)
def deleteFile(filepath):
"""pass."""
try:
os.remove(filepath)
except Exception as e:
return (1, str(e))
else:
return (0, 'ok')
def copyfile(src, dst):
"""pass."""
try:
shutil.copyfile(src, dst)
except Exception as e:
return (1, str(e))
else:
return (0, 'ok')
def getenv(name):
"""pass."""
return os.getenv(name)
def setenv(name, value):
"""pass."""
os.environ[name] = str(value)
def makeDirs(filepath):
"""pass."""
return os.makedirs(filepath)
def delfolder(folderpath):
"""pass."""
if checkFolderExists(folderpath):
shutil.rmtree(folderpath)
def checkFileExists(filepath):
"""pass."""
return os.path.isfile(filepath)
def checkFolderExists(path):
"""pass."""
return os.path.isdir(path)
def verifyDomain(domain):
"""pass."""
reg = r'^[a-z0-9]([a-z0-9-]+\.){1,}[a-z0-9]+\Z'
return re.search(reg, domain)
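# Example (added for illustration): verifyDomain('example.com') returns a match
# object, while verifyDomain('-bad.example') returns None because the first
# character must be a lowercase letter or digit.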
def getHostAddr():
"""pass."""
return socket.gethostbyname(socket.gethostname())
def importCert(path):
return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, path)
def getCertInfo(path):
filehash = calculateHashCodeForFile(path)
with open(path, 'r') as f:
certfile = f.read()
cert = importCert(certfile)
cert_digest = cert.digest("sha256")
cert_info = {'digest': cert_digest.decode(),
'filehash': filehash}
return cert_info
def send_info(info):
pass
| mit |
xorpaul/check_mk | web/plugins/userdb/user_attributes.py | 6 | 3483 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
declare_user_attribute(
"force_authuser",
Checkbox(
title = _("Visibility of Hosts/Services"),
label = _("Only show hosts and services the user is a contact for"),
help = _("When this option is checked, then the status GUI will only "
"display hosts and services that the user is a contact for - "
"even if he has the permission for seeing all objects."),
),
permission = "general.see_all"
)
declare_user_attribute(
"force_authuser_webservice",
Checkbox(
title = _("Visibility of Hosts/Services (Webservice)"),
label = _("Export only hosts and services the user is a contact for"),
help = _("When this option is checked, then the Multisite webservice "
"will only export hosts and services that the user is a contact for - "
"even if he has the permission for seeing all objects."),
),
permission = "general.see_all"
)
declare_user_attribute(
"disable_notifications",
Checkbox(
title = _("Disable Notifications"),
label = _("Temporarily disable <b>all</b> notifications!"),
help = _("When this option is active the you will not get <b>any</b> "
"alerts or other notifications via email, SMS or similar. "
"This overrides all other notification settings or rules, so make "
"sure that you know what you do."),
),
permission = "general.disable_notifications",
domain = "check_mk",
)
declare_user_attribute(
"start_url",
TextAscii(title = _("Start-URL to display in main frame"),
help = _("When you point your browser to the Multisite GUI, usually the dashboard "
"is shown in the main (right) frame. You can replace this with any other "
"URL you like here."),
size = 80,
default_value = "dashboard.py",
attrencode = True),
domain = "multisite")
| gpl-2.0 |
quarkslab/irma | probe/modules/antivirus/bitdefender/bitdefender.py | 1 | 2967 | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
import os
import tempfile
from pathlib import Path
from modules.antivirus.base import AntivirusUnix
log = logging.getLogger(__name__)
class BitdefenderForUnices(AntivirusUnix):
name = "Bitdefender Antivirus Scanner (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super().__init__(*args, **kwargs)
# create a temporary filename
fd, self._log_path = tempfile.mkstemp()
self._log_path = Path(self._log_path)
os.close(fd)
# scan tool variables
self.scan_args = (
"--action=ignore", # action to take for an infected file
"--no-list", # do not display scanned files
"--log={log}".format(log=self._log_path)
)
self.scan_patterns = [
re.compile('(?P<file>\S+)\s+(infected:|suspected:)\s+'
'(?P<name>.+?)$', re.IGNORECASE | re.MULTILINE),
]
def __del__(self):
if hasattr(self, '_log_path') and self._log_path.exists():
self._log_path.unlink()
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def check_scan_results(self, paths, res):
retcode, _, stderr = res
stdout = self._log_path.read_text()
return super().check_scan_results(paths, (retcode, stdout, stderr))
def get_version(self):
"""return the version of the antivirus"""
return self._run_and_parse(
'--version',
regexp='(?P<version>\d+(\.\d+)+)',
group='version')
def get_database(self):
"""return list of files in the database"""
# extract folder where are installed definition files
search_paths = [
Path('/opt/BitDefender-scanner/var/lib/scan/Plugins/'),
]
return self.locate('*', search_paths, syspath=False)
def get_scan_path(self):
"""return the full path of the scan tool"""
return self.locate_one("bdscan")
def get_virus_database_version(self):
"""Return the Virus Database version"""
return self._run_and_parse(
'--info',
regexp='Engine signatures: (?P<dbversion>\d+)',
group='dbversion')
| apache-2.0 |
shankar1093/Graphics | mrdoob-three.js-1f968fe/utils/converters/obj/split_obj.py | 369 | 12687 | """Split single OBJ model into mutliple OBJ files by materials
-------------------------------------
How to use
-------------------------------------
python split_obj.py -i infile.obj -o outfile
Will generate:
outfile_000.obj
outfile_001.obj
...
outfile_XXX.obj
-------------------------------------
Parser based on format description
-------------------------------------
http://en.wikipedia.org/wiki/Obj
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
TRUNCATE = False
SCALE = 1.0
# #####################################################
# Templates
# #####################################################
TEMPLATE_OBJ = u"""\
################################
# OBJ generated by split_obj.py
################################
# Faces: %(nfaces)d
# Vertices: %(nvertices)d
# Normals: %(nnormals)d
# UVs: %(nuvs)d
################################
# vertices
%(vertices)s
# normals
%(normals)s
# uvs
%(uvs)s
# faces
%(faces)s
"""
TEMPLATE_VERTEX = "v %f %f %f"
TEMPLATE_VERTEX_TRUNCATE = "v %d %d %d"
TEMPLATE_NORMAL = "vn %.5g %.5g %.5g"
TEMPLATE_UV = "vt %.5g %.5g"
TEMPLATE_FACE3_V = "f %d %d %d"
TEMPLATE_FACE4_V = "f %d %d %d %d"
TEMPLATE_FACE3_VT = "f %d/%d %d/%d %d/%d"
TEMPLATE_FACE4_VT = "f %d/%d %d/%d %d/%d %d/%d"
TEMPLATE_FACE3_VN = "f %d//%d %d//%d %d//%d"
TEMPLATE_FACE4_VN = "f %d//%d %d//%d %d//%d %d//%d"
TEMPLATE_FACE3_VTN = "f %d/%d/%d %d/%d/%d %d/%d/%d"
TEMPLATE_FACE4_VTN = "f %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d"
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v': v, 't': t, 'n': n }
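# Examples (added for illustration):
#     parse_vertex("12/7/3") -> {'v': 12, 't': 7, 'n': 3}
#     parse_vertex("12//3")  -> {'v': 12, 't': 0, 'n': 3}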
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
for line in fileinput.input(fname):
chunks = line.split()
if len(chunks) > 0:
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
vertex_index.append(vertex['v'])
if vertex['t']:
uv_index.append(vertex['t'])
if vertex['n']:
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl" and len(chunks) == 2:
material = chunks[1]
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
# #############################################################################
# API - Breaker
# #############################################################################
def break_obj(infile, outfile):
"""Break infile.obj to outfile.obj
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
# sort faces by materials
chunks = {}
for face in faces:
material = face["material"]
if not material in chunks:
chunks[material] = {"faces": [], "vertices": set(), "normals": set(), "uvs": set()}
chunks[material]["faces"].append(face)
# extract unique vertex / normal / uv indices used per chunk
for material in chunks:
chunk = chunks[material]
for face in chunk["faces"]:
for i in face["vertex"]:
chunk["vertices"].add(i)
for i in face["normal"]:
chunk["normals"].add(i)
for i in face["uv"]:
chunk["uvs"].add(i)
# generate new OBJs
for mi, material in enumerate(chunks):
chunk = chunks[material]
# generate separate vertex / normal / uv index lists for each chunk
# (including mapping from original to new indices)
# get well defined order
new_vertices = list(chunk["vertices"])
new_normals = list(chunk["normals"])
new_uvs = list(chunk["uvs"])
# map original => new indices
vmap = {}
for i, v in enumerate(new_vertices):
vmap[v] = i + 1
nmap = {}
for i, n in enumerate(new_normals):
nmap[n] = i + 1
tmap = {}
for i, t in enumerate(new_uvs):
tmap[t] = i + 1
# vertices
pieces = []
for i in new_vertices:
vertex = vertices[i-1]
txt = TEMPLATE_VERTEX % (vertex[0], vertex[1], vertex[2])
pieces.append(txt)
str_vertices = "\n".join(pieces)
# normals
pieces = []
for i in new_normals:
normal = normals[i-1]
txt = TEMPLATE_NORMAL % (normal[0], normal[1], normal[2])
pieces.append(txt)
str_normals = "\n".join(pieces)
# uvs
pieces = []
for i in new_uvs:
uv = uvs[i-1]
txt = TEMPLATE_UV % (uv[0], uv[1])
pieces.append(txt)
str_uvs = "\n".join(pieces)
# faces
pieces = []
for face in chunk["faces"]:
txt = ""
fv = face["vertex"]
fn = face["normal"]
ft = face["uv"]
if len(fv) == 3:
va = vmap[fv[0]]
vb = vmap[fv[1]]
vc = vmap[fv[2]]
if len(fn) == 3 and len(ft) == 3:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
txt = TEMPLATE_FACE3_VTN % (va, ta, na, vb, tb, nb, vc, tc, nc)
elif len(fn) == 3:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
txt = TEMPLATE_FACE3_VN % (va, na, vb, nb, vc, nc)
elif len(ft) == 3:
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
txt = TEMPLATE_FACE3_VT % (va, ta, vb, tb, vc, tc)
else:
txt = TEMPLATE_FACE3_V % (va, vb, vc)
elif len(fv) == 4:
va = vmap[fv[0]]
vb = vmap[fv[1]]
vc = vmap[fv[2]]
vd = vmap[fv[3]]
if len(fn) == 4 and len(ft) == 4:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
nd = nmap[fn[3]]
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
td = tmap[ft[3]]
txt = TEMPLATE_FACE4_VTN % (va, ta, na, vb, tb, nb, vc, tc, nc, vd, td, nd)
elif len(fn) == 4:
na = nmap[fn[0]]
nb = nmap[fn[1]]
nc = nmap[fn[2]]
nd = nmap[fn[3]]
txt = TEMPLATE_FACE4_VN % (va, na, vb, nb, vc, nc, vd, nd)
elif len(ft) == 4:
ta = tmap[ft[0]]
tb = tmap[ft[1]]
tc = tmap[ft[2]]
td = tmap[ft[3]]
txt = TEMPLATE_FACE4_VT % (va, ta, vb, tb, vc, tc, vd, td)
else:
txt = TEMPLATE_FACE4_V % (va, vb, vc, vd)
pieces.append(txt)
str_faces = "\n".join(pieces)
# generate OBJ string
content = TEMPLATE_OBJ % {
"nfaces" : len(chunk["faces"]),
"nvertices" : len(new_vertices),
"nnormals" : len(new_normals),
"nuvs" : len(new_uvs),
"vertices" : str_vertices,
"normals" : str_normals,
"uvs" : str_uvs,
"faces" : str_faces
}
# write OBJ file
outname = "%s_%03d.obj" % (outfile, mi)
f = open(outname, "w")
f.write(content)
f.close()
# #############################################################################
# Helpers
# #############################################################################
def usage():
print "Usage: %s -i filename.obj -o prefix" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:o:x:", ["help", "input=", "output=", "truncatescale="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print "Splitting [%s] into [%s_XXX.obj] ..." % (infile, outfile)
break_obj(infile, outfile)
| mit |
xin3liang/platform_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py | 497 | 13858 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket utilities.
"""
import array
import errno
# Import hash classes from a module available and recommended for each Python
# version and re-export those symbols. Use the sha and md5 modules in Python 2.4,
# and the hashlib module in Python 2.6.
try:
import hashlib
md5_hash = hashlib.md5
sha1_hash = hashlib.sha1
except ImportError:
import md5
import sha
md5_hash = md5.md5
sha1_hash = sha.sha
import StringIO
import logging
import os
import re
import socket
import traceback
import zlib
try:
from mod_pywebsocket import fast_masking
except ImportError:
pass
def get_stack_trace():
"""Get the current stack trace as string.
This is needed to support Python 2.3.
TODO: Remove this when we only support Python 2.4 and above.
Use traceback.format_exc instead.
"""
out = StringIO.StringIO()
traceback.print_exc(file=out)
return out.getvalue()
def prepend_message_to_exception(message, exc):
"""Prepend message to the exception."""
exc.args = (message + str(exc),)
return
def __translate_interp(interp, cygwin_path):
"""Translate interp program path for Win32 python to run cygwin program
(e.g. perl). Note that it doesn't support a path that contains spaces,
which is typically true for Unix, where #!-script is written.
For Win32 python, cygwin_path is a directory of cygwin binaries.
Args:
interp: interp command line
cygwin_path: directory name of cygwin binary, or None
Returns:
translated interp command line.
"""
if not cygwin_path:
return interp
m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
if m:
cmd = os.path.join(cygwin_path, m.group(1))
return cmd + m.group(2)
return interp
def get_script_interp(script_path, cygwin_path=None):
"""Gets #!-interpreter command line from the script.
It also fixes command path. When Cygwin Python is used, e.g. in WebKit,
it could run "/usr/bin/perl -wT hello.pl".
When Win32 Python is used, e.g. in Chromium, it couldn't. So, fix
"/usr/bin/perl" to "<cygwin_path>\perl.exe".
Args:
script_path: pathname of the script
cygwin_path: directory name of cygwin binary, or None
Returns:
#!-interpreter command line, or None if it is not #!-script.
"""
fp = open(script_path)
line = fp.readline()
fp.close()
m = re.match('^#!(.*)', line)
if m:
return __translate_interp(m.group(1), cygwin_path)
return None
def wrap_popen3_for_win(cygwin_path):
"""Wrap popen3 to support #!-script on Windows.
Args:
cygwin_path: path for cygwin binary if command path is needed to be
translated. None if no translation required.
"""
__orig_popen3 = os.popen3
def __wrap_popen3(cmd, mode='t', bufsize=-1):
cmdline = cmd.split(' ')
interp = get_script_interp(cmdline[0], cygwin_path)
if interp:
cmd = interp + ' ' + cmd
return __orig_popen3(cmd, mode, bufsize)
os.popen3 = __wrap_popen3
def hexify(s):
return ' '.join(map(lambda x: '%02x' % ord(x), s))
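# Example (added for illustration): hexify('AB') returns '41 42'.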
def get_class_logger(o):
return logging.getLogger(
'%s.%s' % (o.__class__.__module__, o.__class__.__name__))
class NoopMasker(object):
"""A masking object that has the same interface as RepeatedXorMasker but
just returns the string passed in without making any change.
"""
def __init__(self):
pass
def mask(self, s):
return s
class RepeatedXorMasker(object):
"""A masking object that applies XOR on the string given to mask method
with the masking bytes given to the constructor repeatedly. This object
remembers the position in the masking bytes the last mask method call
ended and resumes from that point on the next mask method call.
"""
def __init__(self, masking_key):
self._masking_key = masking_key
self._masking_key_index = 0
def _mask_using_swig(self, s):
masked_data = fast_masking.mask(
s, self._masking_key, self._masking_key_index)
self._masking_key_index = (
(self._masking_key_index + len(s)) % len(self._masking_key))
return masked_data
def _mask_using_array(self, s):
result = array.array('B')
result.fromstring(s)
# Use temporary local variables to eliminate the cost to access
# attributes
masking_key = map(ord, self._masking_key)
masking_key_size = len(masking_key)
masking_key_index = self._masking_key_index
for i in xrange(len(result)):
result[i] ^= masking_key[masking_key_index]
masking_key_index = (masking_key_index + 1) % masking_key_size
self._masking_key_index = masking_key_index
return result.tostring()
if 'fast_masking' in globals():
mask = _mask_using_swig
else:
mask = _mask_using_array
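# Illustrative usage (added for clarity, not part of the original module):
# masking a payload in several chunks gives the same bytes as masking it in one
# call, because the position inside the masking key is kept between calls:
#     m1 = RepeatedXorMasker('\x01\x02\x03\x04')
#     m2 = RepeatedXorMasker('\x01\x02\x03\x04')
#     assert m1.mask('ab') + m1.mask('cdef') == m2.mask('abcdef')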
# By making wbits option negative, we can suppress CMF/FLG (2 octet) and
# ADLER32 (4 octet) fields of zlib so that we can use zlib module just as
# deflate library. DICTID won't be added as far as we don't set dictionary.
# LZ77 window of 32K will be used for both compression and decompression.
# For decompression, we can just use 32K to cover any windows size. For
# compression, we use 32K so receivers must use 32K.
#
# Compression level is Z_DEFAULT_COMPRESSION. We don't have to match level
# to decode.
#
# See zconf.h, deflate.cc, inflate.cc of zlib library, and zlibmodule.c of
# Python. See also RFC1950 (ZLIB 3.3).
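# Illustrative sketch (added for clarity, not part of the original module):
#     co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
#     raw = co.compress('payload') + co.flush(zlib.Z_SYNC_FLUSH)
#     zlib.decompressobj(-zlib.MAX_WBITS).decompress(raw)  # -> 'payload'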
class _Deflater(object):
def __init__(self, window_bits):
self._logger = get_class_logger(self)
self._compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits)
def compress(self, bytes):
compressed_bytes = self._compress.compress(bytes)
self._logger.debug('Compress input %r', bytes)
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
def compress_and_flush(self, bytes):
compressed_bytes = self._compress.compress(bytes)
compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH)
self._logger.debug('Compress input %r', bytes)
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
def compress_and_finish(self, bytes):
compressed_bytes = self._compress.compress(bytes)
compressed_bytes += self._compress.flush(zlib.Z_FINISH)
self._logger.debug('Compress input %r', bytes)
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
class _Inflater(object):
def __init__(self, window_bits):
self._logger = get_class_logger(self)
self._window_bits = window_bits
self._unconsumed = ''
self.reset()
def decompress(self, size):
if not (size == -1 or size > 0):
raise Exception('size must be -1 or positive')
data = ''
while True:
if size == -1:
data += self._decompress.decompress(self._unconsumed)
# See Python bug http://bugs.python.org/issue12050 to
# understand why the same code cannot be used for updating
# self._unconsumed for here and else block.
self._unconsumed = ''
else:
data += self._decompress.decompress(
self._unconsumed, size - len(data))
self._unconsumed = self._decompress.unconsumed_tail
if self._decompress.unused_data:
# Encountered a last block (i.e. a block with BFINAL = 1) and
# found a new stream (unused_data). We cannot use the same
# zlib.Decompress object for the new stream. Create a new
# Decompress object to decompress the new one.
#
# It's fine to ignore unconsumed_tail if unused_data is not
# empty.
self._unconsumed = self._decompress.unused_data
self.reset()
if size >= 0 and len(data) == size:
# data is filled. Don't call decompress again.
break
else:
# Re-invoke Decompress.decompress to try to decompress all
# available bytes before invoking read which blocks until
# any new byte is available.
continue
else:
# Here, since unused_data is empty, even if unconsumed_tail is
# not empty, bytes of requested length are already in data. We
# don't have to "continue" here.
break
if data:
self._logger.debug('Decompressed %r', data)
return data
def append(self, data):
self._logger.debug('Appended %r', data)
self._unconsumed += data
def reset(self):
self._logger.debug('Reset')
self._decompress = zlib.decompressobj(-self._window_bits)
# Compresses/decompresses given octets using the method introduced in RFC1979.
class _RFC1979Deflater(object):
"""A compressor class that applies DEFLATE to given byte sequence and
flushes using the algorithm described in the RFC1979 section 2.1.
"""
def __init__(self, window_bits, no_context_takeover):
self._deflater = None
if window_bits is None:
window_bits = zlib.MAX_WBITS
self._window_bits = window_bits
self._no_context_takeover = no_context_takeover
def filter(self, bytes, end=True, bfinal=False):
if self._deflater is None:
self._deflater = _Deflater(self._window_bits)
if bfinal:
result = self._deflater.compress_and_finish(bytes)
# Add a padding block with BFINAL = 0 and BTYPE = 0.
result = result + chr(0)
self._deflater = None
return result
result = self._deflater.compress_and_flush(bytes)
if end:
# Strip last 4 octets which is LEN and NLEN field of a
# non-compressed block added for Z_SYNC_FLUSH.
result = result[:-4]
if self._no_context_takeover and end:
self._deflater = None
return result
class _RFC1979Inflater(object):
"""A decompressor class for byte sequence compressed and flushed following
the algorithm described in the RFC1979 section 2.1.
"""
def __init__(self, window_bits=zlib.MAX_WBITS):
self._inflater = _Inflater(window_bits)
def filter(self, bytes):
# Restore stripped LEN and NLEN field of a non-compressed block added
# for Z_SYNC_FLUSH.
self._inflater.append(bytes + '\x00\x00\xff\xff')
return self._inflater.decompress(-1)
class DeflateSocket(object):
"""A wrapper class for socket object to intercept send and recv to perform
deflate compression and decompression transparently.
"""
# Size of the buffer passed to recv to receive compressed data.
_RECV_SIZE = 4096
def __init__(self, socket):
self._socket = socket
self._logger = get_class_logger(self)
self._deflater = _Deflater(zlib.MAX_WBITS)
self._inflater = _Inflater(zlib.MAX_WBITS)
def recv(self, size):
"""Receives data from the socket specified on the construction up
to the specified size. Once any data is available, returns it even
if it's smaller than the specified size.
"""
# TODO(tyoshino): Allow call with size=0. It should block until any
# decompressed data is available.
if size <= 0:
raise Exception('Non-positive size passed')
while True:
data = self._inflater.decompress(size)
if len(data) != 0:
return data
read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
if not read_data:
return ''
self._inflater.append(read_data)
def sendall(self, bytes):
self.send(bytes)
def send(self, bytes):
self._socket.sendall(self._deflater.compress_and_flush(bytes))
return len(bytes)
# vi:sts=4 sw=4 et
| bsd-3-clause |
ludo237/euler-problems | Problems/Problem 32/Python/problem_32.py | 1 | 1427 | '''
Problem 32 from Project Euler
We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once; for example, the 5-digit number, 15234, is 1 through 5 pandigital.
The product 7254 is unusual, as the identity, 39 * 186 = 7254, containing multiplicand, multiplier, and product is 1 through 9 pandigital.
Find the sum of all products whose multiplicand/multiplier/product identity can be written as a 1 through 9 pandigital.
REGARDIND THE PERFORMANCE
This is not the final version of the Algorithm, anyone can improve it.
If you have a better solution please submit a pull request, or an issue, on Github.
@author Claudio Ludovico Panetta (@Ludo237)
@version 1.0.0
'''
# "Costants" for our algorithm
MAX = 10000
LOOP = 100
# Creating a simple function so we can keep the code a bit clear
def pand_digital(i, j):
product = i * j
summatory = str(i) + str(j) + str(product)
digits = set(summatory)
if '0' in summatory:
return False
if len(digits) == 9 and len(summatory) == 9:
return True
else:
return False
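# Example (added for illustration): 39 * 186 = 7254 uses every digit 1-9 exactly
# once, so pand_digital(39, 186) is True, while pand_digital(2, 3) is False.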
# The core of our algorithm
print "Algorithm starts, plase wait...\n"
# Using a simple list, lazy boy
digits = set()
# Looping
for i in range(0, LOOP):
for j in range(i, MAX):
# If we found a pandigital number
if(pand_digital(i, j)):
# Add it
digits.add(i * j)
print "Algorithm End\n"
| gpl-2.0 |
multikatt/CouchPotatoServer | libs/tornado/iostream.py | 63 | 60393 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility classes to write to and read from non-blocking files and sockets.
Contents:
* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import errno
import numbers
import os
import socket
import sys
import re
from tornado.concurrent import TracebackFuture
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError
from tornado import stack_context
from tornado.util import errno_from_exception
try:
from tornado.platform.posix import _set_nonblocking
except ImportError:
_set_nonblocking = None
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
_ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,)
# These errnos indicate that a connection has been abruptly terminated.
# They should be caught and handled less noisily than other errors.
_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
errno.ETIMEDOUT)
if hasattr(errno, "WSAECONNRESET"):
_ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT)
if sys.platform == 'darwin':
# OSX appears to have a race condition that causes send(2) to return
# EPROTOTYPE if called while a socket is being torn down:
# http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
# Since the socket is being closed anyway, treat this as an ECONNRESET
# instead of an unexpected error.
_ERRNO_CONNRESET += (errno.EPROTOTYPE,)
# More non-portable errnos:
_ERRNO_INPROGRESS = (errno.EINPROGRESS,)
if hasattr(errno, "WSAEINPROGRESS"):
_ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,)
#######################################################
class StreamClosedError(IOError):
"""Exception raised by `IOStream` methods when the stream is closed.
Note that the close callback is scheduled to run *after* other
callbacks on the stream (to allow for buffered data to be processed),
so you may see this error before you see the close callback.
"""
pass
class UnsatisfiableReadError(Exception):
"""Exception raised when a read cannot be satisfied.
Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes``
argument.
"""
pass
class StreamBufferFullError(Exception):
"""Exception raised by `IOStream` methods when the buffer is full.
"""
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take an optional ``callback`` argument and return a
`.Future` only if no callback is given. When the operation completes,
the callback will be run or the `.Future` will resolve with the data
read (or ``None`` for ``write()``). All outstanding ``Futures`` will
resolve with a `StreamClosedError` when the stream is closed; users
of the callback interface will be notified via
`.BaseIOStream.set_close_callback` instead.
When a stream is closed due to an error, the IOStream's ``error``
attribute contains the exception object.
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
read_chunk_size=None, max_write_buffer_size=None):
"""`BaseIOStream` constructor.
:arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
Deprecated since Tornado 4.1.
:arg max_buffer_size: Maximum amount of incoming data to buffer;
defaults to 100MB.
:arg read_chunk_size: Amount of data to read at one time from the
underlying transport; defaults to 64KB.
:arg max_write_buffer_size: Amount of outgoing data to buffer;
defaults to unlimited.
.. versionchanged:: 4.0
Add the ``max_write_buffer_size`` parameter. Changed default
``read_chunk_size`` to 64KB.
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600
# A chunk size that is too close to max_buffer_size can cause
# spurious failures.
self.read_chunk_size = min(read_chunk_size or 65536,
self.max_buffer_size // 2)
self.max_write_buffer_size = max_write_buffer_size
self.error = None
self._read_buffer = collections.deque()
self._write_buffer = collections.deque()
self._read_buffer_size = 0
self._write_buffer_size = 0
self._write_buffer_frozen = False
self._read_delimiter = None
self._read_regex = None
self._read_max_bytes = None
self._read_bytes = None
self._read_partial = False
self._read_until_close = False
self._read_callback = None
self._read_future = None
self._streaming_callback = None
self._write_callback = None
self._write_future = None
self._close_callback = None
self._connect_callback = None
self._connect_future = None
self._connecting = False
self._state = None
self._pending_callbacks = 0
self._closed = False
def fileno(self):
"""Returns the file descriptor for this stream."""
raise NotImplementedError()
def close_fd(self):
"""Closes the file underlying this stream.
``close_fd`` is called by `BaseIOStream` and should not be called
elsewhere; other users should call `close` instead.
"""
raise NotImplementedError()
def write_to_fd(self, data):
"""Attempts to write ``data`` to the underlying file.
Returns the number of bytes written.
"""
raise NotImplementedError()
def read_from_fd(self):
"""Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket
returned `~errno.EWOULDBLOCK` or equivalent), otherwise
returns the data. When possible, should return no more than
``self.read_chunk_size`` bytes at a time.
"""
raise NotImplementedError()
def get_fd_error(self):
"""Returns information about any error on the underlying file.
This method is called after the `.IOLoop` has signaled an error on the
file descriptor, and should return an Exception (such as `socket.error`
with additional information, or None if no such information is
available.
"""
return None
def read_until_regex(self, regex, callback=None, max_bytes=None):
"""Asynchronously read until we have matched the given regex.
The result includes the data that matches the regex and anything
that came before it. If a callback is given, it will be run
with the data as an argument; if not, this method returns a
`.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the regex is
not satisfied.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_regex = re.compile(regex)
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
except:
if future is not None:
# Ensure that the future doesn't log an error because its
# failure was never examined.
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_until(self, delimiter, callback=None, max_bytes=None):
"""Asynchronously read until we have found the given delimiter.
The result includes all the data read including the delimiter.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the delimiter
is not found.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_delimiter = delimiter
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
partial=False):
"""Asynchronously read a number of bytes.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``partial`` is true, the callback is run as soon as we have
any bytes to return (but never more than ``num_bytes``)
.. versionchanged:: 4.0
Added the ``partial`` argument. The callback argument is now
optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
assert isinstance(num_bytes, numbers.Integral)
self._read_bytes = num_bytes
self._read_partial = partial
self._streaming_callback = stack_context.wrap(streaming_callback)
try:
self._try_inline_read()
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_until_close(self, callback=None, streaming_callback=None):
"""Asynchronously reads all data from the socket until it is closed.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
.. versionchanged:: 4.0
The callback argument is now optional and a `.Future` will
be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._streaming_callback = stack_context.wrap(streaming_callback)
if self.closed():
if self._streaming_callback is not None:
self._run_read_callback(self._read_buffer_size, True)
self._run_read_callback(self._read_buffer_size, False)
return future
self._read_until_close = True
try:
self._try_inline_read()
except:
future.add_done_callback(lambda f: f.exception())
raise
return future
def write(self, data, callback=None):
"""Asynchronously write the given data to this stream.
If ``callback`` is given, we call it when all of the buffered write
data has been successfully written to the stream. If there was
previously buffered write data and an old write callback, that
callback is simply overwritten with this new callback.
If no ``callback`` is given, this method returns a `.Future` that
resolves (with a result of ``None``) when the write has been
completed. If `write` is called again before that `.Future` has
resolved, the previous future will be orphaned and will never resolve.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
assert isinstance(data, bytes)
self._check_closed()
# We use bool(_write_buffer) as a proxy for write_buffer_size>0,
# so never put empty strings in the buffer.
if data:
if (self.max_write_buffer_size is not None and
self._write_buffer_size + len(data) > self.max_write_buffer_size):
raise StreamBufferFullError("Reached maximum write buffer size")
# Break up large contiguous strings before inserting them in the
# write buffer, so we don't have to recopy the entire thing
# as we slice off pieces to send to the socket.
WRITE_BUFFER_CHUNK_SIZE = 128 * 1024
for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE):
self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE])
self._write_buffer_size += len(data)
if callback is not None:
self._write_callback = stack_context.wrap(callback)
future = None
else:
future = self._write_future = TracebackFuture()
future.add_done_callback(lambda f: f.exception())
if not self._connecting:
self._handle_write()
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
return future
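    # Illustrative usage sketch (comment only; not part of the original
    # module). Inside a ``tornado.gen`` coroutine the returned Future can
    # simply be yielded, assuming ``from tornado import gen``:
    #
    #     @gen.coroutine
    #     def send_greeting(stream):
    #         yield stream.write(b"hello\r\n")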
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed.
This is not necessary for applications that use the `.Future`
interface; all outstanding ``Futures`` will resolve with a
`StreamClosedError` when the stream is closed.
"""
self._close_callback = stack_context.wrap(callback)
self._maybe_add_error_listener()
def close(self, exc_info=False):
"""Close this stream.
If ``exc_info`` is true, set the ``error`` attribute to the current
exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
use that instead of `sys.exc_info`).
"""
if not self.closed():
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1]
if self._read_until_close:
if (self._streaming_callback is not None and
self._read_buffer_size):
self._run_read_callback(self._read_buffer_size, True)
self._read_until_close = False
self._run_read_callback(self._read_buffer_size, False)
if self._state is not None:
self.io_loop.remove_handler(self.fileno())
self._state = None
self.close_fd()
self._closed = True
self._maybe_run_close_callback()
def _maybe_run_close_callback(self):
# If there are pending callbacks, don't run the close callback
        # until they're done (see _maybe_add_error_listener)
if self.closed() and self._pending_callbacks == 0:
futures = []
if self._read_future is not None:
futures.append(self._read_future)
self._read_future = None
if self._write_future is not None:
futures.append(self._write_future)
self._write_future = None
if self._connect_future is not None:
futures.append(self._connect_future)
self._connect_future = None
for future in futures:
if (isinstance(self.error, (socket.error, IOError)) and
errno_from_exception(self.error) in _ERRNO_CONNRESET):
# Treat connection resets as closed connections so
# clients only have to catch one kind of exception
# to avoid logging.
future.set_exception(StreamClosedError())
else:
future.set_exception(self.error or StreamClosedError())
if self._close_callback is not None:
cb = self._close_callback
self._close_callback = None
self._run_callback(cb)
# Delete any unfinished callbacks to break up reference cycles.
self._read_callback = self._write_callback = None
# Clear the buffers so they can be cleared immediately even
# if the IOStream object is kept alive by a reference cycle.
# TODO: Clear the read buffer too; it currently breaks some tests.
self._write_buffer = None
def reading(self):
"""Returns true if we are currently reading from the stream."""
return self._read_callback is not None or self._read_future is not None
def writing(self):
"""Returns true if we are currently writing to the stream."""
return bool(self._write_buffer)
def closed(self):
"""Returns true if the stream has been closed."""
return self._closed
def set_nodelay(self, value):
"""Sets the no-delay flag for this stream.
By default, data written to TCP streams may be held for a time
to make the most efficient use of bandwidth (according to
Nagle's algorithm). The no-delay flag requests that data be
written as soon as possible, even if doing so would consume
additional bandwidth.
This flag is currently defined only for TCP-based ``IOStreams``.
.. versionadded:: 3.1
"""
pass
def _handle_events(self, fd, events):
if self.closed():
gen_log.warning("Got events for closed stream %s", fd)
return
try:
if self._connecting:
# Most IOLoops will report a write failed connect
# with the WRITE event, but SelectIOLoop reports a
# READ as well so we must check for connecting before
# either.
self._handle_connect()
if self.closed():
return
if events & self.io_loop.READ:
self._handle_read()
if self.closed():
return
if events & self.io_loop.WRITE:
self._handle_write()
if self.closed():
return
if events & self.io_loop.ERROR:
self.error = self.get_fd_error()
# We may have queued up a user callback in _handle_read or
# _handle_write, so don't close the IOStream until those
# callbacks have had a chance to run.
self.io_loop.add_callback(self.close)
return
state = self.io_loop.ERROR
if self.reading():
state |= self.io_loop.READ
if self.writing():
state |= self.io_loop.WRITE
if state == self.io_loop.ERROR and self._read_buffer_size == 0:
# If the connection is idle, listen for reads too so
# we can tell if the connection is closed. If there is
# data in the read buffer we won't run the close callback
# yet anyway, so we don't need to listen in this case.
state |= self.io_loop.READ
if state != self._state:
assert self._state is not None, \
"shouldn't happen: _handle_events without self._state"
self._state = state
self.io_loop.update_handler(self.fileno(), self._state)
except UnsatisfiableReadError as e:
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
except Exception:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close(exc_info=True)
raise
def _run_callback(self, callback, *args):
def wrapper():
self._pending_callbacks -= 1
try:
return callback(*args)
except Exception:
app_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close(exc_info=True)
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
finally:
self._maybe_add_error_listener()
# We schedule callbacks to be run on the next IOLoop iteration
# rather than running them directly for several reasons:
# * Prevents unbounded stack growth when a callback calls an
# IOLoop operation that immediately runs another callback
# * Provides a predictable execution context for e.g.
# non-reentrant mutexes
# * Ensures that the try/except in wrapper() is run outside
# of the application's StackContexts
with stack_context.NullContext():
# stack_context was already captured in callback, we don't need to
# capture it again for IOStream's wrapper. This is especially
# important if the callback was pre-wrapped before entry to
# IOStream (as in HTTPConnection._header_callback), as we could
# capture and leak the wrong context here.
self._pending_callbacks += 1
self.io_loop.add_callback(wrapper)
def _read_to_buffer_loop(self):
# This method is called from _handle_read and _try_inline_read.
try:
if self._read_bytes is not None:
target_bytes = self._read_bytes
elif self._read_max_bytes is not None:
target_bytes = self._read_max_bytes
elif self.reading():
# For read_until without max_bytes, or
# read_until_close, read as much as we can before
# scanning for the delimiter.
target_bytes = None
else:
target_bytes = 0
next_find_pos = 0
# Pretend to have a pending callback so that an EOF in
# _read_to_buffer doesn't trigger an immediate close
# callback. At the end of this method we'll either
# establish a real pending callback via
# _read_from_buffer or run the close callback.
#
# We need two try statements here so that
# pending_callbacks is decremented before the `except`
# clause below (which calls `close` and does need to
# trigger the callback)
self._pending_callbacks += 1
while not self.closed():
# Read from the socket until we get EWOULDBLOCK or equivalent.
# SSL sockets do some internal buffering, and if the data is
# sitting in the SSL object's buffer select() and friends
# can't see it; the only way to find out if it's there is to
# try to read it.
if self._read_to_buffer() == 0:
break
self._run_streaming_callback()
# If we've read all the bytes we can use, break out of
# this loop. We can't just call read_from_buffer here
# because of subtle interactions with the
# pending_callback and error_listener mechanisms.
#
# If we've reached target_bytes, we know we're done.
if (target_bytes is not None and
self._read_buffer_size >= target_bytes):
break
# Otherwise, we need to call the more expensive find_read_pos.
# It's inefficient to do this on every read, so instead
# do it on the first read and whenever the read buffer
# size has doubled.
if self._read_buffer_size >= next_find_pos:
pos = self._find_read_pos()
if pos is not None:
return pos
next_find_pos = self._read_buffer_size * 2
return self._find_read_pos()
finally:
self._pending_callbacks -= 1
def _handle_read(self):
try:
pos = self._read_to_buffer_loop()
except UnsatisfiableReadError:
raise
except Exception:
gen_log.warning("error on read", exc_info=True)
self.close(exc_info=True)
return
if pos is not None:
self._read_from_buffer(pos)
return
else:
self._maybe_run_close_callback()
def _set_read_callback(self, callback):
assert self._read_callback is None, "Already reading"
assert self._read_future is None, "Already reading"
if callback is not None:
self._read_callback = stack_context.wrap(callback)
else:
self._read_future = TracebackFuture()
return self._read_future
def _run_read_callback(self, size, streaming):
if streaming:
callback = self._streaming_callback
else:
callback = self._read_callback
self._read_callback = self._streaming_callback = None
if self._read_future is not None:
assert callback is None
future = self._read_future
self._read_future = None
future.set_result(self._consume(size))
if callback is not None:
assert self._read_future is None
self._run_callback(callback, self._consume(size))
else:
# If we scheduled a callback, we will add the error listener
# afterwards. If we didn't, we have to do it now.
self._maybe_add_error_listener()
def _try_inline_read(self):
"""Attempt to complete the current read operation from buffered data.
If the read can be completed without blocking, schedules the
read callback on the next IOLoop iteration; otherwise starts
listening for reads on the socket.
"""
# See if we've already got the data from a previous read
self._run_streaming_callback()
pos = self._find_read_pos()
if pos is not None:
self._read_from_buffer(pos)
return
self._check_closed()
try:
pos = self._read_to_buffer_loop()
except Exception:
            # If there was an error in _read_to_buffer, we called close() already,
# but couldn't run the close callback because of _pending_callbacks.
# Before we escape from this function, run the close callback if
# applicable.
self._maybe_run_close_callback()
raise
if pos is not None:
self._read_from_buffer(pos)
return
# We couldn't satisfy the read inline, so either close the stream
# or listen for new data.
if self.closed():
self._maybe_run_close_callback()
else:
self._add_io_state(ioloop.IOLoop.READ)
def _read_to_buffer(self):
"""Reads from the socket and appends the result to the read buffer.
Returns the number of bytes read. Returns 0 if there is nothing
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception.
"""
try:
chunk = self.read_from_fd()
except (socket.error, IOError, OSError) as e:
# ssl.SSLError is a subclass of socket.error
if e.args[0] in _ERRNO_CONNRESET:
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=True)
return
self.close(exc_info=True)
raise
if chunk is None:
return 0
self._read_buffer.append(chunk)
self._read_buffer_size += len(chunk)
if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise StreamBufferFullError("Reached maximum read buffer size")
return len(chunk)
def _run_streaming_callback(self):
if self._streaming_callback is not None and self._read_buffer_size:
bytes_to_consume = self._read_buffer_size
if self._read_bytes is not None:
bytes_to_consume = min(self._read_bytes, bytes_to_consume)
self._read_bytes -= bytes_to_consume
self._run_read_callback(bytes_to_consume, True)
def _read_from_buffer(self, pos):
"""Attempts to complete the currently-pending read from the buffer.
The argument is either a position in the read buffer or None,
as returned by _find_read_pos.
"""
self._read_bytes = self._read_delimiter = self._read_regex = None
self._read_partial = False
self._run_read_callback(pos, False)
def _find_read_pos(self):
"""Attempts to find a position in the read buffer that satisfies
the currently-pending read.
Returns a position in the buffer if the current read can be satisfied,
or None if it cannot.
"""
if (self._read_bytes is not None and
(self._read_buffer_size >= self._read_bytes or
(self._read_partial and self._read_buffer_size > 0))):
num_bytes = min(self._read_bytes, self._read_buffer_size)
return num_bytes
elif self._read_delimiter is not None:
# Multi-byte delimiters (e.g. '\r\n') may straddle two
# chunks in the read buffer, so we can't easily find them
# without collapsing the buffer. However, since protocols
# using delimited reads (as opposed to reads of a known
# length) tend to be "line" oriented, the delimiter is likely
# to be in the first few chunks. Merge the buffer gradually
# since large merges are relatively expensive and get undone in
# _consume().
if self._read_buffer:
while True:
loc = self._read_buffer[0].find(self._read_delimiter)
if loc != -1:
delimiter_len = len(self._read_delimiter)
self._check_max_bytes(self._read_delimiter,
loc + delimiter_len)
return loc + delimiter_len
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_delimiter,
len(self._read_buffer[0]))
elif self._read_regex is not None:
if self._read_buffer:
while True:
m = self._read_regex.search(self._read_buffer[0])
if m is not None:
self._check_max_bytes(self._read_regex, m.end())
return m.end()
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_regex,
len(self._read_buffer[0]))
return None
def _check_max_bytes(self, delimiter, size):
if (self._read_max_bytes is not None and
size > self._read_max_bytes):
raise UnsatisfiableReadError(
"delimiter %r not found within %d bytes" % (
delimiter, self._read_max_bytes))
def _handle_write(self):
while self._write_buffer:
try:
if not self._write_buffer_frozen:
# On windows, socket.send blows up if given a
# write buffer that's too large, instead of just
# returning the number of bytes it was able to
# process. Therefore we must not call socket.send
# with more than 128KB at a time.
_merge_prefix(self._write_buffer, 128 * 1024)
num_bytes = self.write_to_fd(self._write_buffer[0])
if num_bytes == 0:
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
# next call to send. Therefore we suppress
# merging the write buffer after an incomplete send.
# A cleaner solution would be to set
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
# not yet accessible from python
# (http://bugs.python.org/issue8240)
self._write_buffer_frozen = True
break
self._write_buffer_frozen = False
_merge_prefix(self._write_buffer, num_bytes)
self._write_buffer.popleft()
self._write_buffer_size -= num_bytes
except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
self._write_buffer_frozen = True
break
else:
if e.args[0] not in _ERRNO_CONNRESET:
# Broken pipe errors are usually caused by connection
                        # reset, and it's better not to log EPIPE errors to
# minimize log spam
gen_log.warning("Write error on %s: %s",
self.fileno(), e)
self.close(exc_info=True)
return
if not self._write_buffer:
if self._write_callback:
callback = self._write_callback
self._write_callback = None
self._run_callback(callback)
if self._write_future:
future = self._write_future
self._write_future = None
future.set_result(None)
def _consume(self, loc):
if loc == 0:
return b""
_merge_prefix(self._read_buffer, loc)
self._read_buffer_size -= loc
return self._read_buffer.popleft()
def _check_closed(self):
if self.closed():
raise StreamClosedError("Stream is closed")
def _maybe_add_error_listener(self):
# This method is part of an optimization: to detect a connection that
# is closed when we're not actively reading or writing, we must listen
# for read events. However, it is inefficient to do this when the
# connection is first established because we are going to read or write
# immediately anyway. Instead, we insert checks at various times to
# see if the connection is idle and add the read listener then.
if self._pending_callbacks != 0:
return
if self._state is None or self._state == ioloop.IOLoop.ERROR:
if self.closed():
self._maybe_run_close_callback()
elif (self._read_buffer_size == 0 and
self._close_callback is not None):
self._add_io_state(ioloop.IOLoop.READ)
def _add_io_state(self, state):
"""Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
Implementation notes: Reads and writes have a fast path and a
slow path. The fast path reads synchronously from socket
buffers, while the slow path uses `_add_io_state` to schedule
an IOLoop callback. Note that in both cases, the callback is
run asynchronously with `_run_callback`.
To detect closed connections, we must have called
`_add_io_state` at some point, but we want to delay this as
much as possible so we don't have to set an `IOLoop.ERROR`
listener that will be overwritten by the next slow-path
operation. As long as there are callbacks scheduled for
fast-path ops, those callbacks may do more reads.
        If a sequence of fast-path ops does not end in a slow-path op
        (e.g. for an @asynchronous long-poll request), we must add
the error handler. This is done in `_run_callback` and `write`
(since the write callback is optional so we can have a
fast-path write with no `_run_callback`)
"""
if self.closed():
# connection has been closed, so there can be no future events
return
if self._state is None:
self._state = ioloop.IOLoop.ERROR | state
with stack_context.NullContext():
self.io_loop.add_handler(
self.fileno(), self._handle_events, self._state)
elif not self._state & state:
self._state = self._state | state
self.io_loop.update_handler(self.fileno(), self._state)
class IOStream(BaseIOStream):
r"""Socket-based `IOStream` implementation.
This class supports the read and write methods from `BaseIOStream`
plus a `connect` method.
The ``socket`` parameter may either be connected or unconnected.
For server operations the socket is the result of calling
`socket.accept <socket.socket.accept>`. For client operations the
socket is created with `socket.socket`, and may either be
connected before passing it to the `IOStream` or connected with
`IOStream.connect`.
A very simple (and broken) HTTP client using this class::
import tornado.ioloop
import tornado.iostream
import socket
def send_request():
stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
stream.read_until(b"\r\n\r\n", on_headers)
def on_headers(data):
headers = {}
for line in data.split(b"\r\n"):
parts = line.split(b":")
if len(parts) == 2:
headers[parts[0].strip()] = parts[1].strip()
stream.read_bytes(int(headers[b"Content-Length"]), on_body)
def on_body(data):
            print(data)
stream.close()
tornado.ioloop.IOLoop.instance().stop()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = tornado.iostream.IOStream(s)
stream.connect(("friendfeed.com", 80), send_request)
tornado.ioloop.IOLoop.instance().start()
"""
def __init__(self, socket, *args, **kwargs):
self.socket = socket
self.socket.setblocking(False)
super(IOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.socket
def close_fd(self):
self.socket.close()
self.socket = None
def get_fd_error(self):
errno = self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
return socket.error(errno, os.strerror(errno))
def read_from_fd(self):
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def write_to_fd(self, data):
return self.socket.send(data)
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
May only be called if the socket passed to the constructor was
not previously connected. The address parameter is in the
same format as for `socket.connect <socket.socket.connect>` for
the type of socket passed to the IOStream constructor,
e.g. an ``(ip, port)`` tuple. Hostnames are accepted here,
but will be resolved synchronously and block the IOLoop.
If you have a hostname instead of an IP address, the `.TCPClient`
class is recommended instead of calling this method directly.
`.TCPClient` will do asynchronous DNS resolution and handle
both IPv4 and IPv6.
If ``callback`` is specified, it will be called with no
arguments when the connection is completed; if not this method
returns a `.Future` (whose result after a successful
connection will be the stream itself).
If specified, the ``server_hostname`` parameter will be used
in SSL connections for certificate validation (if requested in
the ``ssl_options``) and SNI (if supported; requires
Python 3.2+).
Note that it is safe to call `IOStream.write
<BaseIOStream.write>` while the connection is pending, in
which case the data will be written as soon as the connection
is ready. Calling `IOStream` read methods before the socket is
connected works on some platforms but is non-portable.
.. versionchanged:: 4.0
If no callback is given, returns a `.Future`.
"""
self._connecting = True
if callback is not None:
self._connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._connect_future = TracebackFuture()
try:
self.socket.connect(address)
except socket.error as e:
# In non-blocking mode we expect connect() to raise an
# exception with EINPROGRESS or EWOULDBLOCK.
#
# On freebsd, other errors such as ECONNREFUSED may be
# returned immediately when attempting to connect to
# localhost, so handle them the same way as an error
# reported later in _handle_connect.
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
if future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), e)
self.close(exc_info=True)
return future
self._add_io_state(self.io_loop.WRITE)
return future
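    # Illustrative sketch (comment only; not part of the original module).
    # For hostnames, the docstring above recommends ``tornado.tcpclient``;
    # roughly, assuming ``from tornado.tcpclient import TCPClient`` and
    # ``from tornado import gen``:
    #
    #     @gen.coroutine
    #     def open_stream(host, port):
    #         stream = yield TCPClient().connect(host, port)
    #         raise gen.Return(stream)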
def start_tls(self, server_side, ssl_options=None, server_hostname=None):
"""Convert this `IOStream` to an `SSLIOStream`.
This enables protocols that begin in clear-text mode and
switch to SSL after some initial negotiation (such as the
``STARTTLS`` extension to SMTP and IMAP).
This method cannot be used if there are outstanding reads
or writes on the stream, or if there is any data in the
IOStream's buffer (data in the operating system's socket
buffer is allowed). This means it must generally be used
immediately after reading or writing the last clear-text
data. It can also be used immediately after connecting,
before any reads or writes.
The ``ssl_options`` argument may be either a dictionary
of options or an `ssl.SSLContext`. If a ``server_hostname``
is given, it will be used for certificate verification
(as configured in the ``ssl_options``).
This method returns a `.Future` whose result is the new
`SSLIOStream`. After this method has been called,
any other operation on the original stream is undefined.
If a close callback is defined on this stream, it will be
transferred to the new stream.
.. versionadded:: 4.0
"""
if (self._read_callback or self._read_future or
self._write_callback or self._write_future or
self._connect_callback or self._connect_future or
self._pending_callbacks or self._closed or
self._read_buffer or self._write_buffer):
raise ValueError("IOStream is not idle; cannot convert to SSL")
if ssl_options is None:
ssl_options = {}
socket = self.socket
self.io_loop.remove_handler(socket)
self.socket = None
socket = ssl_wrap_socket(socket, ssl_options,
server_hostname=server_hostname,
server_side=server_side,
do_handshake_on_connect=False)
orig_close_callback = self._close_callback
self._close_callback = None
future = TracebackFuture()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
io_loop=self.io_loop)
# Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves
# so that repeated wrap/unwrap calls don't build up layers.
def close_callback():
if not future.done():
future.set_exception(ssl_stream.error or StreamClosedError())
if orig_close_callback is not None:
orig_close_callback()
ssl_stream.set_close_callback(close_callback)
ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
ssl_stream.max_buffer_size = self.max_buffer_size
ssl_stream.read_chunk_size = self.read_chunk_size
return future
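    # Illustrative STARTTLS-style sketch (comment only; not part of the
    # original module), assuming the last clear-text command has already
    # been written and acknowledged, and ``from tornado import gen``:
    #
    #     @gen.coroutine
    #     def upgrade_to_tls(stream, hostname):
    #         ssl_stream = yield stream.start_tls(
    #             server_side=False, server_hostname=hostname)
    #         raise gen.Return(ssl_stream)
    #
    # After this point only ``ssl_stream`` should be used; the original
    # stream is left in an undefined state, as documented above.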
def _handle_connect(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
# an error state before the socket becomes writable, so
# in that case a connection failure would be handled by the
# error path in _handle_events instead of here.
if self._connect_future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), errno.errorcode[err])
self.close()
return
if self._connect_callback is not None:
callback = self._connect_callback
self._connect_callback = None
self._run_callback(callback)
if self._connect_future is not None:
future = self._connect_future
self._connect_future = None
future.set_result(self)
self._connecting = False
def set_nodelay(self, value):
if (self.socket is not None and
self.socket.family in (socket.AF_INET, socket.AF_INET6)):
try:
self.socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1 if value else 0)
except socket.error as e:
# Sometimes setsockopt will fail if the socket is closed
# at the wrong time. This can happen with HTTPServer
# resetting the value to false between requests.
if e.errno not in (errno.EINVAL, errno.ECONNRESET):
raise
class SSLIOStream(IOStream):
"""A utility class to write to and read from a non-blocking SSL socket.
If the socket passed to the constructor is already connected,
it should be wrapped with::
ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
before constructing the `SSLIOStream`. Unconnected sockets will be
wrapped when `IOStream.connect` is finished.
"""
def __init__(self, *args, **kwargs):
"""The ``ssl_options`` keyword argument may either be a dictionary
        of keyword arguments for `ssl.wrap_socket`, or an `ssl.SSLContext`
object.
"""
self._ssl_options = kwargs.pop('ssl_options', {})
super(SSLIOStream, self).__init__(*args, **kwargs)
self._ssl_accepting = True
self._handshake_reading = False
self._handshake_writing = False
self._ssl_connect_callback = None
self._server_hostname = None
# If the socket is already connected, attempt to start the handshake.
try:
self.socket.getpeername()
except socket.error:
pass
else:
# Indirectly start the handshake, which will run on the next
# IOLoop iteration and then the real IO state will be set in
# _handle_events.
self._add_io_state(self.io_loop.WRITE)
def reading(self):
return self._handshake_reading or super(SSLIOStream, self).reading()
def writing(self):
return self._handshake_writing or super(SSLIOStream, self).writing()
def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib
try:
self._handshake_reading = False
self._handshake_writing = False
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self._handshake_reading = True
return
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self._handshake_writing = True
return
elif err.args[0] in (ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=True)
elif err.args[0] == ssl.SSL_ERROR_SSL:
try:
peer = self.socket.getpeername()
except Exception:
peer = '(not connected)'
gen_log.warning("SSL Error on %s %s: %s",
self.socket.fileno(), peer, err)
return self.close(exc_info=True)
raise
except socket.error as err:
# Some port scans (e.g. nmap in -sT mode) have been known
# to cause do_handshake to raise EBADF, so make that error
# quiet as well.
# https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
if (err.args[0] in _ERRNO_CONNRESET or
err.args[0] == errno.EBADF):
return self.close(exc_info=True)
raise
except AttributeError:
# On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an
# AttributeError.
return self.close(exc_info=True)
else:
self._ssl_accepting = False
if not self._verify_cert(self.socket.getpeercert()):
self.close()
return
if self._ssl_connect_callback is not None:
callback = self._ssl_connect_callback
self._ssl_connect_callback = None
self._run_callback(callback)
def _verify_cert(self, peercert):
"""Returns True if peercert is valid according to the configured
validation mode and hostname.
The ssl handshake already tested the certificate for a valid
CA signature; the only thing that remains is to check
the hostname.
"""
if isinstance(self._ssl_options, dict):
verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
elif isinstance(self._ssl_options, ssl.SSLContext):
verify_mode = self._ssl_options.verify_mode
assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
return True
cert = self.socket.getpeercert()
if cert is None and verify_mode == ssl.CERT_REQUIRED:
gen_log.warning("No SSL certificate given")
return False
try:
ssl_match_hostname(peercert, self._server_hostname)
except SSLCertificateError:
gen_log.warning("Invalid SSL certificate", exc_info=True)
return False
else:
return True
def _handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_read()
def _handle_write(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_write()
def connect(self, address, callback=None, server_hostname=None):
# Save the user's callback and run it after the ssl handshake
# has completed.
self._ssl_connect_callback = stack_context.wrap(callback)
self._server_hostname = server_hostname
# Note: Since we don't pass our callback argument along to
# super.connect(), this will always return a Future.
# This is harmless, but a bit less efficient than it could be.
return super(SSLIOStream, self).connect(address, callback=None)
def _handle_connect(self):
# Call the superclass method to check for errors.
super(SSLIOStream, self)._handle_connect()
if self.closed():
return
# When the connection is complete, wrap the socket for SSL
# traffic. Note that we do this by overriding _handle_connect
# instead of by passing a callback to super().connect because
# user callbacks are enqueued asynchronously on the IOLoop,
# but since _handle_events calls _handle_connect immediately
# followed by _handle_write we need this to be synchronous.
#
# The IOLoop will get confused if we swap out self.socket while the
# fd is registered, so remove it now and re-register after
# wrap_socket().
self.io_loop.remove_handler(self.socket)
old_state = self._state
self._state = None
self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
server_hostname=self._server_hostname,
do_handshake_on_connect=False)
self._add_io_state(old_state)
def read_from_fd(self):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
try:
# SSLSocket objects have both a read() and recv() method,
# while regular sockets only have recv().
# The recv() method blocks (at least in python 2.6) if it is
# called when there is nothing to read, so we have to use
# read() instead.
chunk = self.socket.read(self.read_chunk_size)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None
else:
raise
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
class PipeIOStream(BaseIOStream):
"""Pipe-based `IOStream` implementation.
The constructor takes an integer file descriptor (such as one returned
by `os.pipe`) rather than an open file object. Pipes are generally
one-way, so a `PipeIOStream` can be used for reading or writing but not
both.
"""
def __init__(self, fd, *args, **kwargs):
self.fd = fd
_set_nonblocking(fd)
super(PipeIOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.fd
def close_fd(self):
os.close(self.fd)
def write_to_fd(self, data):
return os.write(self.fd, data)
def read_from_fd(self):
try:
chunk = os.read(self.fd, self.read_chunk_size)
except (IOError, OSError) as e:
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
return None
elif errno_from_exception(e) == errno.EBADF:
# If the writing half of a pipe is closed, select will
# report it as readable but reads will fail with EBADF.
self.close(exc_info=True)
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def _double_prefix(deque):
"""Grow by doubling, but don't split the second chunk just because the
first one is small.
"""
new_len = max(len(deque[0]) * 2,
(len(deque[0]) + len(deque[1])))
_merge_prefix(deque, new_len)
def _merge_prefix(deque, size):
"""Replace the first entries in a deque of strings with a single
string of up to size bytes.
>>> d = collections.deque(['abc', 'de', 'fghi', 'j'])
>>> _merge_prefix(d, 5); print(d)
deque(['abcde', 'fghi', 'j'])
Strings will be split as necessary to reach the desired size.
>>> _merge_prefix(d, 7); print(d)
deque(['abcdefg', 'hi', 'j'])
>>> _merge_prefix(d, 3); print(d)
deque(['abc', 'defg', 'hi', 'j'])
>>> _merge_prefix(d, 100); print(d)
deque(['abcdefghij'])
"""
if len(deque) == 1 and len(deque[0]) <= size:
return
prefix = []
remaining = size
while deque and remaining > 0:
chunk = deque.popleft()
if len(chunk) > remaining:
deque.appendleft(chunk[remaining:])
chunk = chunk[:remaining]
prefix.append(chunk)
remaining -= len(chunk)
# This data structure normally just contains byte strings, but
# the unittest gets messy if it doesn't use the default str() type,
# so do the merge based on the type of data that's actually present.
if prefix:
deque.appendleft(type(prefix[0])().join(prefix))
if not deque:
deque.appendleft(b"")
def doctests():
import doctest
return doctest.DocTestSuite()
| gpl-3.0 |
wemanuel/smry | server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/gcloud/sdktools/components/__init__.py | 4 | 4163 | # Copyright 2013 Google Inc. All Rights Reserved.
"""The super-group for the update manager."""
import argparse
import os
import textwrap
from googlecloudsdk.core.util import platforms
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core.updater import update_manager
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Components(base.Group):
"""List, install, update, or remove Google Cloud SDK components or packages.
"""
detailed_help = {
'DESCRIPTION': textwrap.dedent("""\
{description}
Because you might need only some of the tools in the Cloud SDK to do
your work, you can control which tools are installed on your
workstation. You can install new tools on your workstation when you
find that you need them, and remove tools that you no longer need.
The gcloud command regularly checks whether updates are available for
the tools you already have installed, and gives you the opportunity
to upgrade to the latest version.
Tools can be installed as individual components or as preconfigured
_packages_ of components that are typically all used together to
perform a particular task (such as developing a PHP application on
App Engine).
Certain components _depend_ on other components. When you install a
component that you need, all components upon which it directly or
indirectly depends, and that are not already present on your
workstation, are installed automatically. When you remove a
component, all components that depend on the removed component are
also removed.
"""),
}
@staticmethod
def Args(parser):
"""Sets args for gcloud components."""
# An override for the location to install components into.
parser.add_argument('--sdk-root-override', required=False,
help=argparse.SUPPRESS)
# A different URL to look at instead of the default.
parser.add_argument('--snapshot-url-override', required=False,
help=argparse.SUPPRESS)
# This is not a commonly used option. You can use this flag to create a
# Cloud SDK install for an OS other than the one you are running on.
# Running the updater multiple times for different operating systems could
# result in an inconsistent install.
parser.add_argument('--operating-system-override', required=False,
help=argparse.SUPPRESS)
# This is not a commonly used option. You can use this flag to create a
# Cloud SDK install for a processor architecture other than that of your
# current machine. Running the updater multiple times for different
# architectures could result in an inconsistent install.
parser.add_argument('--architecture-override', required=False,
help=argparse.SUPPRESS)
# pylint:disable=g-missing-docstring
@exceptions.RaiseToolExceptionInsteadOf(platforms.InvalidEnumValue)
def Filter(self, unused_tool_context, args):
if config.INSTALLATION_CONFIG.IsAlternateReleaseChannel():
      log.warning('You are using an alternate release channel: [%s]',
config.INSTALLATION_CONFIG.release_channel)
# Always show the URL if using a non standard release channel.
log.warning('Snapshot URL for this release channel is: [%s]',
config.INSTALLATION_CONFIG.snapshot_url)
os_override = platforms.OperatingSystem.FromId(
args.operating_system_override)
arch_override = platforms.Architecture.FromId(args.architecture_override)
platform = platforms.Platform.Current(os_override, arch_override)
root = (os.path.expanduser(args.sdk_root_override)
if args.sdk_root_override else None)
url = (os.path.expanduser(args.snapshot_url_override)
if args.snapshot_url_override else None)
self.update_manager = update_manager.UpdateManager(
sdk_root=root, url=url, platform_filter=platform)
| apache-2.0 |
freedomsponsors/www.freedomsponsors.org | djangoproject/bitcoin_frespo/models.py | 4 | 1943 | from django.db import models
from django.utils import timezone
class ReceiveAddress(models.Model):
address = models.CharField(max_length=128, blank=True)
available = models.BooleanField(default=True)
@classmethod
def newAddress(cls, address):
receive_address = cls()
receive_address.address = address
receive_address.available = True
return receive_address
def use(self):
self.available = False
self.save()
class MoneySent(models.Model):
from_address = models.CharField(max_length=128)
to_address = models.CharField(max_length=128)
value = models.DecimalField(max_digits=16, decimal_places=8)
transaction_hash = models.CharField(max_length=128, null=True)
status = models.CharField(max_length=30)
creationDate = models.DateTimeField()
lastChangeDate = models.DateTimeField()
CREATED = 'CREATED'
SENT = 'SENT'
CONFIRMED_IPN = 'CONFIRMED_IPN'
CONFIRMED_TRN = 'CONFIRMED_TRN'
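    # Lifecycle implied by the methods below (comment added for clarity;
    # not part of the original source):
    # CREATED --sent()--> SENT --confirm_ipn()--> CONFIRMED_IPN
    # --confirm_trn()--> CONFIRMED_TRN. confirm_ipn() is a no-op unless
    # the record is still CREATED or SENT; confirm_trn() always applies.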
@classmethod
def newMoneySent(cls, from_address, to_address, value):
money_sent = cls()
money_sent.from_address = from_address
money_sent.to_address = to_address
money_sent.value = value
money_sent.status = MoneySent.CREATED
money_sent.creationDate = timezone.now()
money_sent.lastChangeDate = money_sent.creationDate
return money_sent
def touch(self):
self.lastChangeDate = timezone.now()
def sent(self, transaction_hash):
self.status = MoneySent.SENT
self.transaction_hash = transaction_hash
self.touch()
self.save()
def confirm_ipn(self):
if self.status == MoneySent.CREATED or self.status == MoneySent.SENT:
self.status = MoneySent.CONFIRMED_IPN
self.touch()
self.save()
def confirm_trn(self):
self.status = MoneySent.CONFIRMED_TRN
self.touch()
self.save() | agpl-3.0 |
Mistobaan/tensorflow | tensorflow/python/data/ops/readers.py | 8 | 6071 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.util import convert
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
# TODO(b/64974358): Increase default buffer size to 256 MB.
_DEFAULT_READER_BUFFER_SIZE_BYTES = 256 * 1024 # 256 KB
class TextLineDataset(Dataset):
"""A `Dataset` comprising lines from one or more text files."""
def __init__(self, filenames, compression_type=None, buffer_size=None):
"""Creates a `TextLineDataset`.
Args:
filenames: A `tf.string` tensor containing one or more filenames.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
to buffer. A value of 0 results in the default buffering values chosen
based on the compression type.
"""
super(TextLineDataset, self).__init__()
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
self._buffer_size = convert.optional_param_to_tensor(
"buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
def _as_variant_tensor(self):
return gen_dataset_ops.text_line_dataset(
self._filenames, self._compression_type, self._buffer_size)
@property
def output_classes(self):
return ops.Tensor
@property
def output_shapes(self):
return tensor_shape.scalar()
@property
def output_types(self):
return dtypes.string
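# Illustrative usage sketch (comment only; not part of the original module),
# using the 1.x-era Dataset API that this file belongs to:
#
#     dataset = TextLineDataset(["train.txt"], buffer_size=1024 * 1024)
#     dataset = dataset.batch(32)
#     iterator = dataset.make_one_shot_iterator()
#     lines = iterator.get_next()   # string tensor with up to 32 lines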
class TFRecordDataset(Dataset):
"""A `Dataset` comprising records from one or more TFRecord files."""
def __init__(self, filenames, compression_type=None, buffer_size=None):
"""Creates a `TFRecordDataset`.
Args:
filenames: A `tf.string` tensor containing one or more filenames.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
buffer_size: (Optional.) A `tf.int64` scalar representing the number of
bytes in the read buffer. 0 means no buffering.
"""
super(TFRecordDataset, self).__init__()
# Force the type to string even if filenames is an empty list.
self._filenames = ops.convert_to_tensor(
filenames, dtypes.string, name="filenames")
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
self._buffer_size = convert.optional_param_to_tensor(
"buffer_size",
buffer_size,
argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)
def _as_variant_tensor(self):
return gen_dataset_ops.tf_record_dataset(
self._filenames, self._compression_type, self._buffer_size)
@property
def output_classes(self):
return ops.Tensor
@property
def output_shapes(self):
return tensor_shape.TensorShape([])
@property
def output_types(self):
return dtypes.string
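# Illustrative usage sketch (comment only; not part of the original module).
# ``parse_example_fn`` is a hypothetical map function that deserializes the
# records:
#
#     dataset = TFRecordDataset(["data.tfrecord"], compression_type="GZIP")
#     dataset = dataset.map(parse_example_fn).batch(16)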
class FixedLengthRecordDataset(Dataset):
"""A `Dataset` of fixed-length records from one or more binary files."""
def __init__(self,
filenames,
record_bytes,
header_bytes=None,
footer_bytes=None,
buffer_size=None):
"""Creates a `FixedLengthRecordDataset`.
Args:
filenames: A `tf.string` tensor containing one or more filenames.
record_bytes: A `tf.int64` scalar representing the number of bytes in
each record.
header_bytes: (Optional.) A `tf.int64` scalar representing the number of
bytes to skip at the start of a file.
footer_bytes: (Optional.) A `tf.int64` scalar representing the number of
bytes to ignore at the end of a file.
buffer_size: (Optional.) A `tf.int64` scalar representing the number of
bytes to buffer when reading.
"""
super(FixedLengthRecordDataset, self).__init__()
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
self._record_bytes = ops.convert_to_tensor(
record_bytes, dtype=dtypes.int64, name="record_bytes")
self._header_bytes = convert.optional_param_to_tensor(
"header_bytes", header_bytes)
self._footer_bytes = convert.optional_param_to_tensor(
"footer_bytes", footer_bytes)
self._buffer_size = convert.optional_param_to_tensor(
"buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
def _as_variant_tensor(self):
return gen_dataset_ops.fixed_length_record_dataset(
self._filenames, self._header_bytes, self._record_bytes,
self._footer_bytes, self._buffer_size)
@property
def output_classes(self):
return ops.Tensor
@property
def output_shapes(self):
return tensor_shape.scalar()
@property
def output_types(self):
return dtypes.string
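# Illustrative usage sketch (comment only; not part of the original module),
# reading CIFAR-10-style records of 1 label byte plus a 32x32x3 image;
# ``decode_record_fn`` is a hypothetical map function:
#
#     record_bytes = 1 + 32 * 32 * 3
#     dataset = FixedLengthRecordDataset(["data_batch_1.bin"], record_bytes)
#     dataset = dataset.map(decode_record_fn)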
| apache-2.0 |
deepsrijit1105/edx-platform | lms/djangoapps/courseware/tests/test_user_state_client.py | 33 | 2125 | """
Black-box tests of the DjangoUserStateClient against the semantics
defined in edx_user_state_client.
"""
from collections import defaultdict
from unittest import skip
from django.test import TestCase
from edx_user_state_client.tests import UserStateClientTestBase
from courseware.user_state_client import DjangoXBlockUserStateClient
from courseware.tests.factories import UserFactory
class TestDjangoUserStateClient(UserStateClientTestBase, TestCase):
"""
Tests of the DjangoUserStateClient backend.
"""
__test__ = True
# Tell Django to clean out all databases, not just default
multi_db = True
def _user(self, user_idx):
return self.users[user_idx].username
def _block_type(self, block):
# We only record block state history in DjangoUserStateClient
# when the block type is 'problem'
return 'problem'
def setUp(self):
super(TestDjangoUserStateClient, self).setUp()
self.client = DjangoXBlockUserStateClient()
self.users = defaultdict(UserFactory.create)
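        # Note (comment added for clarity; not part of the original source):
        # self.users is a defaultdict, so self.users[user_idx] lazily creates
        # a new user via UserFactory the first time an index is referenced,
        # which is what lets _user() hand out distinct usernames on demand.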
# We're skipping these tests because the iter_all_by_block and iter_all_by_course
# are not implemented in the DjangoXBlockUserStateClient
@skip("Not supported by DjangoXBlockUserStateClient")
def test_iter_blocks_deleted_block(self):
pass
@skip("Not supported by DjangoXBlockUserStateClient")
def test_iter_blocks_empty(self):
pass
@skip("Not supported by DjangoXBlockUserStateClient")
def test_iter_blocks_many_users(self):
pass
@skip("Not supported by DjangoXBlockUserStateClient")
def test_iter_blocks_single_user(self):
pass
@skip("Not supported by DjangoXBlockUserStateClient")
def test_iter_course_deleted_block(self):
pass
@skip("Not supported by DjangoXBlockUserStateClient")
def test_iter_course_empty(self):
pass
@skip("Not supported by DjangoXBlockUserStateClient")
def test_iter_course_single_user(self):
pass
@skip("Not supported by DjangoXBlockUserStateClient")
def test_iter_course_many_users(self):
pass
| agpl-3.0 |
koven2049/hdfs-cloudera-cdh3u3-production | contrib/hod/testing/testHod.py | 182 | 14428 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, getpass, os, sys, re, threading, time
myDirectory = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myDirectory)
sys.path.append(rootDirectory)
import tempfile
from testing.lib import BaseTestSuite, MockLogger, MockHadoopCluster
from hodlib.Hod.hod import hodRunner, hodState
from hodlib.Common.desc import NodePoolDesc
excludes = []
# Information about all clusters is written to a file called clusters.state.
from hodlib.Hod.hod import CLUSTER_DATA_FILE as TEST_CLUSTER_DATA_FILE, \
INVALID_STATE_FILE_MSGS
# Temp directory prefix
TMP_DIR_PREFIX=os.path.join('/tmp', 'hod-%s' % (getpass.getuser()))
# build a config object with all required keys for initializing hod.
def setupConf():
cfg = {
'hod' : {
'original-dir' : os.getcwd(),
'stream' : True,
# store all the info about clusters in this directory
'user_state' : '/tmp/hodtest',
'debug' : 3,
'java-home' : os.getenv('JAVA_HOME'),
'cluster' : 'dummy',
'cluster-factor' : 1.8,
'xrs-port-range' : (32768,65536),
'allocate-wait-time' : 3600,
'temp-dir' : '/tmp/hod'
},
# just set everything to dummy. Need something to initialize the
# node pool description object.
'resource_manager' : {
'id' : 'dummy',
'batch-home' : 'dummy',
'queue' : 'dummy',
}
}
cfg['nodepooldesc'] = NodePoolDesc(cfg['resource_manager'])
return cfg
# Test class that defines methods to test invalid arguments to hod operations.
class test_InvalidArgsOperations(unittest.TestCase):
def setUp(self):
self.cfg = setupConf()
# initialize the mock objects
self.log = MockLogger()
self.cluster = MockHadoopCluster()
# Use the test logger. This will be used for test verification.
self.client = hodRunner(self.cfg, log=self.log, cluster=self.cluster)
# Create the hodState object to set the test state you want.
self.state = hodState(self.cfg['hod']['user_state'])
if not os.path.exists(self.cfg['hod']['user_state']):
      os.makedirs(self.cfg['hod']['user_state'])
p = os.path.join(self.cfg['hod']['user_state'], '%s.state' % TEST_CLUSTER_DATA_FILE)
# ensure cluster data file exists, so write works in the tests.
f = open(p, 'w')
f.close()
def tearDown(self):
# clean up cluster data file and directory
p = os.path.join(self.cfg['hod']['user_state'], '%s.state' % TEST_CLUSTER_DATA_FILE)
os.remove(p)
os.rmdir(self.cfg['hod']['user_state'])
# Test that list works with deleted cluster directories - more than one entries which are invalid.
def testListInvalidDirectory(self):
userState = { os.path.join(TMP_DIR_PREFIX, 'testListInvalidDirectory1') : '123.dummy.id1',
os.path.join(TMP_DIR_PREFIX, 'testListInvalidDirectory2') : '123.dummy.id2' }
self.__setupClusterState(userState)
self.client._op_list(['list'])
# assert that required errors are logged.
for clusterDir in userState.keys():
self.assertTrue(self.log.hasMessage('cluster state unknown\t%s\t%s' \
% (userState[clusterDir], clusterDir), 'info'))
# simulate a test where a directory is deleted, and created again, without deallocation
clusterDir = os.path.join(TMP_DIR_PREFIX, 'testListEmptyDirectory')
os.makedirs(clusterDir)
self.assertTrue(os.path.isdir(clusterDir))
userState = { clusterDir : '123.dummy.id3' }
self.__setupClusterState(userState, False)
self.client._op_list(['list'])
self.assertTrue(self.log.hasMessage('cluster state unknown\t%s\t%s' \
% (userState[clusterDir], clusterDir), 'info'))
os.rmdir(clusterDir)
# Test that info works with a deleted cluster directory
def testInfoInvalidDirectory(self):
clusterDir = os.path.join(TMP_DIR_PREFIX, 'testInfoInvalidDirectory')
userState = { clusterDir : '456.dummy.id' }
self.__setupClusterState(userState)
self.client._op_info(['info', clusterDir])
self.assertTrue(self.log.hasMessage("Cannot find information for cluster with id '%s' in previously allocated cluster directory '%s'." % (userState[clusterDir], clusterDir), 'critical'))
# simulate a test where a directory is deleted, and created again, without deallocation
clusterDir = os.path.join(TMP_DIR_PREFIX, 'testInfoEmptyDirectory')
os.makedirs(clusterDir)
self.assertTrue(os.path.isdir(clusterDir))
userState = { clusterDir : '456.dummy.id1' }
self.__setupClusterState(userState, False)
self.client._op_info(['info', clusterDir])
self.assertTrue(self.log.hasMessage("Cannot find information for cluster with id '%s' in previously allocated cluster directory '%s'." % (userState[clusterDir], clusterDir), 'critical'))
os.rmdir(clusterDir)
# Test info works with an invalid cluster directory
def testInfoNonExistentDirectory(self):
clusterDir = '/tmp/hod/testInfoNonExistentDirectory'
self.client._op_info(['info', clusterDir])
self.assertTrue(self.log.hasMessage("Invalid hod.clusterdir(--hod.clusterdir or -d). %s : No such directory" % (clusterDir), 'critical'))
# Test that deallocation works on a deleted cluster directory
# by clearing the job, and removing the state
def testDeallocateInvalidDirectory(self):
clusterDir = os.path.join(TMP_DIR_PREFIX,'testDeallocateInvalidDirectory')
jobid = '789.dummy.id'
userState = { clusterDir : jobid }
self.__setupClusterState(userState)
self.client._op_deallocate(['deallocate', clusterDir])
# verify job was deleted
self.assertTrue(self.cluster.wasOperationPerformed('delete_job', jobid))
# verify appropriate message was logged.
self.assertTrue(self.log.hasMessage("Cannot find information for cluster with id '%s' in previously allocated cluster directory '%s'." % (userState[clusterDir], clusterDir), 'critical'))
self.assertTrue(self.log.hasMessage("Freeing resources allocated to the cluster.", 'critical'))
# verify that the state information was cleared.
userState = self.state.read(TEST_CLUSTER_DATA_FILE)
self.assertFalse(clusterDir in userState.keys())
# simulate a test where a directory is deleted, and created again, without deallocation
clusterDir = os.path.join(TMP_DIR_PREFIX,'testDeallocateEmptyDirectory')
os.makedirs(clusterDir)
self.assertTrue(os.path.isdir(clusterDir))
jobid = '789.dummy.id1'
userState = { clusterDir : jobid }
self.__setupClusterState(userState, False)
self.client._op_deallocate(['deallocate', clusterDir])
# verify job was deleted
self.assertTrue(self.cluster.wasOperationPerformed('delete_job', jobid))
# verify appropriate message was logged.
self.assertTrue(self.log.hasMessage("Cannot find information for cluster with id '%s' in previously allocated cluster directory '%s'." % (userState[clusterDir], clusterDir), 'critical'))
self.assertTrue(self.log.hasMessage("Freeing resources allocated to the cluster.", 'critical'))
# verify that the state information was cleared.
userState = self.state.read(TEST_CLUSTER_DATA_FILE)
self.assertFalse(clusterDir in userState.keys())
os.rmdir(clusterDir)
# Test that deallocation works on a nonexistent directory.
def testDeallocateNonExistentDirectory(self):
clusterDir = os.path.join(TMP_DIR_PREFIX,'testDeallocateNonExistentDirectory')
self.client._op_deallocate(['deallocate', clusterDir])
# there should be no call..
self.assertFalse(self.cluster.wasOperationPerformed('delete_job', None))
self.assertTrue(self.log.hasMessage("Invalid hod.clusterdir(--hod.clusterdir or -d). %s : No such directory" % (clusterDir), 'critical'))
# Test that allocation on an previously deleted directory fails.
def testAllocateOnDeletedDirectory(self):
clusterDir = os.path.join(TMP_DIR_PREFIX, 'testAllocateOnDeletedDirectory')
os.makedirs(clusterDir)
self.assertTrue(os.path.isdir(clusterDir))
jobid = '1234.abc.com'
userState = { clusterDir : jobid }
self.__setupClusterState(userState, False)
self.client._op_allocate(['allocate', clusterDir, '3'])
self.assertTrue(self.log.hasMessage("Found a previously allocated cluster at "\
"cluster directory '%s'. HOD cannot determine if this cluster "\
"can be automatically deallocated. Deallocate the cluster if it "\
"is unused." % (clusterDir), 'critical'))
os.rmdir(clusterDir)
def __setupClusterState(self, clusterStateMap, verifyDirIsAbsent=True):
for clusterDir in clusterStateMap.keys():
# ensure directory doesn't exist, just in case.
if verifyDirIsAbsent:
self.assertFalse(os.path.exists(clusterDir))
# set up required state.
self.state.write(TEST_CLUSTER_DATA_FILE, clusterStateMap)
# verify everything is stored correctly.
state = self.state.read(TEST_CLUSTER_DATA_FILE)
for clusterDir in clusterStateMap.keys():
self.assertTrue(clusterDir in state.keys())
self.assertEquals(clusterStateMap[clusterDir], state[clusterDir])
class test_InvalidHodStateFiles(unittest.TestCase):
def setUp(self):
self.rootDir = '/tmp/hod-%s' % getpass.getuser()
self.cfg = setupConf() # create a conf
# Modify hod.user_state
self.cfg['hod']['user_state'] = tempfile.mkdtemp(dir=self.rootDir,
prefix='HodTestSuite.test_InvalidHodStateFiles_')
self.log = MockLogger() # mock logger
self.cluster = MockHadoopCluster() # mock hadoop cluster
self.client = hodRunner(self.cfg, log=self.log, cluster=self.cluster)
self.state = hodState(self.cfg['hod']['user_state'])
self.statePath = os.path.join(self.cfg['hod']['user_state'], '%s.state' % \
TEST_CLUSTER_DATA_FILE)
self.clusterDir = tempfile.mkdtemp(dir=self.rootDir,
prefix='HodTestSuite.test_InvalidHodStateFiles_')
def testOperationWithInvalidStateFile(self):
jobid = '1234.hadoop.apache.org'
# create user state file with invalid permissions
stateFile = open(self.statePath, "w")
os.chmod(self.statePath, 000) # has no read/write permissions
self.client._hodRunner__cfg['hod']['operation'] = \
"info %s" % self.clusterDir
ret = self.client.operation()
os.chmod(self.statePath, 0700) # restore permissions
stateFile.close()
os.remove(self.statePath)
# print self.log._MockLogger__logLines
self.assertTrue(self.log.hasMessage(INVALID_STATE_FILE_MSGS[0] % \
os.path.realpath(self.statePath), 'critical'))
self.assertEquals(ret, 1)
def testAllocateWithInvalidStateFile(self):
jobid = '1234.hadoop.apache.org'
# create user state file with invalid permissions
stateFile = open(self.statePath, "w")
os.chmod(self.statePath, 0400) # has no write permissions
self.client._hodRunner__cfg['hod']['operation'] = \
"allocate %s %s" % (self.clusterDir, '3')
ret = self.client.operation()
os.chmod(self.statePath, 0700) # restore permissions
stateFile.close()
os.remove(self.statePath)
# print self.log._MockLogger__logLines
self.assertTrue(self.log.hasMessage(INVALID_STATE_FILE_MSGS[2] % \
os.path.realpath(self.statePath), 'critical'))
self.assertEquals(ret, 1)
def testAllocateWithInvalidStateStore(self):
jobid = '1234.hadoop.apache.org'
self.client._hodRunner__cfg['hod']['operation'] = \
"allocate %s %s" % (self.clusterDir, 3)
###### check with no executable permissions ######
stateFile = open(self.statePath, "w") # create user state file
os.chmod(self.cfg['hod']['user_state'], 0600)
ret = self.client.operation()
os.chmod(self.cfg['hod']['user_state'], 0700) # restore permissions
stateFile.close()
os.remove(self.statePath)
# print self.log._MockLogger__logLines
self.assertTrue(self.log.hasMessage(INVALID_STATE_FILE_MSGS[0] % \
os.path.realpath(self.statePath), 'critical'))
self.assertEquals(ret, 1)
###### check with no write permissions ######
stateFile = open(self.statePath, "w") # create user state file
os.chmod(self.cfg['hod']['user_state'], 0500)
ret = self.client.operation()
os.chmod(self.cfg['hod']['user_state'], 0700) # restore permissions
stateFile.close()
os.remove(self.statePath)
# print self.log._MockLogger__logLines
self.assertTrue(self.log.hasMessage(INVALID_STATE_FILE_MSGS[0] % \
os.path.realpath(self.statePath), 'critical'))
self.assertEquals(ret, 1)
def tearDown(self):
if os.path.exists(self.clusterDir): os.rmdir(self.clusterDir)
if os.path.exists(self.cfg['hod']['user_state']):
os.rmdir(self.cfg['hod']['user_state'])
class HodTestSuite(BaseTestSuite):
def __init__(self):
# suite setup
BaseTestSuite.__init__(self, __name__, excludes)
pass
def cleanUp(self):
# suite tearDown
pass
def RunHodTests():
# modulename_suite
suite = HodTestSuite()
testResult = suite.runTests()
suite.cleanUp()
return testResult
if __name__ == "__main__":
RunHodTests()
| apache-2.0 |
yashodhank/frappe | frappe/utils/response.py | 8 | 5467 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import json
import datetime
import mimetypes
import os
import frappe
from frappe import _
import frappe.model.document
import frappe.utils
import frappe.sessions
import werkzeug.utils
from werkzeug.local import LocalProxy
from werkzeug.wsgi import wrap_file
from werkzeug.wrappers import Response
from werkzeug.exceptions import NotFound, Forbidden
from frappe.core.doctype.file.file import check_file_permission
from frappe.website.render import render
def report_error(status_code):
if (status_code!=404 or frappe.conf.logging) and not frappe.local.flags.disable_traceback:
frappe.errprint(frappe.utils.get_traceback())
response = build_response("json")
response.status_code = status_code
return response
def build_response(response_type=None):
if "docs" in frappe.local.response and not frappe.local.response.docs:
del frappe.local.response["docs"]
response_type_map = {
'csv': as_csv,
'download': as_raw,
'json': as_json,
'page': as_page,
'redirect': redirect
}
return response_type_map[frappe.response.get('type') or response_type]()
def as_csv():
response = Response()
response.mimetype = 'text/csv'
response.charset = 'utf-8'
response.headers[b"Content-Disposition"] = ("attachment; filename=\"%s.csv\"" % frappe.response['doctype'].replace(' ', '_')).encode("utf-8")
response.data = frappe.response['result']
return response
def as_raw():
response = Response()
response.mimetype = frappe.response.get("content_type") or mimetypes.guess_type(frappe.response['filename'])[0] or b"application/unknown"
response.headers[b"Content-Disposition"] = ("filename=\"%s\"" % frappe.response['filename'].replace(' ', '_')).encode("utf-8")
response.data = frappe.response['filecontent']
return response
def as_json():
make_logs()
response = Response()
if frappe.local.response.http_status_code:
response.status_code = frappe.local.response['http_status_code']
del frappe.local.response['http_status_code']
response.mimetype = 'application/json'
response.charset = 'utf-8'
response.data = json.dumps(frappe.local.response, default=json_handler, separators=(',',':'))
return response
def make_logs(response = None):
"""make strings for msgprint and errprint"""
if not response:
response = frappe.local.response
if frappe.error_log:
# frappe.response['exc'] = json.dumps("\n".join([cstr(d) for d in frappe.error_log]))
response['exc'] = json.dumps([frappe.utils.cstr(d) for d in frappe.local.error_log])
if frappe.local.message_log:
response['_server_messages'] = json.dumps([frappe.utils.cstr(d) for
d in frappe.local.message_log])
if frappe.debug_log and frappe.conf.get("logging") or False:
response['_debug_messages'] = json.dumps(frappe.local.debug_log)
def json_handler(obj):
"""serialize non-serializable data for json"""
# serialize date
if isinstance(obj, (datetime.date, datetime.timedelta, datetime.datetime)):
return unicode(obj)
elif isinstance(obj, LocalProxy):
return unicode(obj)
elif isinstance(obj, frappe.model.document.BaseDocument):
doc = obj.as_dict(no_nulls=True)
return doc
elif type(obj)==type or isinstance(obj, Exception):
return repr(obj)
else:
raise TypeError, """Object of type %s with value of %s is not JSON serializable""" % \
(type(obj), repr(obj))
def as_page():
"""print web page"""
return render(frappe.response['route'], http_status_code=frappe.response.get("http_status_code"))
def redirect():
return werkzeug.utils.redirect(frappe.response.location)
def download_backup(path):
try:
frappe.only_for(("System Manager", "Administrator"))
except frappe.PermissionError:
raise Forbidden(_("You need to be logged in and have System Manager Role to be able to access backups."))
return send_private_file(path)
def download_private_file(path):
"""Checks permissions and sends back private file"""
try:
check_file_permission(path)
except frappe.PermissionError:
raise Forbidden(_("You don't have permission to access this file"))
return send_private_file(path.split("/private", 1)[1])
def send_private_file(path):
path = os.path.join(frappe.local.conf.get('private_path', 'private'), path.strip("/"))
filename = os.path.basename(path)
if frappe.local.request.headers.get('X-Use-X-Accel-Redirect'):
path = '/protected/' + path
response = Response()
response.headers[b'X-Accel-Redirect'] = frappe.utils.encode(path)
else:
filepath = frappe.utils.get_site_path(path)
try:
f = open(filepath, 'rb')
except IOError:
raise NotFound
response = Response(wrap_file(frappe.local.request.environ, f), direct_passthrough=True)
# no need for content disposition and force download. let browser handle its opening.
# response.headers.add(b'Content-Disposition', b'attachment', filename=filename.encode("utf-8"))
response.mimetype = mimetypes.guess_type(filename)[0] or b'application/octet-stream'
return response
def handle_session_stopped():
response = Response("""<html>
<body style="background-color: #EEE;">
<h3 style="width: 900px; background-color: #FFF; border: 2px solid #AAA; padding: 20px; font-family: Arial; margin: 20px auto">
Updating.
We will be back in a few moments...
</h3>
</body>
</html>""")
response.status_code = 503
response.content_type = 'text/html'
return response
| mit |
double12gzh/nova | nova/tests/unit/scheduler/filters/test_affinity_filters.py | 56 | 8801 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova import objects
from nova.scheduler.filters import affinity_filter
from nova import test
from nova.tests.unit.scheduler import fakes
CONF = cfg.CONF
CONF.import_opt('my_ip', 'nova.netconf')
class TestDifferentHostFilter(test.NoDBTestCase):
def setUp(self):
super(TestDifferentHostFilter, self).setUp()
self.filt_cls = affinity_filter.DifferentHostFilter()
def test_affinity_different_filter_passes(self):
host = fakes.FakeHostState('host1', 'node1', {})
inst1 = objects.Instance(uuid='different')
host.instances = {inst1.uuid: inst1}
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'different_host': ['same'], }}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_no_list_passes(self):
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {}
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'different_host': 'same'}}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_fails(self):
inst1 = objects.Instance(uuid='same')
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {inst1.uuid: inst1}
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'different_host': ['same'], }}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_handles_none(self):
inst1 = objects.Instance(uuid='same')
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {inst1.uuid: inst1}
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': None}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
class TestSameHostFilter(test.NoDBTestCase):
def setUp(self):
super(TestSameHostFilter, self).setUp()
self.filt_cls = affinity_filter.SameHostFilter()
def test_affinity_same_filter_passes(self):
inst1 = objects.Instance(uuid='same')
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {inst1.uuid: inst1}
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'same_host': ['same'], }}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_no_list_passes(self):
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {}
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'same_host': 'same'}}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_fails(self):
inst1 = objects.Instance(uuid='different')
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {inst1.uuid: inst1}
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'same_host': ['same'], }}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_handles_none(self):
inst1 = objects.Instance(uuid='different')
host = fakes.FakeHostState('host1', 'node1', {})
host.instances = {inst1.uuid: inst1}
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': None}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
def setUp(self):
super(TestSimpleCIDRAffinityFilter, self).setUp()
self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()
def test_affinity_simple_cidr_filter_passes(self):
host = fakes.FakeHostState('host1', 'node1', {})
host.host_ip = '10.8.1.1'
affinity_ip = "10.8.1.100"
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'cidr': '/24',
'build_near_host_ip': affinity_ip}}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_fails(self):
host = fakes.FakeHostState('host1', 'node1', {})
host.host_ip = '10.8.1.1'
affinity_ip = "10.8.1.100"
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': {
'cidr': '/32',
'build_near_host_ip': affinity_ip}}
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_handles_none(self):
host = fakes.FakeHostState('host1', 'node1', {})
affinity_ip = CONF.my_ip.split('.')[0:3]
affinity_ip.append('100')
affinity_ip = str.join('.', affinity_ip)
filter_properties = {'context': mock.sentinel.ctx,
'scheduler_hints': None}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
class TestGroupAffinityFilter(test.NoDBTestCase):
def _test_group_anti_affinity_filter_passes(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'group_policies': ['affinity']}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'group_policies': [policy]}
filter_properties['group_hosts'] = []
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties['group_hosts'] = ['host2']
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_group_anti_affinity_filter_passes(self):
self._test_group_anti_affinity_filter_passes(
affinity_filter.ServerGroupAntiAffinityFilter(),
'anti-affinity')
def _test_group_anti_affinity_filter_fails(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'group_policies': [policy],
'group_hosts': ['host1']}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_group_anti_affinity_filter_fails(self):
self._test_group_anti_affinity_filter_fails(
affinity_filter.ServerGroupAntiAffinityFilter(),
'anti-affinity')
def _test_group_affinity_filter_passes(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'group_policies': ['anti-affinity']}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
filter_properties = {'group_policies': ['affinity'],
'group_hosts': ['host1']}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_group_affinity_filter_passes(self):
self._test_group_affinity_filter_passes(
affinity_filter.ServerGroupAffinityFilter(), 'affinity')
def _test_group_affinity_filter_fails(self, filt_cls, policy):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'group_policies': [policy],
'group_hosts': ['host2']}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_group_affinity_filter_fails(self):
self._test_group_affinity_filter_fails(
affinity_filter.ServerGroupAffinityFilter(), 'affinity')
| apache-2.0 |
dpgeorge/micropython | tests/basics/memoryview1.py | 12 | 1687 | # test memoryview
try:
memoryview
except:
print("SKIP")
raise SystemExit
try:
import uarray as array
except ImportError:
try:
import array
except ImportError:
print("SKIP")
raise SystemExit
# test reading from bytes
b = b'1234'
m = memoryview(b)
print(len(m))
print(m[0], m[1], m[-1])
print(list(m))
# test writing to bytes
try:
m[0] = 1
except TypeError:
print("TypeError")
try:
m[0:2] = b'00'
except TypeError:
print("TypeError")
# test writing to bytearray
b = bytearray(b)
m = memoryview(b)
m[0] = 1
print(b)
print(list(m))
# test slice
m = memoryview(b'1234')
print(list(m[1:]))
print(list(m[1:-1]))
# this tests get_buffer of memoryview
m = memoryview(bytearray(2))
print(bytearray(m))
print(list(memoryview(memoryview(b'1234')))) # read-only memoryview
a = array.array('i', [1, 2, 3, 4])
m = memoryview(a)
print(list(m))
print(list(m[1:-1]))
m[2] = 6
print(a)
# invalid attribute
try:
memoryview(b'a').noexist
except AttributeError:
print('AttributeError')
# equality
print(memoryview(b'abc') == b'abc')
print(memoryview(b'abc') != b'abc')
print(memoryview(b'abc') == b'xyz')
print(memoryview(b'abc') != b'xyz')
print(b'abc' == memoryview(b'abc'))
print(b'abc' != memoryview(b'abc'))
print(b'abc' == memoryview(b'xyz'))
print(b'abc' != memoryview(b'xyz'))
print(memoryview(b'abcdef')[2:4] == b'cd')
print(memoryview(b'abcdef')[2:4] != b'cd')
print(memoryview(b'abcdef')[2:4] == b'xy')
print(memoryview(b'abcdef')[2:4] != b'xy')
print(b'cd' == memoryview(b'abcdef')[2:4])
print(b'cd' != memoryview(b'abcdef')[2:4])
print(b'xy' == memoryview(b'abcdef')[2:4])
print(b'xy' != memoryview(b'abcdef')[2:4])
| mit |
rcharp/toyota-flask | numpy/numpy/fft/info.py | 68 | 6882 | """
Discrete Fourier Transform (:mod:`numpy.fft`)
=============================================
.. currentmodule:: numpy.fft
Standard FFTs
-------------
.. autosummary::
:toctree: generated/
fft Discrete Fourier transform.
ifft Inverse discrete Fourier transform.
fft2 Discrete Fourier transform in two dimensions.
ifft2 Inverse discrete Fourier transform in two dimensions.
fftn Discrete Fourier transform in N-dimensions.
ifftn Inverse discrete Fourier transform in N dimensions.
Real FFTs
---------
.. autosummary::
:toctree: generated/
rfft Real discrete Fourier transform.
irfft Inverse real discrete Fourier transform.
rfft2 Real discrete Fourier transform in two dimensions.
irfft2 Inverse real discrete Fourier transform in two dimensions.
rfftn Real discrete Fourier transform in N dimensions.
irfftn Inverse real discrete Fourier transform in N dimensions.
Hermitian FFTs
--------------
.. autosummary::
:toctree: generated/
hfft Hermitian discrete Fourier transform.
ihfft Inverse Hermitian discrete Fourier transform.
Helper routines
---------------
.. autosummary::
:toctree: generated/
fftfreq Discrete Fourier Transform sample frequencies.
rfftfreq DFT sample frequencies (for usage with rfft, irfft).
fftshift Shift zero-frequency component to center of spectrum.
ifftshift Inverse of fftshift.
Background information
----------------------
Fourier analysis is fundamentally a method for expressing a function as a
sum of periodic components, and for recovering the function from those
components. When both the function and its Fourier transform are
replaced with discretized counterparts, it is called the discrete Fourier
transform (DFT). The DFT has become a mainstay of numerical computing in
part because of a very fast algorithm for computing it, called the Fast
Fourier Transform (FFT), which was known to Gauss (1805) and was brought
to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
provide an accessible introduction to Fourier analysis and its
applications.
Because the discrete Fourier transform separates its input into
components that contribute at discrete frequencies, it has a great number
of applications in digital signal processing, e.g., for filtering, and in
this context the discretized input to the transform is customarily
referred to as a *signal*, which exists in the *time domain*. The output
is called a *spectrum* or *transform* and exists in the *frequency
domain*.
Implementation details
----------------------
There are many ways to define the DFT, varying in the sign of the
exponent, normalization, etc. In this implementation, the DFT is defined
as
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
\\qquad k = 0,\\ldots,n-1.
The DFT is in general defined for complex inputs and outputs, and a
single-frequency component at linear frequency :math:`f` is
represented by a complex exponential
:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
is the sampling interval.
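As a rough check of this convention, the definition above can be evaluated directly for a small, arbitrary input and compared with ``np.fft.fft``:
import numpy as np
a = np.array([1.0, 2.0, 0.5, -1.0])       # arbitrary example values
n = len(a)
m = np.arange(n)
A_direct = np.array([np.sum(a * np.exp(-2j * np.pi * m * k / n)) for k in range(n)])
assert np.allclose(A_direct, np.fft.fft(a))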
The values in the result follow so-called "standard" order: If ``A =
fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the mean of
the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
negative-frequency terms, in order of decreasingly negative frequency.
For an even number of input points, ``A[n/2]`` represents both positive and
negative Nyquist frequency, and is also purely real for real input. For
an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
of corresponding elements in the output. The routine
``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
that shift.
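For instance, with an arbitrary 8-point real signal sampled at interval 0.1, the ordering and shifting helpers can be used along these lines:
import numpy as np
t = np.arange(8) * 0.1                    # 8 samples, spacing 0.1
a = np.sin(2 * np.pi * 2.5 * t)           # arbitrary real test signal
A = np.fft.fft(a)                         # A[0] is the zero-frequency term
freqs = np.fft.fftfreq(8, d=0.1)          # frequencies in the same "standard" order
A_centered = np.fft.fftshift(A)           # zero frequency moved to the middle
freqs_centered = np.fft.fftshift(freqs)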
When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
The phase spectrum is obtained by ``np.angle(A)``.
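A small sketch of these spectra, for an arbitrary signal:
import numpy as np
a = np.sin(np.linspace(0.0, 6.0, 16))     # arbitrary time-domain samples
A = np.fft.fft(a)
amplitude = np.abs(A)                     # amplitude spectrum
power = np.abs(A) ** 2                    # power spectrum
phase = np.angle(A)                       # phase spectrum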
The inverse DFT is defined as
.. math::
a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
\\qquad m = 0,\\ldots,n-1.
It differs from the forward transform by the sign of the exponential
argument and the normalization by :math:`1/n`.
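A quick round trip with arbitrary values illustrates that ``ifft`` undoes ``fft`` up to rounding error:
import numpy as np
a = np.array([1.0, 2.0, 1.0, -1.0, 1.5])  # arbitrary example values
assert np.allclose(np.fft.ifft(np.fft.fft(a)), a)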
Real and Hermitian transforms
-----------------------------
When the input is purely real, its transform is Hermitian, i.e., the
component at frequency :math:`f_k` is the complex conjugate of the
component at frequency :math:`-f_k`, which means that for real
inputs there is no information in the negative frequency components that
is not already available from the positive frequency components.
The family of `rfft` functions is
designed to operate on real inputs, and exploits this symmetry by
computing only the positive frequency components, up to and including the
Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
output points. The inverses of this family assumes the same symmetry of
its input, and for an output of ``n`` points uses ``n/2+1`` input points.
Correspondingly, when the spectrum is purely real, the signal is
Hermitian. The `hfft` family of functions exploits this symmetry by
using ``n/2+1`` complex points in the input (time) domain for ``n`` real
points in the frequency domain.
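A short sketch with an arbitrary even ``n`` shows the resulting sizes:
import numpy as np
a = np.random.rand(8)                     # real input, n = 8
A = np.fft.rfft(a)                        # only non-negative frequencies kept
assert A.shape == (8 // 2 + 1,)           # n/2 + 1 = 5 complex points
assert np.allclose(np.fft.irfft(A, n=8), a)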
In higher dimensions, FFTs are used, e.g., for image analysis and
filtering. The computational efficiency of the FFT means that it can
also be a faster way to compute large convolutions, using the property
that a convolution in the time domain is equivalent to a point-by-point
multiplication in the frequency domain.
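For example, a linear convolution of two arbitrary sequences can be computed by zero-padding both transforms to the full output length:
import numpy as np
a = np.array([1.0, 2.0, 3.0])
b = np.array([0.5, -1.0, 2.0, 1.0])
n = len(a) + len(b) - 1                   # length of the full linear convolution
via_fft = np.fft.ifft(np.fft.fft(a, n) * np.fft.fft(b, n)).real
assert np.allclose(via_fft, np.convolve(a, b))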
Higher dimensions
-----------------
In two dimensions, the DFT is defined as
.. math::
A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
\\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
which extends in the obvious way to higher dimensions, and the inverses
in higher dimensions also extend in the same way.
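A two-dimensional sketch with an arbitrary small array:
import numpy as np
img = np.arange(12.0).reshape(3, 4)       # arbitrary 3x4 "image"
F = np.fft.fft2(img)                      # same shape, complex result
assert F.shape == (3, 4)
assert np.allclose(np.fft.ifft2(F), img)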
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
.. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P.,
2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
12-13. Cambridge Univ. Press, Cambridge, UK.
Examples
--------
For examples, see the various functions.
"""
from __future__ import division, absolute_import, print_function
depends = ['core']
| apache-2.0 |
LiaoPan/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
CryptoManiac/eloipool-scrypt | gmp-proxy.py | 18 | 3584 | #!/usr/bin/python3
import logging
logging.basicConfig(level=logging.DEBUG)
from binascii import b2a_hex
import bitcoin.txn
import bitcoin.varlen
import jsonrpc
import jsonrpcserver
import jsonrpc_getwork
import merkletree
import socket
from struct import pack
import sys
import threading
from time import time
from util import RejectedShare
try:
import jsonrpc.authproxy
jsonrpc.authproxy.USER_AGENT = 'gmp-proxy/0.1'
except:
pass
pool = jsonrpc.ServiceProxy(sys.argv[1])
worklog = {}
currentwork = [None, 0, 0]
def makeMRD(mp):
coinbase = bytes.fromhex(mp['coinbasetxn'])
cbtxn = bitcoin.txn.Txn(coinbase)
cbtxn.disassemble()
cbtxn.originalCB = cbtxn.getCoinbase()
txnlist = [cbtxn,] + list(map(bitcoin.txn.Txn, map(bytes.fromhex, mp['transactions'])))
merkleTree = merkletree.MerkleTree(txnlist)
merkleRoot = None
prevBlock = bytes.fromhex(mp['previousblockhash'])[::-1]
bits = bytes.fromhex(mp['bits'])[::-1]
rollPrevBlk = False
MRD = (merkleRoot, merkleTree, coinbase, prevBlock, bits, rollPrevBlk, mp)
if 'coinbase/append' in mp.get('mutable', ()):
currentwork[:] = (MRD, time(), 0)
else:
currentwork[2] = 0
return MRD
def getMRD():
now = time()
if currentwork[1] < now - 45:
mp = pool.getmemorypool()
MRD = makeMRD(mp)
else:
MRD = currentwork[0]
currentwork[2] += 1
(merkleRoot, merkleTree, coinbase, prevBlock, bits, rollPrevBlk, mp) = MRD
cbtxn = merkleTree.data[0]
coinbase = cbtxn.originalCB + pack('>Q', currentwork[2]).lstrip(b'\0')
if len(coinbase) > 100:
if len(cbtxn.originalCB) > 100:
raise RuntimeError('Pool gave us a coinbase that is too long!')
currentwork[1] = 0
return getMRD()
cbtxn.setCoinbase(coinbase)
cbtxn.assemble()
merkleRoot = merkleTree.merkleRoot()
MRD = (merkleRoot, merkleTree, coinbase, prevBlock, bits, rollPrevBlk, mp)
return MRD
def MakeWork(username):
MRD = getMRD()
(merkleRoot, merkleTree, coinbase, prevBlock, bits, rollPrevBlk, mp) = MRD
timestamp = pack('<L', int(time()))
hdr = b'\1\0\0\0' + prevBlock + merkleRoot + timestamp + bits + b'ppmg'
worklog[hdr[4:68]] = (MRD, time())
return hdr
def SubmitShare(share):
hdr = share['data'][:80]
k = hdr[4:68]
if k not in worklog:
raise RejectedShare('LOCAL unknown-work')
(MRD, issueT) = worklog[k]
(merkleRoot, merkleTree, coinbase, prevBlock, bits, rollPrevBlk, mp) = MRD
cbtxn = merkleTree.data[0]
cbtxn.setCoinbase(coinbase)
cbtxn.assemble()
blkdata = bitcoin.varlen.varlenEncode(len(merkleTree.data))
for txn in merkleTree.data:
blkdata += txn.data
data = b2a_hex(hdr + blkdata).decode('utf8')
a = [data]
if 'workid' in mp:
a.append({'workid': mp['workid']})
rejReason = pool.submitblock(*a)
if not rejReason is None:
currentwork[1] = 0
raise RejectedShare('pool-' + rejReason)
def HandleLP():
global server
# FIXME: get path from gmp!
pool = jsonrpc.ServiceProxy(sys.argv[1].rstrip('/') + '/LP')
while True:
try:
mp = pool.getmemorypool()
break
except socket.timeout:
pass
jsonrpc_getwork._CheckForDupesHACK = {}
makeMRD(mp)
server.wakeLongpoll()
LPThread = None
LPTrackReal = jsonrpcserver.JSONRPCHandler.LPTrack
class LPHook:
def LPTrack(self):
global LPThread
if LPThread is None or not LPThread.is_alive():
LPThread = threading.Thread(target=HandleLP)
LPThread.daemon = True
LPThread.start()
return LPTrackReal(self)
jsonrpcserver.JSONRPCHandler.LPTrack = LPHook.LPTrack
server = jsonrpcserver.JSONRPCServer()
server.getBlockHeader = MakeWork
server.receiveShare = SubmitShare
jsonrpcserver.JSONRPCListener(server, ('', 9332))
server.serve_forever()
| agpl-3.0 |
gangadhar-kadam/mic-erpnext | selling/report/sales_person_target_variance_item_group_wise/sales_person_target_variance_item_group_wise.py | 3 | 5527 | # ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from webnotes import _, msgprint
from webnotes.utils import flt
import time
from accounts.utils import get_fiscal_year
from controllers.trends import get_period_date_ranges, get_period_month_ranges
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
sim_map = get_salesperson_item_month_map(filters)
precision = webnotes.conn.get_value("Global Defaults", None, "float_precision") or 2
data = []
for salesperson, salesperson_items in sim_map.items():
for item_group, monthwise_data in salesperson_items.items():
row = [salesperson, item_group]
totals = [0, 0, 0]
for relevant_months in period_month_ranges:
period_data = [0, 0, 0]
for month in relevant_months:
month_data = monthwise_data.get(month, {})
for i, fieldname in enumerate(["target", "achieved", "variance"]):
value = flt(month_data.get(fieldname), precision)
period_data[i] += value
totals[i] += value
period_data[2] = period_data[0] - period_data[1]
row += period_data
totals[2] = totals[0] - totals[1]
row += totals
data.append(row)
return columns, sorted(data, key=lambda x: (x[0], x[1]))
def get_columns(filters):
for fieldname in ["fiscal_year", "period", "target_on"]:
if not filters.get(fieldname):
label = (" ".join(fieldname.split("_"))).title()
msgprint(_("Please specify") + ": " + label,
raise_exception=True)
columns = ["Sales Person:Link/Sales Person:80", "Item Group:Link/Item Group:80"]
group_months = False if filters["period"] == "Monthly" else True
for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
for label in ["Target (%s)", "Achieved (%s)", "Variance (%s)"]:
if group_months:
columns.append(label % (from_date.strftime("%b") + " - " + to_date.strftime("%b")))
else:
columns.append(label % from_date.strftime("%b"))
return columns + ["Total Target::80", "Total Achieved::80", "Total Variance::80"]
#Get sales person & item group details
def get_salesperson_details(filters):
return webnotes.conn.sql("""select sp.name, td.item_group, td.target_qty,
td.target_amount, sp.distribution_id
from `tabSales Person` sp, `tabTarget Detail` td
where td.parent=sp.name and td.fiscal_year=%s and
ifnull(sp.distribution_id, '')!='' order by sp.name""",
(filters["fiscal_year"]), as_dict=1)
#Get target distribution details of item group
def get_target_distribution_details(filters):
target_details = {}
for d in webnotes.conn.sql("""select bdd.month, bdd.percentage_allocation \
from `tabBudget Distribution Detail` bdd, `tabBudget Distribution` bd, \
`tabTerritory` t where bdd.parent=bd.name and t.distribution_id=bd.name and \
bd.fiscal_year=%s""", (filters["fiscal_year"]), as_dict=1):
target_details.setdefault(d.month, d)
return target_details
#Get achieved details from sales order
def get_achieved_details(filters):
start_date, end_date = get_fiscal_year(fiscal_year = filters["fiscal_year"])[1:]
return webnotes.conn.sql("""select soi.item_code, soi.qty, soi.amount, so.transaction_date,
st.sales_person, MONTHNAME(so.transaction_date) as month_name
from `tabSales Order Item` soi, `tabSales Order` so, `tabSales Team` st
where soi.parent=so.name and so.docstatus=1 and
st.parent=so.name and so.transaction_date>=%s and
so.transaction_date<=%s""" % ('%s', '%s'),
(start_date, end_date), as_dict=1)
def get_salesperson_item_month_map(filters):
salesperson_details = get_salesperson_details(filters)
tdd = get_target_distribution_details(filters)
achieved_details = get_achieved_details(filters)
sim_map = {}
for sd in salesperson_details:
for month in tdd:
sim_map.setdefault(sd.name, {}).setdefault(sd.item_group, {})\
.setdefault(month, webnotes._dict({
"target": 0.0, "achieved": 0.0
}))
tav_dict = sim_map[sd.name][sd.item_group][month]
for ad in achieved_details:
if (filters["target_on"] == "Quantity"):
tav_dict.target = flt(sd.target_qty) * \
(tdd[month]["percentage_allocation"]/100)
if ad.month_name == month and get_item_group(ad.item_code) == sd.item_group \
and ad.sales_person == sd.name:
tav_dict.achieved += ad.qty
if (filters["target_on"] == "Amount"):
tav_dict.target = flt(sd.target_amount) * \
(tdd[month]["percentage_allocation"]/100)
if ad.month_name == month and get_item_group(ad.item_code) == sd.item_group \
and ad.sales_person == sd.name:
tav_dict.achieved += ad.amount
return sim_map
def get_item_group(item_name):
return webnotes.conn.get_value("Item", item_name, "item_group") | agpl-3.0 |
rkashapov/buildbot | master/buildbot/test/unit/test_scripts_base.py | 9 | 15290 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import errno
import os
import string
import textwrap
from twisted.python import runtime
from twisted.python import usage
from twisted.python.compat import NativeStringIO
from twisted.trial import unittest
from buildbot import config as config_module
from buildbot.scripts import base
from buildbot.test.util import dirs
from buildbot.test.util import misc
from buildbot.test.util.decorators import skipUnlessPlatformIs
class TestIBD(dirs.DirsMixin, misc.StdoutAssertionsMixin, unittest.TestCase):
def setUp(self):
self.setUpDirs('test')
self.stdout = NativeStringIO()
self.setUpStdoutAssertions()
def test_isBuildmasterDir_no_dir(self):
self.assertFalse(base.isBuildmasterDir(os.path.abspath('test/nosuch')))
self.assertInStdout('error reading')
self.assertInStdout('invalid buildmaster directory')
def test_isBuildmasterDir_no_file(self):
self.assertFalse(base.isBuildmasterDir(os.path.abspath('test')))
self.assertInStdout('error reading')
self.assertInStdout('invalid buildmaster directory')
def test_isBuildmasterDir_no_Application(self):
# Loading of pre-0.9.0 buildbot.tac file should fail.
with open(os.path.join('test', 'buildbot.tac'), 'w') as f:
f.write("foo\nx = Application('buildslave')\nbar")
self.assertFalse(base.isBuildmasterDir(os.path.abspath('test')))
self.assertInStdout('unexpected content')
self.assertInStdout('invalid buildmaster directory')
def test_isBuildmasterDir_matches(self):
with open(os.path.join('test', 'buildbot.tac'), 'w') as f:
f.write("foo\nx = Application('buildmaster')\nbar")
self.assertTrue(base.isBuildmasterDir(os.path.abspath('test')))
self.assertWasQuiet()
class TestTacFallback(dirs.DirsMixin, unittest.TestCase):
"""
Tests for L{base.getConfigFileFromTac}.
"""
def setUp(self):
"""
Create a base directory.
"""
self.basedir = os.path.abspath('basedir')
return self.setUpDirs('basedir')
def _createBuildbotTac(self, contents=None):
"""
Create a C{buildbot.tac} with the given contents.
@param contents: contents to write to C{buildbot.tac}; defaults to a dummy comment.
@type contents: L{str}
"""
if contents is None:
contents = '#dummy'
tacfile = os.path.join(self.basedir, "buildbot.tac")
with open(tacfile, "wt") as f:
f.write(contents)
return tacfile
def test_getConfigFileFromTac(self):
"""
When L{getConfigFileFromTac} is passed a C{basedir}
containing a C{buildbot.tac}, it reads the location
of the config file from there.
"""
self._createBuildbotTac("configfile='other.cfg'")
foundConfigFile = base.getConfigFileFromTac(
basedir=self.basedir)
self.assertEqual(foundConfigFile, "other.cfg")
def test_getConfigFileFromTac_fallback(self):
"""
When L{getConfigFileFromTac} is passed a C{basedir}
which doesn't contain a C{buildbot.tac},
it returns C{master.cfg}
"""
foundConfigFile = base.getConfigFileFromTac(
basedir=self.basedir)
self.assertEqual(foundConfigFile, 'master.cfg')
def test_getConfigFileFromTac_tacWithoutConfigFile(self):
"""
When L{getConfigFileFromTac} is passed a C{basedir}
containing a C{buildbot.tac}, but C{buildbot.tac} doesn't
define C{configfile}, L{getConfigFileFromTac} returns C{master.cfg}
"""
self._createBuildbotTac()
foundConfigFile = base.getConfigFileFromTac(
basedir=self.basedir)
self.assertEqual(foundConfigFile, 'master.cfg')
def test_getConfigFileFromTac_usingFile(self):
"""
When L{getConfigFileFromTac} is passed a C{basedir}
containing a C{buildbot.tac} which references C{__file__},
that reference points to C{buildbot.tac}.
"""
self._createBuildbotTac(textwrap.dedent("""
from twisted.python.util import sibpath
configfile = sibpath(__file__, "relative.cfg")
"""))
foundConfigFile = base.getConfigFileFromTac(basedir=self.basedir)
self.assertEqual(
foundConfigFile, os.path.join(self.basedir, "relative.cfg"))
class TestSubcommandOptions(unittest.TestCase):
def fakeOptionsFile(self, **kwargs):
self.patch(base.SubcommandOptions, 'loadOptionsFile',
lambda self: kwargs.copy())
def parse(self, cls, *args):
self.opts = cls()
self.opts.parseOptions(args)
return self.opts
class Bare(base.SubcommandOptions):
optFlags = [['foo', 'f', 'Foo!']]
def test_bare_subclass(self):
self.fakeOptionsFile()
opts = self.parse(self.Bare, '-f')
self.assertTrue(opts['foo'])
class ParamsAndOptions(base.SubcommandOptions):
optParameters = [['volume', 'v', '5', 'How Loud?']]
buildbotOptions = [['volcfg', 'volume']]
def test_buildbotOptions(self):
self.fakeOptionsFile()
opts = self.parse(self.ParamsAndOptions)
self.assertEqual(opts['volume'], '5')
def test_buildbotOptions_options(self):
self.fakeOptionsFile(volcfg='3')
opts = self.parse(self.ParamsAndOptions)
self.assertEqual(opts['volume'], '3')
def test_buildbotOptions_override(self):
self.fakeOptionsFile(volcfg='3')
opts = self.parse(self.ParamsAndOptions, '--volume', '7')
self.assertEqual(opts['volume'], '7')
class RequiredOptions(base.SubcommandOptions):
optParameters = [['volume', 'v', None, 'How Loud?']]
requiredOptions = ['volume']
def test_requiredOptions(self):
self.fakeOptionsFile()
self.assertRaises(usage.UsageError,
lambda: self.parse(self.RequiredOptions))
class TestLoadOptionsFile(dirs.DirsMixin, misc.StdoutAssertionsMixin,
unittest.TestCase):
def setUp(self):
self.setUpDirs('test', 'home')
self.opts = base.SubcommandOptions()
self.dir = os.path.abspath('test')
self.home = os.path.abspath('home')
self.setUpStdoutAssertions()
def tearDown(self):
self.tearDownDirs()
def do_loadOptionsFile(self, _here, exp):
# only patch these os.path functions briefly, to
# avoid breaking other parts of the test system
patches = []
if runtime.platformType == 'win32':
from win32com.shell import shell
patches.append(self.patch(shell, 'SHGetFolderPath',
lambda *args: self.home))
else:
def expanduser(p):
return p.replace('~/', self.home + '/')
patches.append(self.patch(os.path, 'expanduser', expanduser))
old_dirname = os.path.dirname
def dirname(p):
# bottom out at self.dir, rather than /
if p == self.dir:
return p
return old_dirname(p)
patches.append(self.patch(os.path, 'dirname', dirname))
try:
self.assertEqual(self.opts.loadOptionsFile(_here=_here), exp)
finally:
for p in patches:
p.restore()
def writeOptionsFile(self, dir, content, bbdir='.buildbot'):
os.makedirs(os.path.join(dir, bbdir))
with open(os.path.join(dir, bbdir, 'options'), 'w') as f:
f.write(content)
def test_loadOptionsFile_subdirs_not_found(self):
subdir = os.path.join(self.dir, 'a', 'b')
os.makedirs(subdir)
self.do_loadOptionsFile(_here=subdir, exp={})
def test_loadOptionsFile_subdirs_at_root(self):
subdir = os.path.join(self.dir, 'a', 'b')
os.makedirs(subdir)
self.writeOptionsFile(self.dir, 'abc="def"')
self.writeOptionsFile(self.home, 'abc=123') # not seen
self.do_loadOptionsFile(_here=subdir, exp={'abc': 'def'})
def test_loadOptionsFile_subdirs_at_tip(self):
subdir = os.path.join(self.dir, 'a', 'b')
os.makedirs(subdir)
self.writeOptionsFile(os.path.join(self.dir, 'a', 'b'), 'abc="def"')
self.writeOptionsFile(self.dir, 'abc=123') # not seen
self.do_loadOptionsFile(_here=subdir, exp={'abc': 'def'})
def test_loadOptionsFile_subdirs_at_homedir(self):
subdir = os.path.join(self.dir, 'a', 'b')
os.makedirs(subdir)
# on windows, the subdir of the home (well, appdata) dir
# is 'buildbot', not '.buildbot'
self.writeOptionsFile(self.home, 'abc=123',
'buildbot' if runtime.platformType == 'win32' else '.buildbot')
self.do_loadOptionsFile(_here=subdir, exp={'abc': 123})
def test_loadOptionsFile_syntax_error(self):
self.writeOptionsFile(self.dir, 'abc=abc')
self.assertRaises(NameError, lambda:
self.do_loadOptionsFile(_here=self.dir, exp={}))
self.assertInStdout('error while reading')
def test_loadOptionsFile_toomany(self):
subdir = os.path.join(self.dir, *tuple(string.ascii_lowercase))
os.makedirs(subdir)
self.do_loadOptionsFile(_here=subdir, exp={})
self.assertInStdout('infinite glories')
# NOTE: testing the ownership check requires patching os.stat, which causes
# other problems since it is so heavily used.
def mkconfig(**kwargs):
config = dict(quiet=False, replace=False, basedir='test')
config.update(kwargs)
return config
class TestLoadConfig(dirs.DirsMixin, misc.StdoutAssertionsMixin,
unittest.TestCase):
def setUp(self):
self.setUpDirs('test')
self.setUpStdoutAssertions()
def tearDown(self):
self.tearDownDirs()
def activeBasedir(self, extra_lines=()):
with open(os.path.join('test', 'buildbot.tac'), 'wt') as f:
f.write("from twisted.application import service\n")
f.write("service.Application('buildmaster')\n")
f.write("\n".join(extra_lines))
def test_checkBasedir(self):
self.activeBasedir()
rv = base.checkBasedir(mkconfig())
self.assertTrue(rv)
self.assertInStdout('checking basedir')
def test_checkBasedir_quiet(self):
self.activeBasedir()
rv = base.checkBasedir(mkconfig(quiet=True))
self.assertTrue(rv)
self.assertWasQuiet()
def test_checkBasedir_no_dir(self):
rv = base.checkBasedir(mkconfig(basedir='doesntexist'))
self.assertFalse(rv)
self.assertInStdout('invalid buildmaster directory')
@skipUnlessPlatformIs('posix')
def test_checkBasedir_active_pidfile(self):
"""
An active PID file results in an error.
"""
self.activeBasedir()
# write our own pid in the file
with open(os.path.join('test', 'twistd.pid'), 'w') as f:
f.write(str(os.getpid()))
rv = base.checkBasedir(mkconfig())
self.assertFalse(rv)
self.assertInStdout('still running')
@skipUnlessPlatformIs('posix')
def test_checkBasedir_bad_pidfile(self):
"""
A corrupted PID file results in an error.
"""
self.activeBasedir()
with open(os.path.join('test', 'twistd.pid'), 'w') as f:
f.write("xxx")
rv = base.checkBasedir(mkconfig())
self.assertFalse(rv)
self.assertInStdout('twistd.pid contains non-numeric value')
@skipUnlessPlatformIs('posix')
def test_checkBasedir_stale_pidfile(self):
"""
Stale PID file is removed without causing a system exit.
"""
self.activeBasedir()
pidfile = os.path.join('test', 'twistd.pid')
with open(pidfile, 'w') as f:
f.write(str(os.getpid() + 1))
def kill(pid, sig):
raise OSError(errno.ESRCH, "fake")
self.patch(os, "kill", kill)
rv = base.checkBasedir(mkconfig())
self.assertTrue(rv)
self.assertInStdout('Removing stale pidfile test')
self.assertFalse(os.path.exists(pidfile))
@skipUnlessPlatformIs('posix')
def test_checkBasedir_pidfile_kill_error(self):
"""
If probing the process recorded in the PID file fails, we should error out.
"""
self.activeBasedir()
# write our own pid in the file
pidfile = os.path.join('test', 'twistd.pid')
with open(pidfile, 'w') as f:
f.write(str(os.getpid() + 1))
def kill(pid, sig):
raise OSError(errno.EPERM, "fake")
self.patch(os, "kill", kill)
rv = base.checkBasedir(mkconfig())
self.assertFalse(rv)
self.assertInStdout('Can\'t check status of PID')
self.assertTrue(os.path.exists(pidfile))
def test_checkBasedir_invalid_rotateLength(self):
self.activeBasedir(extra_lines=['rotateLength="32"'])
rv = base.checkBasedir(mkconfig())
self.assertFalse(rv)
self.assertInStdout('ERROR')
self.assertInStdout('rotateLength')
def test_checkBasedir_invalid_maxRotatedFiles(self):
self.activeBasedir(extra_lines=['maxRotatedFiles="64"'])
rv = base.checkBasedir(mkconfig())
self.assertFalse(rv)
self.assertInStdout('ERROR')
self.assertInStdout('maxRotatedFiles')
def test_loadConfig(self):
@classmethod
def loadConfig(cls):
return config_module.MasterConfig()
self.patch(config_module.FileLoader, 'loadConfig', loadConfig)
cfg = base.loadConfig(mkconfig())
self.assertIsInstance(cfg, config_module.MasterConfig)
self.assertInStdout('checking')
def test_loadConfig_ConfigErrors(self):
@classmethod
def loadConfig(cls):
raise config_module.ConfigErrors(['oh noes'])
self.patch(config_module.FileLoader, 'loadConfig', loadConfig)
cfg = base.loadConfig(mkconfig())
self.assertIdentical(cfg, None)
self.assertInStdout('oh noes')
def test_loadConfig_exception(self):
@classmethod
def loadConfig(cls):
raise RuntimeError()
self.patch(config_module.FileLoader, 'loadConfig', loadConfig)
cfg = base.loadConfig(mkconfig())
self.assertIdentical(cfg, None)
self.assertInStdout('RuntimeError')
| gpl-2.0 |
spnow/grr | client/client_actions/plist_test.py | 5 | 4349 | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
# Copyright 2010 Google Inc. All Rights Reserved.
"""Tests for grr.client.client_actions.plist."""
import os
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.client import vfs
from grr.lib import flags
from grr.lib import plist as plist_lib
from grr.lib import rdfvalue
from grr.lib import test_lib
# This variable holds the same contents as the on-disk test plist
test_plist_dict = {
"date": 978307200000000,
"nested1":
{
"nested11":
{
"data113": "\xde\xad\xbe\xef",
"key111": "value111",
"key112": "value112"
}
},
"numbers": [1, "2", "3"]
}
# y Safari History plist
safari_plist_dict = {
"WebHistoryDates":
[
{"": "http://www.google.com",
"title": "Google",
"lastVisited": "374606652.9",
"visitCount": 2,
},
{"": "http://www.apple.com",
"title": "Apple",
"lastVisited": "374606652.9",
"visitCount": 1,
},
],
"WebHistoryFileVersion": 1,
}
class PlistTest(test_lib.EmptyActionTest):
def testParseFilter(self):
queries = [
('bla is "red"', True),
('bla.bla is "red"', True),
('bla."bla bliek" is "red"', True),
('bla.bla bliek is "red"', False),
]
for query, result in queries:
if result:
plist_lib.PlistFilterParser(query).Parse()
else:
filter_parser = plist_lib.PlistFilterParser(query)
self.assertRaises(Exception, filter_parser.Parse)
def testMatches(self):
query = '"nested1"."nested11"."key112" contains "value112"'
parser = plist_lib.PlistFilterParser(query).Parse()
matcher = parser.Compile(plist_lib.PlistFilterImplementation)
self.assertEqual(matcher.Matches(test_plist_dict), True)
def testActionFullRetrievalOfAPlist(self):
results = self._RunQuery(query="", context="")
if not results:
raise Exception("no results were found...")
self.assertDictEqual(results[0][0].ToDict(), test_plist_dict)
def testActionSingleValueRetrieval(self):
results = self._RunQuery(query="", context="date")
if not results:
raise Exception("no results were found...")
self.assertEqual(results[0][0], 978307200000000)
def testActionFilteredValueRetrieval(self):
# Numbers does NOT contain a 2, but a "2", this should return nothing
results = self._RunQuery(query="numbers contains 2", context="")
self.assertListEqual(list(list(results)[0]), [])
# This one should return the full dict
results = self._RunQuery(query="numbers contains '2'", context="")
self.assertEqual(results[0][0], test_plist_dict)
# SAFARI PLIST
results = self._RunQuery(plist="History.plist",
query='title contains "oogle"',
context="WebHistoryDates")
self.assertEqual(results[0][0],
safari_plist_dict["WebHistoryDates"][0])
# And now SAFARI XML
results = self._RunQuery(plist="History.xml.plist",
query='title contains "oogle"',
context="WebHistoryDates")
self.assertEqual(results[0][0],
safari_plist_dict["WebHistoryDates"][0])
def testActionNonexistantFile(self):
self.assertRaises(IOError, self._RunQuery,
query="",
context="",
plist="nonexistantfile")
def testActionInvalidFile(self):
self.assertRaises(Exception,
self._RunQuery,
query="",
context="",
plist="History")
def _RunQuery(self, plist="test.plist", query="", context=""):
path = os.path.join(self.base_path, plist)
pathspec = rdfvalue.PathSpec(path=path,
pathtype=rdfvalue.PathSpec.PathType.OS)
plistrequest = rdfvalue.PlistRequest()
plistrequest.query = query
plistrequest.context = context
plistrequest.pathspec = pathspec
return self.RunAction("PlistQuery", plistrequest)
def main(argv):
# Initialize the VFS system
vfs.VFSInit()
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 |
naoyat/latin | lda_demo.py | 1 | 7117 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from gensim import corpora, models, similarities #, ldamodel
import sys
import re
import latin.ansi_color as ansi_color
import latin.textutil as textutil
import latin.latin_char as char
import latin.latindic as latindic
import latin.util as util
from latin.LatinObject import LatinObject
from latin.Word import Word
from latin.Predicate import Predicate
from latin.AndOr import AndOr, non_genitive
from latin.PrepClause import PrepClause
from latin.Sentence import Sentence
def lookup_all(surfaces_uc):
def lookup(surface):
items = latindic.lookup(surface)
if items: return Word(surface, items)
if char.isupper(surface[0]):
surface_lower = char.tolower(surface)
items = latindic.lookup(surface_lower)
if items: return Word(surface, items)
if surface[-3:] == u'que':
items = latindic.lookup(surface[:-3])
if items:
# return Word(surface[:-3], items, {'enclitic':'que'})
return Word(surface, items, {'enclitic':'que'})
return None
words = []
l = len(surfaces_uc)
i = 0
while i < l:
surface = surfaces_uc[i]
if ord(surface[0]) <= 64: # dictionary lookup (skip tokens made up only of symbols)
words.append(Word(surface, None))
i += 1
continue
if i < l-1:
surface2 = surface + u' ' + surfaces_uc[i+1]
word2 = lookup(surface2)
# print "word2:", word2.encode('utf-8'), util.render(lu2)
if word2 is not None: # len(word2.items) > 0: #is not None: and len(lu2) > 0:
words.append(word2)
i += 2
continue
word = lookup(surface)
if word is not None:
words.append(word)
else:
words.append(Word(surface, []))
i += 1
# store each word's index within words on its Word instance
for i, word in enumerate(words):
word.index = i
return words
#
# Item -> (priority, pos, base-form)
#
def base_form_of_item(item):
# print item.pos, item.surface, item.item.get('base',None), item.item.get('pres1sg',None), repr(item.item)
if item.pos in ('conj', 'preposition', 'adv', 'pronoun'):
base = None
priority = 9
elif item.pos in ('verb', 'participle'):
base = item.item.get('pres1sg', None)
if base in ('sum', 'meus', 'tuus'):
base = None
priority = 9
else:
priority = 1
elif item.pos in ('noun', 'pronoun', 'adj'):
base = item.item.get('base', None)
priority = 2
else:
base = None #item.surface
priority = 8
if base == '*':
base = None
priority = 9
return (priority, item.pos, base)
#
# Word -> base-form
#
def base_form_of_word(word):
if word.items:
bases = filter(lambda x:x[2], [base_form_of_item(item) for item in word.items])
bases.sort()
if bases:
return bases[0][2]
else:
return None
else:
return None
#
# ["word", ...] -> [base-form, ...]
#
def base_forms_of_words(word_surfaces):
# convert the surfaces to unicode first
word_surfaces_uc = [word_surface.decode('utf-8', 'strict') for word_surface in word_surfaces]
# then look the words up in the dictionary
words = lookup_all(word_surfaces_uc)
return filter(lambda x:x, [base_form_of_word(word) for word in words])
def show_title(title):
print
print ' #'
print ' #', title
print ' #'
print
def main():
latindic.load(auto_macron_mode=False)
show_title('original text')
text = textutil.load_text_from_file('./latin.txt')
print text[:1000], '...'
print
show_title('texts in base-form')
texts_in_baseform = []
for word_surfaces_in_a_sentence in textutil.sentence_stream(textutil.word_stream_from_text(text)):
# print word_surfaces_in_a_sentence
bases = base_forms_of_words(word_surfaces_in_a_sentence)
texts_in_baseform.append(bases)
for sentence in texts_in_baseform[:20]:
print ' '.join([baseform.encode('utf-8') for baseform in sentence])
print '...'
print
show_title('[gensim] dictionary')
dictionary = corpora.Dictionary(texts_in_baseform)
# dictionary.save('/tmp/latintext.dict') # store the dictionary, for future reference
# print dictionary
print '{',
for token, id in dictionary.token2id.items():
print '\"%s\": %d,' % (token.encode('utf-8'), id),
print '}'
# new_doc = "In Crētā īnsulā māgnum labyrinthum Daedalus aedificāvit plēnum viārum flexuōsārum."
# new_bases = base_forms_of_words(new_doc.split())
# # print new_bases
# new_vec = dictionary.doc2bow(new_bases)
# print new_vec
show_title('[gensim] corpus')
corpus = [dictionary.doc2bow(text) for text in texts_in_baseform]
# corpora.MmCorpus.serialize('/tmp/latintext.mm', corpus)
# print corpus
for doc in corpus[:20]:
print doc
print '...'
print
show_title('tf-idf') # term frequency * inverse document frequency
tfidf = models.TfidfModel(corpus) # step 1 -- initialize a model
corpus_tfidf = tfidf[corpus]
for i, doc in enumerate(corpus_tfidf):
print doc
if i == 20: break
print '...'
print
#
def decode_result(item, delim):
def translate(token):
# print "translating \"%s\"..." % token.encode('utf-8')
items = latindic.lookup(token)
return items[0]['ja'] if items else '*'
latin_tokens = re.split(delim, item)[1::2]
jas = [translate(token) for token in latin_tokens]
return ' / '.join(jas) # print "\t", items[0]['ja']
NUM_TOPICS = 80
TOPICS_TO_TAKE = 10
show_title('LSI (Latent Semantic Indexing)')
# initialize an LSI transformation
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=NUM_TOPICS)
# create a double wrapper over the original corpus: bow->tfidf->fold-in-lsi
corpus_lsi = lsi[corpus_tfidf]
topics = lsi.print_topics(TOPICS_TO_TAKE)
for i, item in enumerate(topics):
print "%d) %s" % (1+i, item.encode('utf-8'))
print " ", decode_result(item, '"')
print
print
show_title('LDA (Latent Dirichlet Allocation)')
model = models.ldamodel.LdaModel(corpus, id2word=dictionary, num_topics=NUM_TOPICS)
topics = model.show_topics(topics=TOPICS_TO_TAKE)
for i, item in enumerate(topics):
print "%d) %s" % (1+i, item.encode('utf-8'))
print " ", decode_result(item, ' ?[*+]')
print
print
# show_title('HDP (Hierarchical Dirichlet Process)')
#
# model = models.hdpmodel.HdpModel(corpus, id2word=dictionary)
# topics = model.print_topics(topics=5)
# print topics
# for i, item in enumerate(topics):
# print "%d) %s" % (1+i, item.encode('utf-8'))
# print " ", decode_result(item, ' ?[*+]')
#
# print
if __name__ == '__main__':
main()
| mit |
toontownfunserver/Panda3D-1.9.0 | python/Lib/binhex.py | 216 | 14476 | """Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
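# For reference, the normal hqx run-length scheme encodes a run like 'AAAAAA' as
# 'A' + RUNCHAR + chr(6) (repeat count) and a literal 0x90 byte as RUNCHAR + chr(0);
# the (xx 90 90) case above falls outside that simple rule.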
#
import sys
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
pass
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE=32768 # Minimal amount we pass the rle-coder
LINELEN=64
RUNCHAR=chr(0x90) # run-length introducer
#
# This code is no longer byte-order dependent
#
# Workarounds for non-mac machines.
try:
from Carbon.File import FSSpec, FInfo
from MacOS import openrf
def getfileinfo(name):
finfo = FSSpec(name).FSpGetFInfo()
dir, file = os.path.split(name)
# XXX Get resource/data sizes
fp = open(name, 'rb')
fp.seek(0, 2)
dlen = fp.tell()
fp = openrf(name, '*rb')
fp.seek(0, 2)
rlen = fp.tell()
return file, finfo, dlen, rlen
def openrsrc(name, *mode):
if not mode:
mode = '*rb'
else:
mode = '*' + mode[0]
return openrf(name, mode)
except ImportError:
#
# Glue code for non-macintosh usage
#
class FInfo:
def __init__(self):
self.Type = '????'
self.Creator = '????'
self.Flags = 0
def getfileinfo(name):
finfo = FInfo()
# Quick check for textfile
fp = open(name)
data = open(name).read(256)
for c in data:
if not c.isspace() and (c<' ' or ord(c) > 0x7f):
break
else:
finfo.Type = 'TEXT'
fp.seek(0, 2)
dsize = fp.tell()
fp.close()
dir, file = os.path.split(name)
file = file.replace(':', '-', 1)
return file, finfo, dsize, 0
class openrsrc:
def __init__(self, *args):
pass
def read(self, *args):
return ''
def write(self, *args):
pass
def close(self):
pass
class _Hqxcoderengine:
"""Write data to the coder in 3-byte chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
self.hqxdata = ''
self.linelen = LINELEN-1
def write(self, data):
self.data = self.data + data
datalen = len(self.data)
todo = (datalen//3)*3
data = self.data[:todo]
self.data = self.data[todo:]
if not data:
return
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata)-self.linelen:
last = first + self.linelen
self.ofp.write(self.hqxdata[first:last]+'\n')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
self.ofp.write(self.hqxdata + ':\n')
def close(self):
if self.data:
self.hqxdata = \
self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
class _Rlecoderengine:
"""Write data to the RLE-coder in suitably large chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
def write(self, data):
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = ''
def close(self):
if self.data:
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
class BinHex:
def __init__(self, name_finfo_dlen_rlen, ofp):
name, finfo, dlen, rlen = name_finfo_dlen_rlen
if type(ofp) == type(''):
ofname = ofp
ofp = open(ofname, 'w')
ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
def _writeinfo(self, name, finfo):
nl = len(name)
if nl > 63:
raise Error, 'Filename too long'
d = chr(nl) + name + '\0'
d2 = finfo.Type + finfo.Creator
# Force all structs to be packed with big-endian
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
# XXXX Should this be here??
# self.crc = binascii.crc_hqx('\0\0', self.crc)
if self.crc < 0:
fmt = '>h'
else:
fmt = '>H'
self.ofp.write(struct.pack(fmt, self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error, 'Writing data at the wrong time'
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
raise Error, 'Incorrect data size, diff=%r' % (self.dlen,)
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Writing resource data at the wrong time'
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Close at the wrong time'
if self.rlen != 0:
raise Error, \
"Incorrect resource-datasize, diff=%r" % (self.rlen,)
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
"""(infilename, outfilename) - Create binhex-encoded copy of a file"""
finfo = getfileinfo(inp)
ofp = BinHex(finfo, out)
ifp = open(inp, 'rb')
# XXXX Do textfile translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close_data()
ifp.close()
ifp = openrsrc(inp, 'rb')
while 1:
d = ifp.read(128000)
if not d: break
ofp.write_rsrc(d)
ofp.close()
ifp.close()
class _Hqxdecoderengine:
"""Read data via the decoder in 4-byte chunks"""
def __init__(self, ifp):
self.ifp = ifp
self.eof = 0
def read(self, totalwtd):
"""Read at least wtd bytes (or until EOF)"""
decdata = ''
wtd = totalwtd
#
# The loop here is convoluted, since we don't really know how
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
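# hqx packs 3 raw bytes into 4 encoded characters, so round the request up
# to a whole number of 4-character groups.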
wtd = ((wtd+2)//3)*4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while 1:
try:
decdatacur, self.eof = \
binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error, \
'Premature EOF on binhex file'
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error, 'Premature EOF on binhex file'
return decdata
def close(self):
self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = ''
self.post_buffer = ''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd-len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = ''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
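# Example: if the buffer currently ends in 'A\220' we cannot yet tell whether
# the next incoming byte is a repeat count or the 0 of an escape, so those two
# bytes are held back until more data arrives.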
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + '\0':
mark = mark - 2
elif self.pre_buffer[-2] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if type(ifp) == type(''):
ifp = open(ifp)
#
# Find initial colon.
#
while 1:
ch = ifp.read(1)
if not ch:
raise Error, "No binhex data found"
# Cater for \r\n terminated lines (which show up as \n\r, hence
# all lines start with \r)
if ch == '\r':
continue
if ch == ':':
break
if ch != '\n':
dummy = ifp.readline()
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
#self.crc = binascii.crc_hqx('\0\0', self.crc)
# XXXX Is this needed??
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error, 'CRC error, computed %x, read %x' \
%(self.crc, filecrc)
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1+4+4+2+4+4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error, 'Read data at wrong time'
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = ''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error, 'close_data at wrong time'
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Read resource data at wrong time'
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
"""(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
ofp = open(out, 'wb')
# XXXX Do translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while 1:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close()
def _test():
fname = sys.argv[1]
binhex(fname, fname+'.hqx')
hexbin(fname+'.hqx', fname+'.viahqx')
#hexbin(fname, fname+'.unpacked')
sys.exit(1)
if __name__ == '__main__':
_test()
| bsd-3-clause |
thumbimigwe/golber | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/shell.py | 287 | 15340 | # -*- coding: utf-8 -*-
"""
pygments.lexers.shell
~~~~~~~~~~~~~~~~~~~~~
Lexers for various shells.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, include
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.util import shebang_matches
__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer',
'PowerShellLexer', 'ShellSessionLexer']
line_re = re.compile('.*?\n')
class BashLexer(RegexLexer):
"""
Lexer for (ba|k|)sh shell scripts.
*New in Pygments 0.6.*
"""
name = 'Bash'
aliases = ['bash', 'sh', 'ksh']
filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
'.bashrc', 'bashrc', '.bash_*', 'bash_*']
mimetypes = ['application/x-sh', 'application/x-shellscript']
tokens = {
'root': [
include('basic'),
(r'\$\(\(', Keyword, 'math'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
r'select|continue|until|esac|elif)\s*\b',
Keyword),
(r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
r'ulimit|umask|unalias|unset|wait)\s*\b(?!\.)',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]', Operator),
(r'<<<', Operator), # here-string
(r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r'&&|\|\|', Operator),
],
'data': [
(r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r';', Punctuation),
(r'&', Punctuation),
(r'\|', Punctuation),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
(r'<', Text),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'math': [
(r'\)\)', Keyword, '#pop'),
(r'[-+*/%^|&]|\*\*|\|\|', Operator),
(r'\d+', Number),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
def analyse_text(text):
if shebang_matches(text, r'(ba|z|)sh'):
return 1
if text.startswith('$ '):
return 0.2
class BashSessionLexer(Lexer):
"""
Lexer for simplistic shell sessions.
*New in Pygments 1.1.*
"""
name = 'Bash Session'
aliases = ['console']
filenames = ['*.sh-session']
mimetypes = ['application/x-shell-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:\(\S+\))?(?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)'
r'?|\[\S+[@:][^\n]+\].+)[$#%])(.*\n?)' , line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
elif line.startswith('>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:1])]))
curcode += line[1:]
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class ShellSessionLexer(Lexer):
"""
Lexer for shell sessions that works with different command prompts
*New in Pygments 1.6.*
"""
name = 'Shell Session'
aliases = ['shell-session']
filenames = ['*.shell-session']
mimetypes = ['application/x-sh-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:\[?\S+@[^$#%]+)[$#%])(.*\n?)', line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class BatchLexer(RegexLexer):
"""
Lexer for the DOS/Windows Batch file format.
*New in Pygments 0.7.*
"""
name = 'Batchfile'
aliases = ['bat', 'dosbatch', 'winbatch']
filenames = ['*.bat', '*.cmd']
mimetypes = ['application/x-dos-batch']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Lines can start with @ to prevent echo
(r'^\s*@', Punctuation),
(r'^(\s*)(rem\s.*)$', bygroups(Text, Comment)),
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
# If made more specific, make sure you still allow expansions
# like %~$VAR:zlt
(r'%%?[~$:\w]+%?', Name.Variable),
(r'::.*', Comment), # Technically :: only works at BOL
(r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
(r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
(r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
(r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|'
r'setlocal|shift|errorlevel|exist|defined|cmdextversion|'
r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword),
(r'\b(equ|neq|lss|leq|gtr|geq)\b', Operator),
include('basic'),
(r'.', Text),
],
'echo': [
# Escapes only valid within echo args?
(r'\^\^|\^<|\^>|\^\|', String.Escape),
(r'\n', Text, '#pop'),
include('basic'),
(r'[^\'"^]+', Text),
],
'basic': [
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
(r'`.*?`', String.Backtick),
(r'-?\d+', Number),
(r',', Punctuation),
(r'=', Operator),
(r'/\S+', Name),
(r':\w+', Name.Label),
(r'\w:\w+', Text),
(r'([<>|])(\s*)(\w+)', bygroups(Punctuation, Text, Name)),
],
}
class TcshLexer(RegexLexer):
"""
Lexer for tcsh scripts.
*New in Pygments 0.10.*
"""
name = 'Tcsh'
aliases = ['tcsh', 'csh']
filenames = ['*.tcsh', '*.csh']
mimetypes = ['application/x-csh']
tokens = {
'root': [
include('basic'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|endif|else|while|then|foreach|case|default|'
r'continue|goto|breaksw|end|switch|endsw)\s*\b',
Keyword),
(r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|'
r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|'
r'set|shift|sched|setenv|setpath|settc|setty|setxvers|shift|'
r'source|stop|suspend|source|suspend|telltc|time|'
r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
r'ver|wait|warp|watchlog|where|which)\s*\b',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
class PowerShellLexer(RegexLexer):
"""
For Windows PowerShell code.
*New in Pygments 1.5.*
"""
name = 'PowerShell'
aliases = ['powershell', 'posh', 'ps1', 'psm1']
filenames = ['*.ps1','*.psm1']
mimetypes = ['text/x-powershell']
flags = re.DOTALL | re.IGNORECASE | re.MULTILINE
keywords = (
'while validateset validaterange validatepattern validatelength '
'validatecount until trap switch return ref process param parameter in '
'if global: function foreach for finally filter end elseif else '
'dynamicparam do default continue cmdletbinding break begin alias \\? '
'% #script #private #local #global mandatory parametersetname position '
'valuefrompipeline valuefrompipelinebypropertyname '
'valuefromremainingarguments helpmessage try catch throw').split()
operators = (
'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
'lt match ne not notcontains notlike notmatch or regex replace '
'wildcard').split()
verbs = (
'write where wait use update unregister undo trace test tee take '
'suspend stop start split sort skip show set send select scroll resume '
'restore restart resolve resize reset rename remove register receive '
'read push pop ping out new move measure limit join invoke import '
'group get format foreach export expand exit enter enable disconnect '
'disable debug cxnew copy convertto convertfrom convert connect '
'complete compare clear checkpoint aggregate add').split()
commenthelp = (
'component description example externalhelp forwardhelpcategory '
'forwardhelptargetname functionality inputs link '
'notes outputs parameter remotehelprunspace role synopsis').split()
tokens = {
'root': [
# we need to count pairs of parentheses for correct highlight
# of '$(...)' blocks in strings
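# e.g. in "Total: $(1 + (2 * 3))" the inner ')' must not be taken as the end
# of the '$(' subexpression, hence the recursive 'child' state below.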
(r'\(', Punctuation, 'child'),
(r'\s+', Text),
(r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
bygroups(Comment, String.Doc, Comment)),
(r'#[^\n]*?$', Comment),
(r'(<|&lt;)#', Comment.Multiline, 'multline'),
(r'@"\n', String.Heredoc, 'heredoc-double'),
(r"@'\n.*?\n'@", String.Heredoc),
# escaped syntax
(r'`[\'"$@-]', Punctuation),
(r'"', String.Double, 'string'),
(r"'([^']|'')*'", String.Single),
(r'(\$|@@|@)((global|script|private|env):)?[a-z0-9_]+',
Name.Variable),
(r'(%s)\b' % '|'.join(keywords), Keyword),
(r'-(%s)\b' % '|'.join(operators), Operator),
(r'(%s)-[a-z_][a-z0-9_]*\b' % '|'.join(verbs), Name.Builtin),
(r'\[[a-z_\[][a-z0-9_. `,\[\]]*\]', Name.Constant), # .net [type]s
(r'-[a-z_][a-z0-9_]*', Name),
(r'\w+', Name),
(r'[.,;@{}\[\]$()=+*/\\&%!~?^`|<>-]|::', Punctuation),
],
'child': [
(r'\)', Punctuation, '#pop'),
include('root'),
],
'multline': [
(r'[^#&.]+', Comment.Multiline),
(r'#(>|&gt;)', Comment.Multiline, '#pop'),
(r'\.(%s)' % '|'.join(commenthelp), String.Doc),
(r'[#&.]', Comment.Multiline),
],
'string': [
(r"`[0abfnrtv'\"\$]", String.Escape),
(r'[^$`"]+', String.Double),
(r'\$\(', Punctuation, 'child'),
(r'""', String.Double),
(r'[`$]', String.Double),
(r'"', String.Double, '#pop'),
],
'heredoc-double': [
(r'\n"@', String.Heredoc, '#pop'),
(r'\$\(', Punctuation, 'child'),
(r'[^@\n]+"]', String.Heredoc),
(r".", String.Heredoc),
]
}
| mit |
tiagochiavericosta/edx-platform | lms/djangoapps/dashboard/tests/test_sysadmin.py | 83 | 23866 | """
Provide tests for sysadmin dashboard feature in sysadmin.py
"""
import glob
import os
import re
import shutil
import unittest
from util.date_utils import get_time_display, DEFAULT_DATE_TIME_FORMAT
from nose.plugins.attrib import attr
from django.conf import settings
from django.contrib.auth.hashers import check_password
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test.utils import override_settings
from django.utils.timezone import utc as UTC
from django.utils.translation import ugettext as _
import mongoengine
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.django_utils import TEST_DATA_XML_MODULESTORE
from dashboard.models import CourseImportLog
from dashboard.sysadmin import Users
from dashboard.git_import import GitImportError
from datetime import datetime
from external_auth.models import ExternalAuthMap
from student.roles import CourseStaffRole, GlobalStaff
from student.tests.factories import UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
TEST_MONGODB_LOG = {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'user': '',
'password': '',
'db': 'test_xlog',
}
FEATURES_WITH_SSL_AUTH = settings.FEATURES.copy()
FEATURES_WITH_SSL_AUTH['AUTH_USE_CERTIFICATES'] = True
class SysadminBaseTestCase(ModuleStoreTestCase):
"""
Base class with common methods used in XML and Mongo tests
"""
TEST_REPO = 'https://github.com/mitocw/edx4edx_lite.git'
TEST_BRANCH = 'testing_do_not_delete'
TEST_BRANCH_COURSE = SlashSeparatedCourseKey('MITx', 'edx4edx_branch', 'edx4edx')
def setUp(self):
"""Setup test case by adding primary user."""
super(SysadminBaseTestCase, self).setUp(create_user=False)
self.user = UserFactory.create(username='test_user',
email='[email protected]',
password='foo')
self.client = Client()
def _setstaff_login(self):
"""Makes the test user staff and logs them in"""
GlobalStaff().add_users(self.user)
self.client.login(username=self.user.username, password='foo')
def _add_edx4edx(self, branch=None):
"""Adds the edx4edx sample course"""
post_dict = {'repo_location': self.TEST_REPO, 'action': 'add_course', }
if branch:
post_dict['repo_branch'] = branch
return self.client.post(reverse('sysadmin_courses'), post_dict)
def _rm_edx4edx(self):
"""Deletes the sample course from the XML store"""
def_ms = modulestore()
course_path = '{0}/edx4edx_lite'.format(
os.path.abspath(settings.DATA_DIR))
try:
# using XML store
course = def_ms.courses.get(course_path, None)
except AttributeError:
# Using mongo store
course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))
# Delete git loaded course
response = self.client.post(
reverse('sysadmin_courses'),
{
'course_id': course.id.to_deprecated_string(),
'action': 'del_course',
}
)
self.addCleanup(self._rm_glob, '{0}_deleted_*'.format(course_path))
return response
def _rm_glob(self, path):
"""
Create a shell expansion of passed in parameter and iteratively
remove them. Must only expand to directories.
"""
for path in glob.glob(path):
shutil.rmtree(path)
def _mkdir(self, path):
"""
Create directory and add the cleanup for it.
"""
os.mkdir(path)
self.addCleanup(shutil.rmtree, path)
@attr('shard_1')
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'),
"ENABLE_SYSADMIN_DASHBOARD not set")
@override_settings(GIT_IMPORT_WITH_XMLMODULESTORE=True)
class TestSysadmin(SysadminBaseTestCase):
"""
Test sysadmin dashboard features using XMLModuleStore
"""
MODULESTORE = TEST_DATA_XML_MODULESTORE
def test_staff_access(self):
"""Test access controls."""
test_views = ['sysadmin', 'sysadmin_courses', 'sysadmin_staffing', ]
for view in test_views:
response = self.client.get(reverse(view))
self.assertEqual(response.status_code, 302)
self.user.is_staff = False
self.user.save()
logged_in = self.client.login(username=self.user.username,
password='foo')
self.assertTrue(logged_in)
for view in test_views:
response = self.client.get(reverse(view))
self.assertEqual(response.status_code, 404)
response = self.client.get(reverse('gitlogs'))
self.assertEqual(response.status_code, 404)
self.user.is_staff = True
self.user.save()
self.client.logout()
self.client.login(username=self.user.username, password='foo')
for view in test_views:
response = self.client.get(reverse(view))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('gitlogs'))
self.assertEqual(response.status_code, 200)
def test_user_mod(self):
"""Create and delete a user"""
self._setstaff_login()
self.client.login(username=self.user.username, password='foo')
# Create user tests
# No uname
response = self.client.post(reverse('sysadmin'),
{'action': 'create_user',
'student_fullname': 'blah',
'student_password': 'foozor', })
self.assertIn('Must provide username', response.content.decode('utf-8'))
# no full name
response = self.client.post(reverse('sysadmin'),
{'action': 'create_user',
'student_uname': '[email protected]',
'student_password': 'foozor', })
self.assertIn('Must provide full name', response.content.decode('utf-8'))
# Test create valid user
self.client.post(reverse('sysadmin'),
{'action': 'create_user',
'student_uname': '[email protected]',
'student_fullname': 'test cuser',
'student_password': 'foozor', })
self.assertIsNotNone(
User.objects.get(username='[email protected]',
email='[email protected]'))
# login as new user to confirm
self.assertTrue(self.client.login(
username='[email protected]', password='foozor'))
self.client.logout()
self.client.login(username=self.user.username, password='foo')
# Delete user tests
# Try no username
response = self.client.post(reverse('sysadmin'),
{'action': 'del_user', })
self.assertIn('Must provide username', response.content.decode('utf-8'))
# Try bad usernames
response = self.client.post(reverse('sysadmin'),
{'action': 'del_user',
'student_uname': '[email protected]',
'student_fullname': 'enigma jones', })
self.assertIn('Cannot find user with email address', response.content.decode('utf-8'))
response = self.client.post(reverse('sysadmin'),
{'action': 'del_user',
'student_uname': 'flabbergast',
'student_fullname': 'enigma jones', })
self.assertIn('Cannot find user with username', response.content.decode('utf-8'))
self.client.post(reverse('sysadmin'),
{'action': 'del_user',
'student_uname': '[email protected]',
'student_fullname': 'test cuser', })
self.assertEqual(0, len(User.objects.filter(
username='[email protected]',
email='[email protected]')))
self.assertEqual(1, len(User.objects.all()))
def test_user_csv(self):
"""Download and validate user CSV"""
num_test_users = 100
self._setstaff_login()
# Stuff full of users to test streaming
for user_num in xrange(num_test_users):
Users().create_user('testingman_with_long_name{}'.format(user_num),
'test test')
response = self.client.post(reverse('sysadmin'),
{'action': 'download_users', })
self.assertIn('attachment', response['Content-Disposition'])
self.assertEqual('text/csv', response['Content-Type'])
self.assertIn('test_user', response.content)
self.assertEqual(num_test_users + 2, len(response.content.splitlines()))
# Clean up
User.objects.filter(
username__startswith='testingman_with_long_name').delete()
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH)
def test_authmap_repair(self):
"""Run authmap check and repair"""
self._setstaff_login()
Users().create_user('test0', 'test test')
# Will raise exception, so no assert needed
eamap = ExternalAuthMap.objects.get(external_name='test test')
mitu = User.objects.get(username='test0')
self.assertTrue(check_password(eamap.internal_password, mitu.password))
mitu.set_password('not autogenerated')
mitu.save()
self.assertFalse(check_password(eamap.internal_password, mitu.password))
# Create really non user AuthMap
ExternalAuthMap(external_id='ll',
external_domain='ll',
external_credentials='{}',
external_email='[email protected]',
external_name='c',
internal_password='').save()
response = self.client.post(reverse('sysadmin'),
{'action': 'repair_eamap', })
self.assertIn('{0} test0'.format('Failed in authenticating'),
response.content)
self.assertIn('fixed password', response.content.decode('utf-8'))
self.assertTrue(self.client.login(username='test0',
password=eamap.internal_password))
# Check for all OK
self._setstaff_login()
response = self.client.post(reverse('sysadmin'),
{'action': 'repair_eamap', })
self.assertIn('All ok!', response.content.decode('utf-8'))
def test_xml_course_add_delete(self):
"""add and delete course from xml module store"""
self._setstaff_login()
# Try bad git repo
response = self.client.post(reverse('sysadmin_courses'), {
'repo_location': 'github.com/mitocw/edx4edx_lite',
'action': 'add_course', })
self.assertIn(_("The git repo location should end with '.git', "
"and be a valid url"), response.content.decode('utf-8'))
response = self.client.post(reverse('sysadmin_courses'), {
'repo_location': 'http://example.com/not_real.git',
'action': 'add_course', })
self.assertIn('Unable to clone or pull repository',
response.content.decode('utf-8'))
# Create git loaded course
response = self._add_edx4edx()
def_ms = modulestore()
self.assertEqual('xml', def_ms.get_modulestore_type(None))
course = def_ms.courses.get('{0}/edx4edx_lite'.format(
os.path.abspath(settings.DATA_DIR)), None)
self.assertIsNotNone(course)
# Delete a course
self._rm_edx4edx()
course = def_ms.courses.get('{0}/edx4edx_lite'.format(
os.path.abspath(settings.DATA_DIR)), None)
self.assertIsNone(course)
# Load a bad git branch
response = self._add_edx4edx('asdfasdfasdf')
self.assertIn(GitImportError.REMOTE_BRANCH_MISSING,
response.content.decode('utf-8'))
# Load a course from a git branch
self._add_edx4edx(self.TEST_BRANCH)
course = def_ms.courses.get('{0}/edx4edx_lite'.format(
os.path.abspath(settings.DATA_DIR)), None)
self.assertIsNotNone(course)
self.assertEqual(self.TEST_BRANCH_COURSE, course.id)
self._rm_edx4edx()
# Try and delete a non-existent course
response = self.client.post(reverse('sysadmin_courses'),
{'course_id': 'foobar/foo/blah',
'action': 'del_course', })
self.assertIn('Error - cannot get course with ID',
response.content.decode('utf-8'))
@override_settings(GIT_IMPORT_WITH_XMLMODULESTORE=False)
def test_xml_safety_flag(self):
"""Make sure the settings flag to disable xml imports is working"""
self._setstaff_login()
response = self._add_edx4edx()
self.assertIn('GIT_IMPORT_WITH_XMLMODULESTORE', response.content)
def_ms = modulestore()
course = def_ms.courses.get('{0}/edx4edx_lite'.format(
os.path.abspath(settings.DATA_DIR)), None)
self.assertIsNone(course)
def test_git_pull(self):
"""Make sure we can pull"""
self._setstaff_login()
response = self._add_edx4edx()
response = self._add_edx4edx()
self.assertIn(_("The course {0} already exists in the data directory! "
"(reloading anyway)").format('edx4edx_lite'),
response.content.decode('utf-8'))
self._rm_edx4edx()
def test_staff_csv(self):
"""Download and validate staff CSV"""
self._setstaff_login()
self._add_edx4edx()
def_ms = modulestore()
course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))
CourseStaffRole(course.id).add_users(self.user)
response = self.client.post(reverse('sysadmin_staffing'),
{'action': 'get_staff_csv', })
self.assertIn('attachment', response['Content-Disposition'])
self.assertEqual('text/csv', response['Content-Type'])
columns = ['course_id', 'role', 'username',
'email', 'full_name', ]
self.assertIn(','.join('"' + c + '"' for c in columns),
response.content)
self._rm_edx4edx()
def test_enrollment_page(self):
"""
Adds a course and makes sure that it shows up on the staffing and
enrollment page
"""
self._setstaff_login()
self._add_edx4edx()
response = self.client.get(reverse('sysadmin_staffing'))
self.assertIn('edx4edx', response.content)
self._rm_edx4edx()
@attr('shard_1')
@override_settings(MONGODB_LOG=TEST_MONGODB_LOG)
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'),
"ENABLE_SYSADMIN_DASHBOARD not set")
class TestSysAdminMongoCourseImport(SysadminBaseTestCase):
"""
Check that importing into the mongo module store works
"""
@classmethod
def tearDownClass(cls):
"""Delete mongo log entries after test."""
super(TestSysAdminMongoCourseImport, cls).tearDownClass()
try:
mongoengine.connect(TEST_MONGODB_LOG['db'])
CourseImportLog.objects.all().delete()
except mongoengine.connection.ConnectionError:
pass
def _setstaff_login(self):
"""
Makes the test user staff and logs them in
"""
self.user.is_staff = True
self.user.save()
self.client.login(username=self.user.username, password='foo')
def test_missing_repo_dir(self):
"""
Ensure that we handle a missing repo dir
"""
self._setstaff_login()
if os.path.isdir(getattr(settings, 'GIT_REPO_DIR')):
shutil.rmtree(getattr(settings, 'GIT_REPO_DIR'))
# Create git loaded course
response = self._add_edx4edx()
self.assertIn(GitImportError.NO_DIR,
response.content.decode('UTF-8'))
def test_mongo_course_add_delete(self):
"""
This is the same as TestSysadmin.test_xml_course_add_delete,
but it uses a mongo store
"""
self._setstaff_login()
self._mkdir(getattr(settings, 'GIT_REPO_DIR'))
def_ms = modulestore()
self.assertFalse('xml' == def_ms.get_modulestore_type(None))
self._add_edx4edx()
course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))
self.assertIsNotNone(course)
self._rm_edx4edx()
course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))
self.assertIsNone(course)
def test_course_info(self):
"""
Check to make sure we are getting git info for courses
"""
# Regex of first 3 columns of course information table row for
# test course loaded from git. Would not have sha1 if
# git_info_for_course failed.
table_re = re.compile(r"""
<tr>\s+
<td>edX\sAuthor\sCourse</td>\s+ # expected test git course name
<td>MITx/edx4edx/edx4edx</td>\s+ # expected test git course_id
<td>[a-fA-F\d]{40}</td> # git sha1 hash
""", re.VERBOSE)
self._setstaff_login()
self._mkdir(getattr(settings, 'GIT_REPO_DIR'))
# Make sure we don't have any git hashes on the page
response = self.client.get(reverse('sysadmin_courses'))
self.assertNotRegexpMatches(response.content, table_re)
# Now add the course and make sure it does match
response = self._add_edx4edx()
self.assertRegexpMatches(response.content, table_re)
def test_gitlogs(self):
"""
Create a log entry and make sure it exists
"""
self._setstaff_login()
self._mkdir(getattr(settings, 'GIT_REPO_DIR'))
self._add_edx4edx()
response = self.client.get(reverse('gitlogs'))
# Check that our earlier import has a log with a link to details
self.assertIn('/gitlogs/MITx/edx4edx/edx4edx', response.content)
response = self.client.get(
reverse('gitlogs_detail', kwargs={
'course_id': 'MITx/edx4edx/edx4edx'}))
self.assertIn('======> IMPORTING course',
response.content)
self._rm_edx4edx()
def test_gitlog_date(self):
"""
Make sure the date is timezone-aware and being converted/formatted
properly.
"""
tz_names = [
'America/New_York', # UTC - 5
'Asia/Pyongyang', # UTC + 9
'Europe/London', # UTC
'Canada/Yukon', # UTC - 8
'Europe/Moscow', # UTC + 4
]
tz_format = DEFAULT_DATE_TIME_FORMAT
self._setstaff_login()
self._mkdir(getattr(settings, 'GIT_REPO_DIR'))
self._add_edx4edx()
date = CourseImportLog.objects.first().created.replace(tzinfo=UTC)
for timezone in tz_names:
with (override_settings(TIME_ZONE=timezone)):
date_text = get_time_display(date, tz_format, settings.TIME_ZONE)
response = self.client.get(reverse('gitlogs'))
self.assertIn(date_text, response.content.decode('UTF-8'))
self._rm_edx4edx()
def test_gitlog_bad_course(self):
"""
Make sure we gracefully handle courses that don't exist.
"""
self._setstaff_login()
response = self.client.get(
reverse('gitlogs_detail', kwargs={
'course_id': 'Not/Real/Testing'}))
self.assertEqual(404, response.status_code)
def test_gitlog_no_logs(self):
"""
Make sure the template behaves well when rendered despite there not being any logs.
(This is for courses imported using methods other than the git_add_course command)
"""
self._setstaff_login()
self._mkdir(getattr(settings, 'GIT_REPO_DIR'))
self._add_edx4edx()
# Simulate a lack of git import logs
import_logs = CourseImportLog.objects.all()
import_logs.delete()
response = self.client.get(
reverse('gitlogs_detail', kwargs={
'course_id': 'MITx/edx4edx/edx4edx'
})
)
self.assertIn(
'No git import logs have been recorded for this course.',
response.content
)
self._rm_edx4edx()
def test_gitlog_pagination_out_of_range_invalid(self):
"""
Make sure the pagination behaves properly when the requested page is out
of range.
"""
self._setstaff_login()
mongoengine.connect(TEST_MONGODB_LOG['db'])
for _ in xrange(15):
CourseImportLog(
course_id=SlashSeparatedCourseKey("test", "test", "test"),
location="location",
import_log="import_log",
git_log="git_log",
repo_dir="repo_dir",
created=datetime.now()
).save()
for page, expected in [(-1, 1), (1, 1), (2, 2), (30, 2), ('abc', 1)]:
response = self.client.get(
'{}?page={}'.format(
reverse('gitlogs'),
page
)
)
self.assertIn(
'Page {} of 2'.format(expected),
response.content
)
CourseImportLog.objects.delete()
def test_gitlog_courseteam_access(self):
"""
Ensure course team users are allowed to access only their own course.
"""
self._mkdir(getattr(settings, 'GIT_REPO_DIR'))
self._setstaff_login()
self._add_edx4edx()
self.user.is_staff = False
self.user.save()
logged_in = self.client.login(username=self.user.username,
password='foo')
response = self.client.get(reverse('gitlogs'))
# Make sure our non privileged user doesn't have access to all logs
self.assertEqual(response.status_code, 404)
# Or specific logs
response = self.client.get(reverse('gitlogs_detail', kwargs={
'course_id': 'MITx/edx4edx/edx4edx'
}))
self.assertEqual(response.status_code, 404)
# Add user as staff in course team
def_ms = modulestore()
course = def_ms.get_course(SlashSeparatedCourseKey('MITx', 'edx4edx', 'edx4edx'))
CourseStaffRole(course.id).add_users(self.user)
self.assertTrue(CourseStaffRole(course.id).has_user(self.user))
logged_in = self.client.login(username=self.user.username,
password='foo')
self.assertTrue(logged_in)
response = self.client.get(
reverse('gitlogs_detail', kwargs={
'course_id': 'MITx/edx4edx/edx4edx'
}))
self.assertIn('======> IMPORTING course',
response.content)
self._rm_edx4edx()
| agpl-3.0 |
factorlibre/OCB | openerp/modules/loading.py | 7 | 21284 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules (also called addons) management.
"""
import itertools
import logging
import os
import sys
import threading
import time
import openerp
import openerp.modules.db
import openerp.modules.graph
import openerp.modules.migration
import openerp.modules.registry
import openerp.osv as osv
import openerp.tools as tools
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.modules.module import initialize_sys_path, \
load_openerp_module, init_module_models, adapt_version
from module import runs_post_install
_logger = logging.getLogger(__name__)
_test_logger = logging.getLogger('openerp.tests')
def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None):
"""Migrates+Updates or Installs all module nodes from ``graph``
:param graph: graph of module nodes to load
:param status: deprecated parameter, unused, left to avoid changing signature in 8.0
:param perform_checks: whether module descriptors should be checked for validity (prints warnings
for same cases)
:param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
:return: list of modules that were installed or updated
"""
def load_test(module_name, idref, mode):
cr.commit()
try:
_load_data(cr, module_name, idref, mode, 'test')
return True
except Exception:
_test_logger.exception(
'module %s: an exception occurred in a test', module_name)
return False
finally:
if tools.config.options['test_commit']:
cr.commit()
else:
cr.rollback()
# avoid keeping stale xml_id, etc. in cache
openerp.modules.registry.RegistryManager.clear_caches(cr.dbname)
def _get_files_of_kind(kind):
if kind == 'demo':
kind = ['demo_xml', 'demo']
elif kind == 'data':
kind = ['init_xml', 'update_xml', 'data']
if isinstance(kind, str):
kind = [kind]
files = []
for k in kind:
for f in package.data[k]:
files.append(f)
if k.endswith('_xml') and not (k == 'init_xml' and not f.endswith('.xml')):
# init_xml, update_xml and demo_xml are deprecated except
# for the case of init_xml with yaml, csv and sql files as
# we can't specify noupdate for those files.
correct_key = 'demo' if k.count('demo') else 'data'
_logger.warning(
"module %s: key '%s' is deprecated in favor of '%s' for file '%s'.",
package.name, k, correct_key, f
)
return files
def _load_data(cr, module_name, idref, mode, kind):
"""
kind: data, demo, test, init_xml, update_xml, demo_xml.
noupdate is False, unless it is demo data or it is csv data in
init mode.
"""
try:
if kind in ('demo', 'test'):
threading.currentThread().testing = True
for filename in _get_files_of_kind(kind):
_logger.info("loading %s/%s", module_name, filename)
noupdate = False
if kind in ('demo', 'demo_xml') or (filename.endswith('.csv') and kind in ('init', 'init_xml')):
noupdate = True
tools.convert_file(cr, module_name, filename, idref, mode, noupdate, kind, report)
finally:
if kind in ('demo', 'test'):
threading.currentThread().testing = False
processed_modules = []
loaded_modules = []
registry = openerp.registry(cr.dbname)
migrations = openerp.modules.migration.MigrationManager(cr, graph)
_logger.info('loading %d modules...', len(graph))
registry.clear_manual_fields()
# register, instantiate and initialize models for each modules
t0 = time.time()
t0_sql = openerp.sql_db.sql_counter
for index, package in enumerate(graph):
module_name = package.name
module_id = package.id
if skip_modules and module_name in skip_modules:
continue
migrations.migrate_module(package, 'pre')
load_openerp_module(package.name)
new_install = package.state == 'to install'
if new_install:
py_module = sys.modules['openerp.addons.%s' % (module_name,)]
pre_init = package.info.get('pre_init_hook')
if pre_init:
getattr(py_module, pre_init)(cr)
models = registry.load(cr, package)
loaded_modules.append(package.name)
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
registry.setup_models(cr, partial=True)
init_module_models(cr, package.name, models)
idref = {}
mode = 'update'
if hasattr(package, 'init') or package.state == 'to install':
mode = 'init'
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
# Can't put this line out of the loop: ir.module.module will be
# registered by init_module_models() above.
modobj = registry['ir.module.module']
if perform_checks:
modobj.check(cr, SUPERUSER_ID, [module_id])
if package.state=='to upgrade':
# upgrading the module information
modobj.write(cr, SUPERUSER_ID, [module_id], modobj.get_values_from_terp(package.data))
_load_data(cr, module_name, idref, mode, kind='data')
has_demo = hasattr(package, 'demo') or (package.dbdemo and package.state != 'installed')
if has_demo:
_load_data(cr, module_name, idref, mode, kind='demo')
cr.execute('update ir_module_module set demo=%s where id=%s', (True, module_id))
modobj.invalidate_cache(cr, SUPERUSER_ID, ['demo'], [module_id])
migrations.migrate_module(package, 'post')
if new_install:
post_init = package.info.get('post_init_hook')
if post_init:
getattr(py_module, post_init)(cr, registry)
registry._init_modules.add(package.name)
# validate all the views at a whole
registry['ir.ui.view']._validate_module_views(cr, SUPERUSER_ID, module_name)
if has_demo:
# launch tests only in demo mode, allowing tests to use demo data.
if tools.config.options['test_enable']:
# YAML tests
report.record_result(load_test(module_name, idref, mode))
# Python tests
ir_http = registry['ir.http']
if hasattr(ir_http, '_routing_map'):
# Force routing map to be rebuilt between each module test suite
del(ir_http._routing_map)
report.record_result(openerp.modules.module.run_unit_tests(module_name, cr.dbname))
processed_modules.append(package.name)
ver = adapt_version(package.data['version'])
# Set new modules and dependencies
modobj.write(cr, SUPERUSER_ID, [module_id], {'state': 'installed', 'latest_version': ver})
# Update translations for all installed languages
modobj.update_translations(cr, SUPERUSER_ID, [module_id], None, {'overwrite': openerp.tools.config["overwrite_existing_translations"]})
package.state = 'installed'
for kind in ('init', 'demo', 'update'):
if hasattr(package, kind):
delattr(package, kind)
registry._init_modules.add(package.name)
cr.commit()
_logger.log(25, "%s modules loaded in %.2fs, %s queries", len(graph), time.time() - t0, openerp.sql_db.sql_counter - t0_sql)
registry.clear_manual_fields()
cr.commit()
return loaded_modules, processed_modules
def _check_module_names(cr, module_names):
mod_names = set(module_names)
if 'base' in mod_names:
# ignore dummy 'all' module
if 'all' in mod_names:
mod_names.remove('all')
if mod_names:
cr.execute("SELECT count(id) AS count FROM ir_module_module WHERE name in %s", (tuple(mod_names),))
if cr.dictfetchone()['count'] != len(mod_names):
# find out what module name(s) are incorrect:
cr.execute("SELECT name FROM ir_module_module")
incorrect_names = mod_names.difference([x['name'] for x in cr.dictfetchall()])
_logger.warning('invalid module names, ignored: %s', ", ".join(incorrect_names))
def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_modules, perform_checks):
"""Loads modules marked with ``states``, adding them to ``graph`` and
``loaded_modules`` and returns a list of installed/upgraded modules."""
processed_modules = []
while True:
cr.execute("SELECT name from ir_module_module WHERE state IN %s" ,(tuple(states),))
module_list = [name for (name,) in cr.fetchall() if name not in graph]
if not module_list:
break
graph.add_modules(cr, module_list, force)
_logger.debug('Updating graph with %d more modules', len(module_list))
loaded, processed = load_module_graph(cr, graph, progressdict, report=report, skip_modules=loaded_modules, perform_checks=perform_checks)
processed_modules.extend(processed)
loaded_modules.extend(loaded)
if not processed:
break
return processed_modules
def load_modules(db, force_demo=False, status=None, update_module=False):
initialize_sys_path()
force = []
if force_demo:
force.append('demo')
cr = db.cursor()
try:
if not openerp.modules.db.is_initialized(cr):
_logger.info("init db")
openerp.modules.db.initialize(cr)
update_module = True # process auto-installed modules
tools.config["init"]["all"] = 1
tools.config['update']['all'] = 1
if not tools.config['without_demo']:
tools.config["demo"]['all'] = 1
# This is a brand new registry, just created in
# openerp.modules.registry.RegistryManager.new().
registry = openerp.registry(cr.dbname)
if 'base' in tools.config['update'] or 'all' in tools.config['update']:
cr.execute("update ir_module_module set state=%s where name=%s and state=%s", ('to upgrade', 'base', 'installed'))
# STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps)
graph = openerp.modules.graph.Graph()
graph.add_module(cr, 'base', force)
if not graph:
_logger.critical('module base cannot be loaded! (hint: verify addons-path)')
raise osv.osv.except_osv(_('Could not load base module'), _('module base cannot be loaded! (hint: verify addons-path)'))
# processed_modules: for cleanup step after install
# loaded_modules: to avoid double loading
report = registry._assertion_report
loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=update_module, report=report)
if tools.config['load_language'] or update_module:
# some base models are used below, so make sure they are set up
registry.setup_models(cr, partial=True)
if tools.config['load_language']:
for lang in tools.config['load_language'].split(','):
tools.load_language(cr, lang)
# STEP 2: Mark other modules to be loaded/updated
if update_module:
modobj = registry['ir.module.module']
_logger.info('updating modules list')
modobj.update_list(cr, SUPERUSER_ID)
_check_module_names(cr, itertools.chain(tools.config['init'].keys(), tools.config['update'].keys()))
mods = [k for k in tools.config['init'] if tools.config['init'][k]]
if mods:
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)])
if ids:
modobj.button_install(cr, SUPERUSER_ID, ids)
mods = [k for k in tools.config['update'] if tools.config['update'][k]]
if mods:
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'installed'), ('name', 'in', mods)])
if ids:
modobj.button_upgrade(cr, SUPERUSER_ID, ids)
cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base'))
modobj.invalidate_cache(cr, SUPERUSER_ID, ['state'])
# STEP 3: Load marked modules (skipping base which was done in STEP 1)
# IMPORTANT: this is done in two parts, first loading all installed or
# partially installed modules (i.e. installed/to upgrade), to
# offer a consistent system to the second part: installing
# newly selected modules.
# We include the modules 'to remove' in the first step, because
# they are part of the "currently installed" modules. They will
# be dropped in STEP 6 later, before restarting the loading
# process.
# IMPORTANT 2: We have to loop here until all relevant modules have been
# processed, because in some rare cases the dependencies have
# changed, and modules that depend on an uninstalled module
# will not be processed on the first pass.
# It's especially useful for migrations.
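# Example: if module A gains a new dependency on a not-yet-installed module B
# during an upgrade, the first pass can only install B; A is picked up on the
# following pass, hence the loop below.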
previously_processed = -1
while previously_processed < len(processed_modules):
previously_processed = len(processed_modules)
processed_modules += load_marked_modules(cr, graph,
['installed', 'to upgrade', 'to remove'],
force, status, report, loaded_modules, update_module)
if update_module:
processed_modules += load_marked_modules(cr, graph,
['to install'], force, status, report,
loaded_modules, update_module)
registry.setup_models(cr)
# STEP 4: Finish and cleanup installations
if processed_modules:
cr.execute("""select model,name from ir_model where id NOT IN (select distinct model_id from ir_model_access)""")
for (model, name) in cr.fetchall():
if model in registry and not registry[model].is_transient() and not isinstance(registry[model], openerp.osv.orm.AbstractModel):
_logger.warning('The model %s has no access rules, consider adding one. E.g. access_%s,access_%s,model_%s,,1,0,0,0',
model, model.replace('.', '_'), model.replace('.', '_'), model.replace('.', '_'))
# Temporary warning while we remove access rights on osv_memory objects, as they have
# been replaced by owner-only access rights
cr.execute("""select distinct mod.model, mod.name from ir_model_access acc, ir_model mod where acc.model_id = mod.id""")
for (model, name) in cr.fetchall():
if model in registry and registry[model].is_transient():
_logger.warning('The transient model %s (%s) should not have explicit access rules!', model, name)
cr.execute("SELECT model from ir_model")
for (model,) in cr.fetchall():
if model in registry:
registry[model]._check_removed_columns(cr, log=True)
else:
_logger.warning("Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)", model)
# Cleanup orphan records
registry['ir.model.data']._process_end(cr, SUPERUSER_ID, processed_modules)
for kind in ('init', 'demo', 'update'):
tools.config[kind] = {}
cr.commit()
# STEP 5: Cleanup menus
# Remove menu items that are not referenced by any of other
# (child) menu item, ir_values, or ir_model_data.
# TODO: This code could be a method of ir_ui_menu. Remove menu without actions of children
if update_module:
while True:
cr.execute('''delete from
ir_ui_menu
where
(id not IN (select parent_id from ir_ui_menu where parent_id is not null))
and
(id not IN (select res_id from ir_values where model='ir.ui.menu'))
and
(id not IN (select res_id from ir_model_data where model='ir.ui.menu'))''')
cr.commit()
if not cr.rowcount:
break
else:
_logger.info('removed %d unused menus', cr.rowcount)
# STEP 6: Uninstall modules to remove
if update_module:
# Remove records referenced from ir_model_data for modules to be
# removed (and remove the references from ir_model_data).
cr.execute("SELECT name, id FROM ir_module_module WHERE state=%s", ('to remove',))
modules_to_remove = dict(cr.fetchall())
if modules_to_remove:
pkgs = reversed([p for p in graph if p.name in modules_to_remove])
for pkg in pkgs:
uninstall_hook = pkg.info.get('uninstall_hook')
if uninstall_hook:
py_module = sys.modules['openerp.addons.%s' % (pkg.name,)]
getattr(py_module, uninstall_hook)(cr, registry)
registry['ir.module.module'].module_uninstall(cr, SUPERUSER_ID, modules_to_remove.values())
# Recursive reload, should only happen once, because there should be no
# modules to remove next time
cr.commit()
_logger.info('Reloading registry once more after uninstalling modules')
openerp.api.Environment.reset()
return openerp.modules.registry.RegistryManager.new(cr.dbname, force_demo, status, update_module)
# STEP 7: verify custom views on every model
if update_module:
Views = registry['ir.ui.view']
custom_view_test = True
for model in registry.models.keys():
if not Views._validate_custom_views(cr, SUPERUSER_ID, model):
custom_view_test = False
_logger.error('invalid custom view(s) for model %s', model)
report.record_result(custom_view_test)
if report.failures:
_logger.error('At least one test failed when loading the modules.')
else:
_logger.info('Modules loaded.')
# STEP 8: call _register_hook on every model
for model in registry.models.values():
model._register_hook(cr)
# STEP 9: Run the post-install tests
cr.commit()
t0 = time.time()
t0_sql = openerp.sql_db.sql_counter
if openerp.tools.config['test_enable']:
if update_module:
cr.execute("SELECT name FROM ir_module_module WHERE state='installed' and name = ANY(%s)", (processed_modules,))
else:
cr.execute("SELECT name FROM ir_module_module WHERE state='installed'")
for module_name in cr.fetchall():
report.record_result(openerp.modules.module.run_unit_tests(module_name[0], cr.dbname, position=runs_post_install))
_logger.log(25, "All post-tested in %.2fs, %s queries", time.time() - t0, openerp.sql_db.sql_counter - t0_sql)
finally:
cr.close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Houzz/luigi | luigi/contrib/scalding.py | 26 | 10702 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import subprocess
import warnings
from luigi import six
import luigi.configuration
import luigi.contrib.hadoop
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
from luigi import LocalTarget
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
"""
Scalding support for Luigi.
Example configuration section in luigi.cfg::
[scalding]
# scala home directory, which should include a lib subdir with scala jars.
scala-home: /usr/share/scala
# scalding home directory, which should include a lib subdir with
# scalding-*-assembly-* jars as built from the official Twitter build script.
scalding-home: /usr/share/scalding
# provided dependencies, e.g. jars required for compiling but not executing
# scalding jobs. Currently required jars:
# org.apache.hadoop/hadoop-core/0.20.2
# org.slf4j/slf4j-log4j12/1.6.6
# log4j/log4j/1.2.15
# commons-httpclient/commons-httpclient/3.1
# commons-cli/commons-cli/1.2
# org.apache.zookeeper/zookeeper/3.3.4
scalding-provided: /usr/share/scalding/provided
# additional jars required.
scalding-libjars: /usr/share/scalding/libjars
"""
class ScaldingJobRunner(luigi.contrib.hadoop.JobRunner):
"""
JobRunner for `pyscald` commands. Used to run a ScaldingJobTask.
"""
def __init__(self):
conf = luigi.configuration.get_config()
default = os.environ.get('SCALA_HOME', '/usr/share/scala')
self.scala_home = conf.get('scalding', 'scala-home', default)
default = os.environ.get('SCALDING_HOME', '/usr/share/scalding')
self.scalding_home = conf.get('scalding', 'scalding-home', default)
self.provided_dir = conf.get(
'scalding', 'scalding-provided', os.path.join(default, 'provided'))
self.libjars_dir = conf.get(
'scalding', 'scalding-libjars', os.path.join(default, 'libjars'))
self.tmp_dir = LocalTarget(is_tmp=True)
def _get_jars(self, path):
return [os.path.join(path, j) for j in os.listdir(path)
if j.endswith('.jar')]
def get_scala_jars(self, include_compiler=False):
lib_dir = os.path.join(self.scala_home, 'lib')
jars = [os.path.join(lib_dir, 'scala-library.jar')]
# additional jar for scala 2.10 only
reflect = os.path.join(lib_dir, 'scala-reflect.jar')
if os.path.exists(reflect):
jars.append(reflect)
if include_compiler:
jars.append(os.path.join(lib_dir, 'scala-compiler.jar'))
return jars
def get_scalding_jars(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
return self._get_jars(lib_dir)
def get_scalding_core(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
for j in os.listdir(lib_dir):
if j.startswith('scalding-core-'):
p = os.path.join(lib_dir, j)
logger.debug('Found scalding-core: %s', p)
return p
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding-core.')
def get_provided_jars(self):
return self._get_jars(self.provided_dir)
def get_libjars(self):
return self._get_jars(self.libjars_dir)
def get_tmp_job_jar(self, source):
job_name = os.path.basename(os.path.splitext(source)[0])
return os.path.join(self.tmp_dir.path, job_name + '.jar')
def get_build_dir(self, source):
build_dir = os.path.join(self.tmp_dir.path, 'build')
return build_dir
def get_job_class(self, source):
# find name of the job class
# usually the one that matches file name or last class that extends Job
job_name = os.path.splitext(os.path.basename(source))[0]
package = None
job_class = None
for l in open(source).readlines():
p = re.search(r'package\s+([^\s\(]+)', l)
if p:
package = p.groups()[0]
p = re.search(r'class\s+([^\s\(]+).*extends\s+.*Job', l)
if p:
job_class = p.groups()[0]
if job_class == job_name:
break
if job_class:
if package:
job_class = package + '.' + job_class
logger.debug('Found scalding job class: %s', job_class)
return job_class
else:
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding job class.')
def build_job_jar(self, job):
job_jar = job.jar()
if job_jar:
if not os.path.exists(job_jar):
logger.error("Can't find jar: %s, full path %s", job_jar, os.path.abspath(job_jar))
raise Exception("job jar does not exist")
if not job.job_class():
logger.error("Undefined job_class()")
raise Exception("Undefined job_class()")
return job_jar
job_src = job.source()
if not job_src:
logger.error("Both source() and jar() undefined")
raise Exception("Both source() and jar() undefined")
if not os.path.exists(job_src):
logger.error("Can't find source: %s, full path %s", job_src, os.path.abspath(job_src))
raise Exception("job source does not exist")
job_src = job.source()
job_jar = self.get_tmp_job_jar(job_src)
build_dir = self.get_build_dir(job_src)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
classpath = ':'.join(filter(None,
self.get_scalding_jars() +
self.get_provided_jars() +
self.get_libjars() +
job.extra_jars()))
scala_cp = ':'.join(self.get_scala_jars(include_compiler=True))
# compile scala source
arglist = ['java', '-cp', scala_cp, 'scala.tools.nsc.Main',
'-classpath', classpath,
'-d', build_dir, job_src]
logger.info('Compiling scala source: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
# build job jar file
arglist = ['jar', 'cf', job_jar, '-C', build_dir, '.']
logger.info('Building job jar: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
return job_jar
def run_job(self, job, tracking_url_callback=None):
if tracking_url_callback is not None:
warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
"used instead.", DeprecationWarning)
job_jar = self.build_job_jar(job)
jars = [job_jar] + self.get_libjars() + job.extra_jars()
scalding_core = self.get_scalding_core()
libjars = ','.join(filter(None, jars))
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', scalding_core, '-libjars', libjars]
arglist += ['-D%s' % c for c in job.jobconfs()]
job_class = job.job_class() or self.get_job_class(job.source())
arglist += [job_class, '--hdfs']
# scalding does not parse arguments with '=' properly
arglist += ['--name', job.task_id.replace('=', ':')]
(tmp_files, job_args) = luigi.contrib.hadoop_jar.fix_paths(job)
arglist += job_args
env = os.environ.copy()
jars.append(scalding_core)
hadoop_cp = ':'.join(filter(None, jars))
env['HADOOP_CLASSPATH'] = hadoop_cp
logger.info("Submitting Hadoop job: HADOOP_CLASSPATH=%s %s",
hadoop_cp, subprocess.list2cmdline(arglist))
luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url, env=env)
for a, b in tmp_files:
a.move(b)
class ScaldingJobTask(luigi.contrib.hadoop.BaseHadoopJobTask):
"""
A job task for Scalding that defines a scala source and (optional) main method.
requires() should return a dictionary where the keys are Scalding argument
names and values are sub tasks or lists of subtasks.
For example:
.. code-block:: python
{'input1': A, 'input2': C} => --input1 <Aoutput> --input2 <Coutput>
{'input1': [A, B], 'input2': [C]} => --input1 <Aoutput> <Boutput> --input2 <Coutput>
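A minimal subclass might look like the following sketch (illustrative
only; the scala source path, the InputText dependency and the output
location are placeholders, not part of this module):
.. code-block:: python
    class WordCount(ScaldingJobTask):
        def source(self):
            # compile this scala file into the job jar
            return self.relpath(__file__, 'scala/WordCount.scala')
        def requires(self):
            # becomes: --input <output path of InputText>
            return {'input': InputText()}
        def output(self):
            return luigi.contrib.hdfs.HdfsTarget('/tmp/wordcount-output')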
"""
def relpath(self, current_file, rel_path):
"""
Compute path given current file and relative path.
"""
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path
def source(self):
"""
Path to the scala source for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def jar(self):
"""
Path to the jar file for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def extra_jars(self):
"""
Extra jars for building and running this Scalding Job.
"""
return []
def job_class(self):
"""
optional main job class for this Scalding Job.
"""
return None
def job_runner(self):
return ScaldingJobRunner()
def atomic_output(self):
"""
If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes.
"""
return True
def requires(self):
return {}
def job_args(self):
"""
Extra arguments to pass to the Scalding job.
"""
return []
def args(self):
"""
Returns an array of args to pass to the job.
"""
arglist = []
for k, v in six.iteritems(self.requires_hadoop()):
arglist.append('--' + k)
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist
| apache-2.0 |
seppi91/CouchPotatoServer | libs/suds/plugin.py | 196 | 7228 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The plugin module provides classes for implementation
of suds plugins.
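A minimal usage sketch (illustrative only; wsdl_url is a placeholder
and the I{plugins} keyword is how a suds client accepts plugin
instances)::
    from suds.client import Client
    from suds.plugin import MessagePlugin
    class LogPlugin(MessagePlugin):
        def sending(self, context):
            # context.envelope holds the outgoing envelope text
            print context.envelope
        def received(self, context):
            # context.reply holds the raw reply text before parsing
            print context.reply
    client = Client(wsdl_url, plugins=[LogPlugin()])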
"""
from suds import *
from logging import getLogger
log = getLogger(__name__)
class Context(object):
"""
Plugin context.
"""
pass
class InitContext(Context):
"""
Init Context.
@ivar wsdl: The wsdl.
@type wsdl: L{wsdl.Definitions}
"""
pass
class DocumentContext(Context):
"""
The XML document load context.
@ivar url: The URL.
@type url: str
@ivar document: Either the XML text or the B{parsed} document root.
@type document: (str|L{sax.element.Element})
"""
pass
class MessageContext(Context):
"""
The context for sending the soap envelope.
@ivar envelope: The soap envelope to be sent.
@type envelope: (str|L{sax.element.Element})
@ivar reply: The reply.
@type reply: (str|L{sax.element.Element}|object)
"""
pass
class Plugin:
"""
Plugin base.
"""
pass
class InitPlugin(Plugin):
"""
The base class for suds I{init} plugins.
"""
def initialized(self, context):
"""
Suds client initialization.
Called after the wsdl has been loaded. Provides the plugin
with the opportunity to inspect/modify the WSDL.
@param context: The init context.
@type context: L{InitContext}
"""
pass
class DocumentPlugin(Plugin):
"""
The base class for suds I{document} plugins.
"""
def loaded(self, context):
"""
Suds has loaded a WSDL/XSD document. Provides the plugin
with an opportunity to inspect/modify the unparsed document.
Called after each WSDL/XSD document is loaded.
@param context: The document context.
@type context: L{DocumentContext}
"""
pass
def parsed(self, context):
"""
Suds has parsed a WSDL/XSD document. Provides the plugin
with an opportunity to inspect/modify the parsed document.
Called after each WSDL/XSD document is parsed.
@param context: The document context.
@type context: L{DocumentContext}
"""
pass
class MessagePlugin(Plugin):
"""
The base class for suds I{soap message} plugins.
"""
def marshalled(self, context):
"""
Suds will send the specified soap envelope.
Provides the plugin with the opportunity to inspect/modify
the envelope Document before it is sent.
@param context: The send context.
The I{envelope} is the envelope document.
@type context: L{MessageContext}
"""
pass
def sending(self, context):
"""
Suds will send the specified soap envelope.
Provides the plugin with the opportunity to inspect/modify
the message text before it is sent.
@param context: The send context.
The I{envelope} is the envelope text.
@type context: L{MessageContext}
"""
pass
def received(self, context):
"""
Suds has received the specified reply.
Provides the plugin with the opportunity to inspect/modify
the received XML text before it is SAX parsed.
@param context: The reply context.
The I{reply} is the raw text.
@type context: L{MessageContext}
"""
pass
def parsed(self, context):
"""
Suds has sax parsed the received reply.
Provides the plugin with the opportunity to inspect/modify
the sax parsed DOM tree for the reply before it is unmarshalled.
@param context: The reply context.
The I{reply} is the DOM tree.
@type context: L{MessageContext}
"""
pass
def unmarshalled(self, context):
"""
Suds has unmarshalled the received reply.
Provides the plugin with the opportunity to inspect/modify
the unmarshalled reply object before it is returned.
@param context: The reply context.
The I{reply} is the unmarshalled suds object.
@type context: L{MessageContext}
"""
pass
class PluginContainer:
"""
Plugin container provides easy method invocation.
@ivar plugins: A list of plugin objects.
@type plugins: [L{Plugin},]
@cvar ctxclass: A dict of plugin method / context classes.
@type ctxclass: dict
"""
domains = {\
'init': (InitContext, InitPlugin),
'document': (DocumentContext, DocumentPlugin),
'message': (MessageContext, MessagePlugin ),
}
def __init__(self, plugins):
"""
@param plugins: A list of plugin objects.
@type plugins: [L{Plugin},]
"""
self.plugins = plugins
def __getattr__(self, name):
domain = self.domains.get(name)
if domain:
plugins = []
ctx, pclass = domain
for p in self.plugins:
if isinstance(p, pclass):
plugins.append(p)
return PluginDomain(ctx, plugins)
else:
raise Exception, 'plugin domain (%s), invalid' % name
class PluginDomain:
"""
The plugin domain.
@ivar ctx: A context.
@type ctx: L{Context}
@ivar plugins: A list of plugins (targets).
@type plugins: list
"""
def __init__(self, ctx, plugins):
self.ctx = ctx
self.plugins = plugins
def __getattr__(self, name):
return Method(name, self)
class Method:
"""
Plugin method.
@ivar name: The method name.
@type name: str
@ivar domain: The plugin domain.
@type domain: L{PluginDomain}
"""
def __init__(self, name, domain):
"""
@param name: The method name.
@type name: str
@param domain: A plugin domain.
@type domain: L{PluginDomain}
"""
self.name = name
self.domain = domain
def __call__(self, **kwargs):
ctx = self.domain.ctx()
ctx.__dict__.update(kwargs)
for plugin in self.domain.plugins:
try:
method = getattr(plugin, self.name, None)
if method and callable(method):
method(ctx)
except Exception, pe:
log.exception(pe)
return ctx
| gpl-3.0 |
eharney/nova | nova/virt/xenapi/network_utils.py | 5 | 1951 | # Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of network
records and their attributes like bridges, PIFs, QoS, as well as
their lookup functions.
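Illustrative lookup (a sketch; 'session' is assumed to be an active
XenAPI session wrapper and 'xenbr0' is a placeholder bridge name)::
    net_ref = find_network_with_bridge(session, 'xenbr0')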
"""
from nova import exception
from nova.openstack.common.gettextutils import _
def find_network_with_name_label(session, name_label):
networks = session.network.get_by_name_label(name_label)
if len(networks) == 1:
return networks[0]
elif len(networks) > 1:
raise exception.NovaException(
_('Found non-unique network for name_label %s') %
name_label)
else:
return None
def find_network_with_bridge(session, bridge):
"""Return the network on which the bridge is attached, if found.
The bridge is defined in the nova db and can be found either in the
'bridge' or 'name_label' fields of the XenAPI network record.
"""
expr = ('field "name__label" = "%s" or field "bridge" = "%s"' %
(bridge, bridge))
networks = session.network.get_all_records_where(expr)
if len(networks) == 1:
return networks.keys()[0]
elif len(networks) > 1:
raise exception.NovaException(
_('Found non-unique network for bridge %s') % bridge)
else:
raise exception.NovaException(
_('Found no network for bridge %s') % bridge)
| apache-2.0 |
onyxfish/stovetop | atom/data.py | 19 | 8122 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = '[email protected] (Jeff Scudder)'
import atom.core
XML_TEMPLATE = '{http://www.w3.org/XML/1998/namespace}%s'
ATOM_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
APP_TEMPLATE_V1 = '{http://purl.org/atom/app#}%s'
APP_TEMPLATE_V2 = '{http://www.w3.org/2007/app}%s'
class Name(atom.core.XmlElement):
"""The atom:name element."""
_qname = ATOM_TEMPLATE % 'name'
class Email(atom.core.XmlElement):
"""The atom:email element."""
_qname = ATOM_TEMPLATE % 'email'
class Uri(atom.core.XmlElement):
"""The atom:uri element."""
_qname = ATOM_TEMPLATE % 'uri'
class Person(atom.core.XmlElement):
"""A foundation class which atom:author and atom:contributor extend.
A person contains information like name, email address, and web page URI for
an author or contributor to an Atom feed.
"""
name = Name
email = Email
uri = Uri
class Author(Person):
"""The atom:author element.
An author is a required element in Feed unless each Entry contains an Author.
"""
_qname = ATOM_TEMPLATE % 'author'
class Contributor(Person):
"""The atom:contributor element."""
_qname = ATOM_TEMPLATE % 'contributor'
class Link(atom.core.XmlElement):
"""The atom:link element."""
_qname = ATOM_TEMPLATE % 'link'
href = 'href'
rel = 'rel'
type = 'type'
hreflang = 'hreflang'
title = 'title'
length = 'length'
class Generator(atom.core.XmlElement):
"""The atom:generator element."""
_qname = ATOM_TEMPLATE % 'generator'
uri = 'uri'
version = 'version'
class Text(atom.core.XmlElement):
"""A foundation class from which atom:title, summary, etc. extend.
This class should never be instantiated.
"""
type = 'type'
class Title(Text):
"""The atom:title element."""
_qname = ATOM_TEMPLATE % 'title'
class Subtitle(Text):
"""The atom:subtitle element."""
_qname = ATOM_TEMPLATE % 'subtitle'
class Rights(Text):
"""The atom:rights element."""
_qname = ATOM_TEMPLATE % 'rights'
class Summary(Text):
"""The atom:summary element."""
_qname = ATOM_TEMPLATE % 'summary'
class Content(Text):
"""The atom:content element."""
_qname = ATOM_TEMPLATE % 'content'
src = 'src'
class Category(atom.core.XmlElement):
"""The atom:category element."""
_qname = ATOM_TEMPLATE % 'category'
term = 'term'
scheme = 'scheme'
label = 'label'
class Id(atom.core.XmlElement):
"""The atom:id element."""
_qname = ATOM_TEMPLATE % 'id'
class Icon(atom.core.XmlElement):
"""The atom:icon element."""
_qname = ATOM_TEMPLATE % 'icon'
class Logo(atom.core.XmlElement):
"""The atom:logo element."""
_qname = ATOM_TEMPLATE % 'logo'
class Draft(atom.core.XmlElement):
"""The app:draft element which indicates if this entry should be public."""
_qname = (APP_TEMPLATE_V1 % 'draft', APP_TEMPLATE_V2 % 'draft')
class Control(atom.core.XmlElement):
"""The app:control element indicating restrictions on publication.
The APP control element may contain a draft element indicating whether or
not this entry should be publicly available.
"""
_qname = (APP_TEMPLATE_V1 % 'control', APP_TEMPLATE_V2 % 'control')
draft = Draft
class Date(atom.core.XmlElement):
"""A parent class for atom:updated, published, etc."""
class Updated(Date):
"""The atom:updated element."""
_qname = ATOM_TEMPLATE % 'updated'
class Published(Date):
"""The atom:published element."""
_qname = ATOM_TEMPLATE % 'published'
class LinkFinder(object):
"""An "interface" providing methods to find link elements
Entry elements often contain multiple links which differ in the rel
attribute or content type. Often, developers are interested in a specific
type of link so this class provides methods to find specific classes of
links.
This class is used as a mixin in Atom entries and feeds.
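For example (illustrative usage, assuming 'entry' is a parsed Entry
instance)::
    edit_url = entry.find_url('edit')
    self_link = entry.get_link('self')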
"""
def find_url(self, rel):
"""Returns the URL in a link with the desired rel value."""
for link in self.link:
if link.rel == rel and link.href:
return link.href
return None
FindUrl = find_url
def get_link(self, rel):
"""Returns a link object which has the desired rel value.
If you are interested in the URL instead of the link object,
consider using find_url instead.
"""
for link in self.link:
if link.rel == rel and link.href:
return link
return None
GetLink = get_link
def find_self_link(self):
"""Find the first link with rel set to 'self'
Returns:
A str containing the link's href or None if none of the links had rel
equal to 'self'
"""
return self.find_url('self')
FindSelfLink = find_self_link
def get_self_link(self):
return self.get_link('self')
GetSelfLink = get_self_link
def find_edit_link(self):
return self.find_url('edit')
FindEditLink = find_edit_link
def get_edit_link(self):
return self.get_link('edit')
GetEditLink = get_edit_link
def find_edit_media_link(self):
link = self.find_url('edit-media')
# Search for media-edit as well since Picasa API used media-edit instead.
if link is None:
return self.find_url('media-edit')
return link
FindEditMediaLink = find_edit_media_link
def get_edit_media_link(self):
link = self.get_link('edit-media')
if link is None:
return self.get_link('media-edit')
return link
GetEditMediaLink = get_edit_media_link
def find_next_link(self):
return self.find_url('next')
FindNextLink = find_next_link
def get_next_link(self):
return self.get_link('next')
GetNextLink = get_next_link
def find_license_link(self):
return self.find_url('license')
FindLicenseLink = find_license_link
def get_license_link(self):
return self.get_link('license')
GetLicenseLink = get_license_link
def find_alternate_link(self):
return self.find_url('alternate')
FindAlternateLink = find_alternate_link
def get_alternate_link(self):
return self.get_link('alternate')
GetAlternateLink = get_alternate_link
class FeedEntryParent(atom.core.XmlElement, LinkFinder):
"""A super class for atom:feed and entry, contains shared attributes"""
author = [Author]
category = [Category]
contributor = [Contributor]
id = Id
link = [Link]
rights = Rights
title = Title
updated = Updated
def __init__(self, atom_id=None, text=None, *args, **kwargs):
if atom_id is not None:
self.id = atom_id
atom.core.XmlElement.__init__(self, text=text, *args, **kwargs)
class Source(FeedEntryParent):
"""The atom:source element."""
_qname = ATOM_TEMPLATE % 'source'
generator = Generator
icon = Icon
logo = Logo
subtitle = Subtitle
class Entry(FeedEntryParent):
"""The atom:entry element."""
_qname = ATOM_TEMPLATE % 'entry'
content = Content
published = Published
source = Source
summary = Summary
control = Control
class Feed(Source):
"""The atom:feed element which contains entries."""
_qname = ATOM_TEMPLATE % 'feed'
entry = [Entry]
class ExtensionElement(atom.core.XmlElement):
"""Provided for backwards compatibility to the v1 atom.ExtensionElement."""
def __init__(self, tag=None, namespace=None, attributes=None,
children=None, text=None, *args, **kwargs):
if namespace:
self._qname = '{%s}%s' % (namespace, tag)
else:
self._qname = tag
self.children = children or []
self.attributes = attributes or {}
self.text = text
_BecomeChildElement = atom.core.XmlElement._become_child
| mit |
oliverhr/odoo | addons/sale_service/__openerp__.py | 260 | 2447 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Create Tasks on SO',
'version': '1.0',
'category': 'Project Management',
'description': """
Automatically creates project tasks from procurement lines.
===========================================================
This module will automatically create a new task for each procurement order line
(e.g. for sale order lines), if the corresponding product meets the following
characteristics:
* Product Type = Service
* Procurement Method (Order fulfillment) = MTO (Make to Order)
* Supply/Procurement Method = Manufacture
If on top of that a project is specified on the product form (in the Procurement
tab), then the new task will be created in that specific project. Otherwise, the
new task will not belong to any project, and may be added to a project manually
later.
When the project task is completed or cancelled, the corresponding procurement
is updated accordingly. For example, if this procurement corresponds to a sale
order line, the sale order line will be considered delivered when the task is
completed.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['project', 'procurement', 'sale', 'procurement_jit'],
'data': ['views/sale_service_view.xml'],
'demo': ['demo/sale_service_demo.xml'],
'test': ['test/project_task_procurement.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
erjohnso/ansible | lib/ansible/modules/packaging/os/package.py | 26 | 1892 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: package
version_added: 2.0
author:
- Ansible Inc
short_description: Generic OS package manager
description:
- Installs, upgrades and removes packages using the underlying OS package manager.
- For Windows targets, use the M(win_package) module instead.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0)."
- "Be aware that packages are not always named the same and this module will not 'translate' them per distro."
required: true
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: true
use:
description:
- The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
- You should only use this field if the automatic selection is not working for some reason.
required: false
default: auto
requirements:
- Whatever is required for the package plugins specific for each system.
notes:
- This module actually calls the pertinent package modules for each system (apt, yum, etc).
- For Windows targets, use the M(win_package) module instead.
'''
EXAMPLES = '''
- name: install the latest version of ntpdate
package:
name: ntpdate
state: latest
# This uses a variable as this changes per distribution.
- name: remove the apache package
package:
name: "{{ apache }}"
state: absent
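# Force a specific underlying package manager instead of autodetection.
# Illustrative example; 'use: apt' is only meaningful on apt-based systems.
- name: install ntpdate using apt explicitly
  package:
    name: ntpdate
    state: present
    use: apt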
'''
| gpl-3.0 |
eloquence/unisubs | apps/videos/migrations/0071_auto.py | 5 | 17079 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field followers on 'SubtitleLanguage'
db.create_table('videos_subtitlelanguage_followers', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('subtitlelanguage', models.ForeignKey(orm['videos.subtitlelanguage'], null=False)),
('customuser', models.ForeignKey(orm['auth.customuser'], null=False))
))
db.create_unique('videos_subtitlelanguage_followers', ['subtitlelanguage_id', 'customuser_id'])
# Adding M2M table for field followers on 'Video'
db.create_table('videos_video_followers', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('video', models.ForeignKey(orm['videos.video'], null=False)),
('customuser', models.ForeignKey(orm['auth.customuser'], null=False))
))
db.create_unique('videos_video_followers', ['video_id', 'customuser_id'])
def backwards(self, orm):
# Removing M2M table for field followers on 'SubtitleLanguage'
db.delete_table('videos_subtitlelanguage_followers')
# Removing M2M table for field followers on 'Video'
db.delete_table('videos_video_followers')
models = {
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'videos.action': {
'Meta': {'object_name': 'Action'},
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'new_video_title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.stopnotification': {
'Meta': {'object_name': 'StopNotification'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.subtitle': {
'Meta': {'unique_together': "(('version', 'subtitle_id'),)", 'object_name': 'Subtitle'},
'draft': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleDraft']", 'null': 'True'}),
'end_time': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'subtitle_order': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'})
},
'videos.subtitledraft': {
'Meta': {'object_name': 'SubtitleDraft'},
'browser_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'last_saved_packet': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'was_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.subtitleversion': {
'Meta': {'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.usertestresult': {
'Meta': {'object_name': 'UserTestResult'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'get_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task1': ('django.db.models.fields.TextField', [], {}),
'task2': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'task3': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.videourl': {
'Meta': {'object_name': 'VideoUrl'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'videoid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['videos']
| agpl-3.0 |
sergeScherbakov/bgfx | 3rdparty/scintilla/scripts/ScintillaData.py | 69 | 8599 | # ScintillaData.py - implemented 2013 by Neil Hodgson [email protected]
# Released to the public domain.
# Common code used by Scintilla and SciTE for source file regeneration.
# The ScintillaData object exposes information about Scintilla as properties:
# Version properties
# version
# versionDotted
# versionCommad
#
# Date last modified
# dateModified
# yearModified
# mdyModified
# dmyModified
# myModified
#
# Information about lexers and properties defined in lexers
# lexFiles
# sorted list of lexer files
# lexerModules
# sorted list of module names
# lexerProperties
# sorted list of lexer properties
# propertyDocuments
# dictionary of property documentation { name: document string }
# This file can be run to see the data it provides.
# Requires Python 2.5 or later
from __future__ import with_statement
import codecs, datetime, glob, os, sys, textwrap
import FileGenerator
def FindModules(lexFile):
modules = []
with open(lexFile) as f:
for l in f.readlines():
if l.startswith("LexerModule"):
l = l.replace("(", " ")
modules.append(l.split()[1])
return modules
# Properties that start with lexer. or fold. are automatically found but there are some
# older properties that don't follow this pattern so must be explicitly listed.
knownIrregularProperties = [
"fold",
"styling.within.preprocessor",
"tab.timmy.whinge.level",
"asp.default.language",
"html.tags.case.sensitive",
"ps.level",
"ps.tokenize",
"sql.backslash.escapes",
"nsis.uservars",
"nsis.ignorecase"
]
def FindProperties(lexFile):
properties = {}
with open(lexFile) as f:
for l in f.readlines():
if ("GetProperty" in l or "DefineProperty" in l) and "\"" in l:
l = l.strip()
if not l.startswith("//"): # Drop comments
propertyName = l.split("\"")[1]
if propertyName.lower() == propertyName:
# Only allow lower case property names
if propertyName in knownIrregularProperties or \
propertyName.startswith("fold.") or \
propertyName.startswith("lexer."):
properties[propertyName] = 1
return properties
def FindPropertyDocumentation(lexFile):
documents = {}
with open(lexFile) as f:
name = ""
for l in f.readlines():
l = l.strip()
if "// property " in l:
propertyName = l.split()[2]
if propertyName.lower() == propertyName:
# Only allow lower case property names
name = propertyName
documents[name] = ""
elif "DefineProperty" in l and "\"" in l:
propertyName = l.split("\"")[1]
if propertyName.lower() == propertyName:
# Only allow lower case property names
name = propertyName
documents[name] = ""
elif name:
if l.startswith("//"):
if documents[name]:
documents[name] += " "
documents[name] += l[2:].strip()
elif l.startswith("\""):
l = l[1:].strip()
if l.endswith(";"):
l = l[:-1].strip()
if l.endswith(")"):
l = l[:-1].strip()
if l.endswith("\""):
l = l[:-1]
# Fix escaped double quotes
l = l.replace("\\\"", "\"")
documents[name] += l
else:
name = ""
for name in list(documents.keys()):
if documents[name] == "":
del documents[name]
return documents
def FindCredits(historyFile):
credits = []
stage = 0
with codecs.open(historyFile, "r", "utf-8") as f:
for l in f.readlines():
l = l.strip()
if stage == 0 and l == "<table>":
stage = 1
elif stage == 1 and l == "</table>":
stage = 2
if stage == 1 and l.startswith("<td>"):
credit = l[4:-5]
if "<a" in l:
title, a, rest = credit.partition("<a href=")
urlplus, bracket, end = rest.partition(">")
name = end.split("<")[0]
url = urlplus[1:-1]
credit = title.strip()
if credit:
credit += " "
credit += name + " " + url
credits.append(credit)
return credits
def ciCompare(a,b):
return cmp(a.lower(), b.lower())
def ciKey(a):
return a.lower()
def SortListInsensitive(l):
try: # Try key function
l.sort(key=ciKey)
except TypeError: # Earlier version of Python, so use comparison function
l.sort(ciCompare)
class ScintillaData:
def __init__(self, scintillaRoot):
# Discover version information
with open(scintillaRoot + "version.txt") as f:
self.version = f.read().strip()
self.versionDotted = self.version[0] + '.' + self.version[1] + '.' + \
self.version[2]
self.versionCommad = self.version[0] + ', ' + self.version[1] + ', ' + \
self.version[2] + ', 0'
with open(scintillaRoot + "doc/index.html") as f:
self.dateModified = [l for l in f.readlines() if "Date.Modified" in l]\
[0].split('\"')[3]
# 20130602
# index.html, SciTE.html
dtModified = datetime.datetime.strptime(self.dateModified, "%Y%m%d")
self.yearModified = self.dateModified[0:4]
monthModified = dtModified.strftime("%B")
dayModified = "%d" % dtModified.day
self.mdyModified = monthModified + " " + dayModified + " " + self.yearModified
# May 22 2013
# index.html, SciTE.html
self.dmyModified = dayModified + " " + monthModified + " " + self.yearModified
# 22 May 2013
# ScintillaHistory.html -- only first should change
self.myModified = monthModified + " " + self.yearModified
# Find all the lexer source code files
lexFilePaths = glob.glob(scintillaRoot + "lexers/Lex*.cxx")
SortListInsensitive(lexFilePaths)
self.lexFiles = [os.path.basename(f)[:-4] for f in lexFilePaths]
self.lexerModules = []
lexerProperties = set()
self.propertyDocuments = {}
for lexFile in lexFilePaths:
self.lexerModules.extend(FindModules(lexFile))
for k in FindProperties(lexFile).keys():
lexerProperties.add(k)
documents = FindPropertyDocumentation(lexFile)
for k in documents.keys():
if k not in self.propertyDocuments:
self.propertyDocuments[k] = documents[k]
SortListInsensitive(self.lexerModules)
self.lexerProperties = list(lexerProperties)
SortListInsensitive(self.lexerProperties)
self.credits = FindCredits(scintillaRoot + "doc/ScintillaHistory.html")
def printWrapped(text):
print(textwrap.fill(text, subsequent_indent=" "))
if __name__ == "__main__":
sci = ScintillaData("../")
print("Version %s %s %s" % (sci.version, sci.versionDotted, sci.versionCommad))
print("Date last modified %s %s %s %s %s" % (
sci.dateModified, sci.yearModified, sci.mdyModified, sci.dmyModified, sci.myModified))
printWrapped(str(len(sci.lexFiles)) + " lexer files: " + ", ".join(sci.lexFiles))
printWrapped(str(len(sci.lexerModules)) + " lexer modules: " + ", ".join(sci.lexerModules))
printWrapped("Lexer properties: " + ", ".join(sci.lexerProperties))
print("Lexer property documentation:")
documentProperties = list(sci.propertyDocuments.keys())
SortListInsensitive(documentProperties)
for k in documentProperties:
print(" " + k)
print(textwrap.fill(sci.propertyDocuments[k], initial_indent=" ",
subsequent_indent=" "))
print("Credits:")
for c in sci.credits:
if sys.version_info[0] == 2:
print(" " + c.encode("utf-8"))
else:
sys.stdout.buffer.write(b" " + c.encode("utf-8") + b"\n")
| bsd-2-clause |
soarpenguin/ansible | lib/ansible/modules/cloud/openstack/os_nova_host_aggregate.py | 72 | 6745 | #!/usr/bin/python
# Copyright 2016 Jakub Jursa <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_nova_host_aggregate
short_description: Manage OpenStack host aggregates
extends_documentation_fragment: openstack
author: "Jakub Jursa"
version_added: "2.3"
description:
- Create, update, or delete OpenStack host aggregates. If an aggregate
with the supplied name already exists, it will be updated with the
new name, new availability zone, new metadata and new list of hosts.
options:
name:
description: Name of the aggregate.
required: true
metadata:
description: Metadata dict.
required: false
default: None
availability_zone:
description: Availability zone to create aggregate into.
required: false
default: None
hosts:
description: List of hosts to set for an aggregate.
required: false
default: None
state:
description: Should the resource be present or absent.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a host aggregate
- os_nova_host_aggregate:
cloud: mycloud
state: present
name: db_aggregate
hosts:
- host1
- host2
metadata:
type: dbcluster
# Delete an aggregate
- os_nova_host_aggregate:
cloud: mycloud
state: absent
name: db_aggregate
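# Update an existing aggregate's availability zone, metadata and hosts
# (illustrative values; hosts must already be known to nova)
- os_nova_host_aggregate:
    cloud: mycloud
    state: present
    name: db_aggregate
    availability_zone: az1
    hosts:
      - host1
    metadata:
      type: dbcluster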
'''
RETURN = '''
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _needs_update(module, aggregate):
new_metadata = (module.params['metadata'] or {})
new_metadata['availability_zone'] = module.params['availability_zone']
if ((module.params['name'] != aggregate.name) or
(module.params['hosts'] is not None and module.params['hosts'] != aggregate.hosts) or
(module.params['availability_zone'] is not None and module.params['availability_zone'] != aggregate.availability_zone) or
(module.params['metadata'] is not None and new_metadata != aggregate.metadata)):
return True
return False
def _system_state_change(module, aggregate):
state = module.params['state']
if state == 'absent' and aggregate:
return True
if state == 'present':
if aggregate is None:
return True
return _needs_update(module, aggregate)
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
metadata=dict(required=False, default=None, type='dict'),
availability_zone=dict(required=False, default=None),
hosts=dict(required=False, default=None, type='list'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if StrictVersion(shade.__version__) < StrictVersion('1.9.0'):
module.fail_json(msg="To utilize this module, the installed version of"
"the shade library MUST be >=1.9.0")
name = module.params['name']
metadata = module.params['metadata']
availability_zone = module.params['availability_zone']
hosts = module.params['hosts']
state = module.params['state']
if metadata is not None:
metadata.pop('availability_zone', None)
try:
cloud = shade.operator_cloud(**module.params)
aggregates = cloud.search_aggregates(name_or_id=name)
if len(aggregates) == 1:
aggregate = aggregates[0]
elif len(aggregates) == 0:
aggregate = None
else:
raise Exception("Should not happen")
if module.check_mode:
module.exit_json(changed=_system_state_change(module, aggregate))
if state == 'present':
if aggregate is None:
aggregate = cloud.create_aggregate(name=name,
availability_zone=availability_zone)
if hosts:
for h in hosts:
cloud.add_host_to_aggregate(aggregate.id, h)
if metadata:
cloud.set_aggregate_metadata(aggregate.id, metadata)
changed = True
else:
if _needs_update(module, aggregate):
if availability_zone is not None:
aggregate = cloud.update_aggregate(aggregate.id,
name=name, availability_zone=availability_zone)
if metadata is not None:
metas = metadata
for i in (set(aggregate.metadata.keys()) - set(metadata.keys())):
if i != 'availability_zone':
metas[i] = None
cloud.set_aggregate_metadata(aggregate.id, metas)
if hosts is not None:
for i in (set(aggregate.hosts) - set (hosts)):
cloud.remove_host_from_aggregate(aggregate.id, i)
for i in (set(hosts) - set(aggregate.hosts)):
cloud.add_host_to_aggregate(aggregate.id, i)
changed = True
else:
changed = False
module.exit_json(changed=changed)
elif state == 'absent':
if aggregate is None:
changed=False
else:
cloud.delete_aggregate(aggregate.id)
changed=True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
johnnovak/twyg | twyg/tests/css3colors_test.py | 1 | 4379 | import os, sys, unittest
sys.path.append(os.path.join('..'))
from twyg.css3colors import color_to_rgba, rgba_to_color
class TestCSS3Colors(unittest.TestCase):
def test_valid(self):
r, g, b, a = color_to_rgba('aquamarine')
c = rgba_to_color(r, g, b, a, format='rgb')
self.assertEquals('rgb(127, 255, 212)', c)
r, g, b, a = color_to_rgba('000')
c = rgba_to_color(r, g, b, a, format='hex')
self.assertEquals('#000000', c)
r, g, b, a = color_to_rgba(' 000')
c = rgba_to_color(r, g, b, a, format='hex')
self.assertEquals('#000000', c)
r, g, b, a = color_to_rgba('#123')
c = rgba_to_color(r, g, b, a, format='hex')
self.assertEquals('#112233', c)
r, g, b, a = color_to_rgba(' #123')
c = rgba_to_color(r, g, b, a, format='hex')
self.assertEquals('#112233', c)
r, g, b, a = color_to_rgba('#deadbe')
c = rgba_to_color(r, g, b, a, format='hex')
self.assertEquals('#deadbe', c)
r, g, b, a = color_to_rgba('#DEaDbE')
c = rgba_to_color(r, g, b, a, format='hex')
self.assertEquals('#deadbe', c)
r, g, b, a = color_to_rgba('deadbe')
c = rgba_to_color(r, g, b, a, format='hex')
self.assertEquals('#deadbe', c)
r, g, b, a = color_to_rgba('deADBE')
c = rgba_to_color(r, g, b, a, format='hex')
self.assertEquals('#deadbe', c)
r, g, b, a = color_to_rgba('rgb(11, 22, 44)')
c = rgba_to_color(r, g, b, a, format='rgb')
self.assertEquals('rgb(11, 22, 44)', c)
r, g, b, a = color_to_rgba('rgb(000011, 022, 00044)')
c = rgba_to_color(r, g, b, a, format='rgb')
self.assertEquals('rgb(11, 22, 44)', c)
r, g, b, a = color_to_rgba('rgba(256, -1, 79, .4)')
c = rgba_to_color(r, g, b, a, format='rgba')
self.assertEquals('rgba(255, 0, 79, 0.400)', c)
r, g, b, a = color_to_rgba('rgb(11%, 22%, 44%)')
c = rgba_to_color(r, g, b, a, format='rgb_p')
self.assertEquals('rgb(11%, 22%, 44%)', c)
r, g, b, a = color_to_rgba('rgba(11%, 122%, -44%, -100)')
c = rgba_to_color(r, g, b, a, format='rgba_p')
self.assertEquals('rgba(11%, 100%, 0%, 0.000)', c)
r, g, b, a = color_to_rgba(' rgba( 11%, 122%, -44%, -100 ) ')
c = rgba_to_color(r, g, b, a, format='rgba_p')
self.assertEquals('rgba(11%, 100%, 0%, 0.000)', c)
r, g, b, a = color_to_rgba('hsl(130, 30%, +80%)')
c = rgba_to_color(r, g, b, a, format='hsl')
self.assertEquals('hsl(130, 30%, 80%)', c)
r, g, b, a = color_to_rgba('hsla(+99, 12%, 74%, +.33)')
c = rgba_to_color(r, g, b, a, format='hsla')
self.assertEquals('hsla(99, 12%, 74%, 0.330)', c)
r, g, b, a = color_to_rgba(' hsla( +000099 , 000012% , 074% , .330000 ) ')
c = rgba_to_color(r, g, b, a, format='hsla')
self.assertEquals('hsla(99, 12%, 74%, 0.330)', c)
def test_invalid(self):
self.assertRaises(ValueError, color_to_rgba, 'fuchsiax')
self.assertRaises(ValueError, color_to_rgba, '5')
self.assertRaises(ValueError, color_to_rgba, 'rgb()')
self.assertRaises(ValueError, color_to_rgba, 'rgba()')
self.assertRaises(ValueError, color_to_rgba, 'rgba()')
self.assertRaises(ValueError,
color_to_rgba, 'rgb(64., 128, 255)')
self.assertRaises(ValueError,
color_to_rgba, 'rgba(64, 128, 255,)')
self.assertRaises(ValueError,
color_to_rgba, 'rgb(++64, 128, 255)')
self.assertRaises(ValueError,
color_to_rgba, 'rgba( -64 , +128 , 255., +000.5 )')
self.assertRaises(ValueError,
color_to_rgba, 'rgb(25%, 50, 100%)')
self.assertRaises(ValueError,
color_to_rgba, 'rgba(25.0%, 50%, 100%, 0.5)')
self.assertRaises(ValueError,
color_to_rgba, 'hsl(130, 30, 80%)')
self.assertRaises(ValueError,
color_to_rgba, 'hsla(210., 90%, 70%, 0.5)')
if __name__ == '__main__':
unittest.main()
| mit |
js0701/chromium-crosswalk | tools/generate_library_loader/generate_library_loader.py | 79 | 7149 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Creates a library loader (a header and implementation file),
which is a wrapper for dlopen or direct linking with given library.
The loader makes it possible to have the same client code for both cases,
and also makes it easier to write code using dlopen (and also provides
a standard way to do so, and limits the ugliness just to generated files).
For more info refer to http://crbug.com/162733 .
"""
import optparse
import os.path
import re
import sys
HEADER_TEMPLATE = """// This is generated file. Do not modify directly.
// Path to the code generator: %(generator_path)s .
#ifndef %(unique_prefix)s
#define %(unique_prefix)s
%(wrapped_header_include)s
#include <string>
class %(class_name)s {
public:
%(class_name)s();
~%(class_name)s();
bool Load(const std::string& library_name)
__attribute__((warn_unused_result));
bool loaded() const { return loaded_; }
%(member_decls)s
private:
void CleanUp(bool unload);
#if defined(%(unique_prefix)s_DLOPEN)
void* library_;
#endif
bool loaded_;
// Disallow copy constructor and assignment operator.
%(class_name)s(const %(class_name)s&);
void operator=(const %(class_name)s&);
};
#endif // %(unique_prefix)s
"""
HEADER_MEMBER_TEMPLATE = """ decltype(&::%(function_name)s) %(function_name)s;
"""
IMPL_TEMPLATE = """// This is generated file. Do not modify directly.
// Path to the code generator: %(generator_path)s .
#include "%(generated_header_name)s"
#include <dlfcn.h>
// Put these sanity checks here so that they fire at most once
// (to avoid cluttering the build output).
#if !defined(%(unique_prefix)s_DLOPEN) && !defined(%(unique_prefix)s_DT_NEEDED)
#error neither %(unique_prefix)s_DLOPEN nor %(unique_prefix)s_DT_NEEDED defined
#endif
#if defined(%(unique_prefix)s_DLOPEN) && defined(%(unique_prefix)s_DT_NEEDED)
#error both %(unique_prefix)s_DLOPEN and %(unique_prefix)s_DT_NEEDED defined
#endif
%(class_name)s::%(class_name)s() : loaded_(false) {
}
%(class_name)s::~%(class_name)s() {
CleanUp(loaded_);
}
bool %(class_name)s::Load(const std::string& library_name) {
if (loaded_)
return false;
#if defined(%(unique_prefix)s_DLOPEN)
library_ = dlopen(library_name.c_str(), RTLD_LAZY);
if (!library_)
return false;
#endif
%(member_init)s
loaded_ = true;
return true;
}
void %(class_name)s::CleanUp(bool unload) {
#if defined(%(unique_prefix)s_DLOPEN)
if (unload) {
dlclose(library_);
library_ = NULL;
}
#endif
loaded_ = false;
%(member_cleanup)s
}
"""
IMPL_MEMBER_INIT_TEMPLATE = """
#if defined(%(unique_prefix)s_DLOPEN)
%(function_name)s =
reinterpret_cast<decltype(this->%(function_name)s)>(
dlsym(library_, "%(function_name)s"));
#endif
#if defined(%(unique_prefix)s_DT_NEEDED)
%(function_name)s = &::%(function_name)s;
#endif
if (!%(function_name)s) {
CleanUp(true);
return false;
}
"""
IMPL_MEMBER_CLEANUP_TEMPLATE = """ %(function_name)s = NULL;
"""
def main():
parser = optparse.OptionParser()
parser.add_option('--name')
parser.add_option('--output-cc')
parser.add_option('--output-h')
parser.add_option('--header')
parser.add_option('--bundled-header')
parser.add_option('--use-extern-c', action='store_true', default=False)
parser.add_option('--link-directly', type=int, default=0)
options, args = parser.parse_args()
if not options.name:
parser.error('Missing --name parameter')
if not options.output_cc:
parser.error('Missing --output-cc parameter')
if not options.output_h:
parser.error('Missing --output-h parameter')
if not options.header:
    parser.error('Missing --header parameter')
if not args:
parser.error('No function names specified')
# Make sure we are always dealing with paths relative to source tree root
# to avoid issues caused by different relative path roots.
source_tree_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
options.output_cc = os.path.relpath(options.output_cc, source_tree_root)
options.output_h = os.path.relpath(options.output_h, source_tree_root)
# Create a unique prefix, e.g. for header guards.
# Stick a known string at the beginning to ensure this doesn't begin
# with an underscore, which is reserved for the C++ implementation.
unique_prefix = ('LIBRARY_LOADER_' +
re.sub(r'[\W]', '_', options.output_h).upper())
member_decls = []
member_init = []
member_cleanup = []
for fn in args:
member_decls.append(HEADER_MEMBER_TEMPLATE % {
'function_name': fn,
'unique_prefix': unique_prefix
})
member_init.append(IMPL_MEMBER_INIT_TEMPLATE % {
'function_name': fn,
'unique_prefix': unique_prefix
})
member_cleanup.append(IMPL_MEMBER_CLEANUP_TEMPLATE % {
'function_name': fn,
'unique_prefix': unique_prefix
})
header = options.header
if options.link_directly == 0 and options.bundled_header:
header = options.bundled_header
wrapped_header_include = '#include %s\n' % header
# Some libraries (e.g. libpci) have headers that cannot be included
# without extern "C", otherwise they cause the link to fail.
# TODO(phajdan.jr): This is a workaround for broken headers. Remove it.
if options.use_extern_c:
wrapped_header_include = 'extern "C" {\n%s\n}\n' % wrapped_header_include
# It seems cleaner just to have a single #define here and #ifdefs in bunch
# of places, rather than having a different set of templates, duplicating
# or complicating more code.
if options.link_directly == 0:
wrapped_header_include += '#define %s_DLOPEN\n' % unique_prefix
elif options.link_directly == 1:
wrapped_header_include += '#define %s_DT_NEEDED\n' % unique_prefix
else:
parser.error('Invalid value for --link-directly. Should be 0 or 1.')
# Make it easier for people to find the code generator just in case.
# Doing it this way is more maintainable, because it's going to work
# even if file gets moved without updating the contents.
generator_path = os.path.relpath(__file__, source_tree_root)
header_contents = HEADER_TEMPLATE % {
'generator_path': generator_path,
'unique_prefix': unique_prefix,
'wrapped_header_include': wrapped_header_include,
'class_name': options.name,
'member_decls': ''.join(member_decls),
}
impl_contents = IMPL_TEMPLATE % {
'generator_path': generator_path,
'unique_prefix': unique_prefix,
'generated_header_name': options.output_h,
'class_name': options.name,
'member_init': ''.join(member_init),
'member_cleanup': ''.join(member_cleanup),
}
header_file = open(os.path.join(source_tree_root, options.output_h), 'w')
try:
header_file.write(header_contents)
finally:
header_file.close()
impl_file = open(os.path.join(source_tree_root, options.output_cc), 'w')
try:
impl_file.write(impl_contents)
finally:
impl_file.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
Learningtribes/edx-platform | cms/djangoapps/contentstore/management/commands/cleanup_assets.py | 204 | 1226 | """
Script for removing all redundant Mac OS metadata files (with filename ".DS_Store"
or with filename which starts with "._") for all courses
"""
import logging
from django.core.management.base import BaseCommand
from xmodule.contentstore.django import contentstore
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Remove all Mac OS related redundant files for all courses in contentstore
"""
help = 'Remove all Mac OS related redundant file/files for all courses in contentstore'
def handle(self, *args, **options):
"""
Execute the command
"""
content_store = contentstore()
success = False
log.info(u"-" * 80)
log.info(u"Cleaning up assets for all courses")
try:
# Remove all redundant Mac OS metadata files
assets_deleted = content_store.remove_redundant_content_for_courses()
success = True
except Exception as err:
log.info(u"=" * 30 + u"> failed to cleanup")
log.info(u"Error:")
log.info(err)
if success:
log.info(u"=" * 80)
log.info(u"Total number of assets deleted: {0}".format(assets_deleted))
| agpl-3.0 |
AngeliaGong/AngeliaGong.github.io | node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 1812 | 9537 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported target types to extensions.
TARGET_TYPE_EXT = {
'executable': 'exe',
'loadable_module': 'dll',
'shared_library': 'dll',
'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts)
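# Worked example of the sharding above, using the pair format from the
# docstring: a target 'base/base.gyp:base#target' whose dict contains
# 'msvs_shard': 2 is replaced by 'base/base.gyp:base_0#target' and
# 'base/base.gyp:base_1#target', its 'sources' are distributed round-robin
# between the two shards, and any target depending on it now depends on both.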
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYP's tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
| mit |
LaetitiaPapaxanthos/UnionCom | train.py | 1 | 4021 | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import random
import sys
from PrimeDual import *
from utils import save_model
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
def train(Project, params, dataset, dist, P_joint, change, device):
optimizer = optim.RMSprop(Project.parameters(), lr=params.lr)
c_mse = nn.MSELoss()
c_domain = nn.NLLLoss()
cirterion_CE = nn.CrossEntropyLoss()
Project.train()
dataset_num = len(dataset)
for i in range(dataset_num):
P_joint[i] = torch.from_numpy(P_joint[i]).float().to(device)
row = []
col = []
for i in range(dataset_num):
row.append(np.shape(dataset[i])[0])
col.append(np.shape(dataset[i])[1])
N = np.int(np.max([len(l) for l in dataset]))
for epo in range(params.epoch_total):
dataset_anchor = []
dist_anchor = []
cor_pairs = []
for i in range(dataset_num):
random_anchor = random.sample(range(0,row[i]), int(row[i]))
dataset_anchor.append(dataset[i][random_anchor])
dataset_anchor[i] = torch.from_numpy(dataset_anchor[i]).to(device).float()
anchor_num = np.int(row[i])
dist_anchor.append(np.zeros([anchor_num, anchor_num]))
for j in range(anchor_num):
dist_anchor[i][j] = dist[i][random_anchor[j], random_anchor]
for i in range(dataset_num-1):
print("Match corresponding points between Dataset {} and Dataset {}".format(change[i], \
change[dataset_num-1]))
cor_pairs.append(cor_pairs_match_Adam(dist_anchor[i], dist_anchor[-1], N, \
params, col[i], col[-1], epo, device))
print("Finished Matching!")
print("Begin training the Deep Neural Network")
for epoch in range(params.epoch_DNN):
len_dataloader = np.int(np.max(row)/params.batch_size)
if len_dataloader == 0:
print("Please set batch_size smaller!")
sys.exit()
for step in range(len_dataloader):
KL_loss = []
for i in range(dataset_num):
random_batch = np.random.randint(0, row[i], params.batch_size)
data = dataset[i][random_batch]
data = torch.from_numpy(data).to(device).float()
P_tmp = torch.zeros([params.batch_size, params.batch_size]).to(device)
for j in range(params.batch_size):
P_tmp[j] = P_joint[i][random_batch[j], random_batch]
P_tmp = P_tmp / torch.sum(P_tmp)
low_dim_data = Project(data, i)
Q_joint = Q_tsne(low_dim_data)
KL_loss.append(torch.sum(P_tmp * torch.log(P_tmp / Q_joint)))
feature_loss = np.array(0)
feature_loss = torch.from_numpy(feature_loss).to(device).float()
for i in range(dataset_num-1):
low_dim_anchor = Project(dataset_anchor[i], i)
low_dim_anchor_biggest_dataset = Project(dataset_anchor[dataset_num-1][cor_pairs[i]], len(dataset)-1)
feature_loss += c_mse(low_dim_anchor, low_dim_anchor_biggest_dataset)
min_norm = torch.min(torch.norm(low_dim_anchor), torch.norm(low_dim_anchor_biggest_dataset))
feature_loss += torch.abs(torch.norm(low_dim_anchor) - torch.norm(low_dim_anchor_biggest_dataset))/min_norm
loss = params.beta * feature_loss
for i in range(dataset_num):
loss += KL_loss[i]
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % params.log_DNN == 0:
print("[{:4d}/{}] [{:4d}/{}]: loss={:4f}, feature_loss={:4f}".format(epo+1, params.epoch_total, epoch+1, \
params.epoch_DNN, loss.data.item(), feature_loss.data.item()))
return Project
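# Note on the two helpers below: neg_square_dists returns the negated pairwise
# squared Euclidean distances between rows of X, and Q_tsne converts them into
# a normalized Student-t (one degree of freedom) similarity matrix, i.e.
# q_ij proportional to 1 / (1 + ||y_i - y_j||^2), with the diagonal zeroed and
# a small epsilon added for numerical stability -- these are the
# low-dimensional similarities used in the KL loss computed in train().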
def neg_square_dists(X):
sum_X = torch.sum(X*X, 1)
tmp = torch.add(-2 * X.mm(torch.transpose(X,1,0)), sum_X)
D = torch.add(torch.transpose(tmp,1,0), sum_X)
return -D
def Q_tsne(Y):
distances = neg_square_dists(Y)
inv_distances = torch.pow(1. - distances, -1)
inv_distances = inv_distances - torch.diag(inv_distances.diag(0))
inv_distances = inv_distances + 1e-15
return inv_distances / torch.sum(inv_distances)
| mit |
arpitparmar5739/youtube-dl | youtube_dl/extractor/vine.py | 95 | 4636 | from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..utils import unified_strdate
class VineIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?vine\.co/(?:v|oembed)/(?P<id>\w+)'
_TESTS = [{
'url': 'https://vine.co/v/b9KOOWX7HUx',
'md5': '2f36fed6235b16da96ce9b4dc890940d',
'info_dict': {
'id': 'b9KOOWX7HUx',
'ext': 'mp4',
'title': 'Chicken.',
'alt_title': 'Vine by Jack Dorsey',
'description': 'Chicken.',
'upload_date': '20130519',
'uploader': 'Jack Dorsey',
'uploader_id': '76',
},
}, {
'url': 'https://vine.co/v/MYxVapFvz2z',
'md5': '7b9a7cbc76734424ff942eb52c8f1065',
'info_dict': {
'id': 'MYxVapFvz2z',
'ext': 'mp4',
'title': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14',
'alt_title': 'Vine by Luna',
'description': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14',
'upload_date': '20140815',
'uploader': 'Luna',
'uploader_id': '1102363502380728320',
},
}, {
'url': 'https://vine.co/v/bxVjBbZlPUH',
'md5': 'ea27decea3fa670625aac92771a96b73',
'info_dict': {
'id': 'bxVjBbZlPUH',
'ext': 'mp4',
'title': '#mw3 #ac130 #killcam #angelofdeath',
'alt_title': 'Vine by Z3k3',
'description': '#mw3 #ac130 #killcam #angelofdeath',
'upload_date': '20130430',
'uploader': 'Z3k3',
'uploader_id': '936470460173008896',
},
}, {
'url': 'https://vine.co/oembed/MYxVapFvz2z.json',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage('https://vine.co/v/' + video_id, video_id)
data = self._parse_json(
self._html_search_regex(
r'window\.POST_DATA = { %s: ({.+?}) };\s*</script>' % video_id,
webpage, 'vine data'),
video_id)
formats = [{
'format_id': '%(format)s-%(rate)s' % f,
'vcodec': f['format'],
'quality': f['rate'],
'url': f['videoUrl'],
} for f in data['videoUrls']]
self._sort_formats(formats)
return {
'id': video_id,
'title': self._og_search_title(webpage),
'alt_title': self._og_search_description(webpage, default=None),
'description': data['description'],
'thumbnail': data['thumbnailUrl'],
'upload_date': unified_strdate(data['created']),
'uploader': data['username'],
'uploader_id': data['userIdStr'],
'like_count': data['likes']['count'],
'comment_count': data['comments']['count'],
'repost_count': data['reposts']['count'],
'formats': formats,
}
class VineUserIE(InfoExtractor):
IE_NAME = 'vine:user'
_VALID_URL = r'(?:https?://)?vine\.co/(?P<u>u/)?(?P<user>[^/]+)/?(\?.*)?$'
_VINE_BASE_URL = "https://vine.co/"
_TESTS = [
{
'url': 'https://vine.co/Visa',
'info_dict': {
'id': 'Visa',
},
'playlist_mincount': 46,
},
{
'url': 'https://vine.co/u/941705360593584128',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user = mobj.group('user')
u = mobj.group('u')
profile_url = "%sapi/users/profiles/%s%s" % (
self._VINE_BASE_URL, 'vanity/' if not u else '', user)
profile_data = self._download_json(
profile_url, user, note='Downloading user profile data')
user_id = profile_data['data']['userId']
timeline_data = []
for pagenum in itertools.count(1):
timeline_url = "%sapi/timelines/users/%s?page=%s&size=100" % (
self._VINE_BASE_URL, user_id, pagenum)
timeline_page = self._download_json(
timeline_url, user, note='Downloading page %d' % pagenum)
timeline_data.extend(timeline_page['data']['records'])
if timeline_page['data']['nextPage'] is None:
break
entries = [
self.url_result(e['permalinkUrl'], 'Vine') for e in timeline_data]
return self.playlist_result(entries, user)
| unlicense |
gitenberg-dev/pg-epubmaker | epubmaker/mydocutils/gutenberg/writers/nroff.py | 1 | 6333 | # -*- coding: utf-8 -*-
# $Id: manpage.py 6270 2010-03-18 22:32:09Z milde $
# Author: Engelbert Gruber <[email protected]>
# Copyright: This module is put into the public domain.
# Rewritten almost completely
# by Marcello Perathoner <[email protected]>
"""
Nroff writer for reStructuredText. Tweaked for Project Gutenberg usage.
"""
__docformat__ = 'reStructuredText'
from epubmaker.mydocutils.writers import nroff
from epubmaker import Unitame
from epubmaker.lib.Logger import info, debug, warn, error
GUTENBERG_NROFF_PREAMBLE = r""".\" -*- mode: nroff -*- coding: {encoding} -*-
.\" This file produces Project Gutenberg plain text. Usage:
.\" $ groff -t -K {device} -T {device} this_file > output.txt
.
.pl 100000 \" very tall page: disable pagebreaks
.ll 72m
.po 0
.ad l \" text-align: left
.nh \" hyphenation: off
.cflags 0 .?! \" single sentence space
.cflags 0 -\[hy]\[em] \" don't break on -
.
.de nop
..
.blm nop \" do nothing on empty line
.
.nr [env_cnt] 0
.ev 0 \" start in a defined environment
.
.de push_env
.br
.nr last_env \\n[.ev] \" save current environment name
.nr env_cnt +1 \" generate new environment name
.ev \\n[env_cnt]
.evc \\n[last_env]
..
.de pop_env
.br
.ev
.nr env_cnt -1
..
.
"""
GUTENBERG_NROFF_POSTAMBLE = r""".
.pl 0 \" ends very long page here
.\" End of File
"""
class Writer (nroff.Writer):
""" A plaintext writer thru nroff. """
supported = ('pg-nroff',)
"""Formats this writer supports."""
def __init__ (self):
nroff.Writer.__init__ (self)
self.translator_class = Translator
def translate (self):
visitor = self.translator_class (self.document)
del Unitame.unhandled_chars[:]
self.document.walkabout (visitor)
self.output = visitor.astext ()
if Unitame.unhandled_chars:
error ("unitame: unhandled chars: %s" % u", ".join (set (Unitame.unhandled_chars)))
#def get_transforms (self):
# tfs = writers.Writer.get_transforms (self)
# return tfs + [parts.CharsetTransform]
class Translator (nroff.Translator):
""" nroff translator """
def preamble (self):
""" Inserts nroff preamble. """
return GUTENBERG_NROFF_PREAMBLE.format (
encoding = self.encoding, device = self.device)
def postamble (self):
""" Inserts nroff postamble. """
return GUTENBERG_NROFF_POSTAMBLE.format (
encoding = self.encoding, device = self.device)
def init_translate_maps (self):
nroff.Translator.init_translate_maps (self)
update = {
0x0011: ur"\~", # nbsp, see: Unitame.py
0x0012: ur"\%", # shy, see: Unitame.py
}
self.translate_map.update (update)
self.translate_map_literal.update (update)
def register_classes (self):
""" Register classes.
Use the idiosyncratic PG convention of marking up italics etc.
"""
#
# This does not call the base class !!!
#
self.register_class ('simple', 'left', '.ad l', '')
self.register_class ('simple', 'right', '.ad r', '')
self.register_class ('simple', 'center', '.ad c', '')
self.register_class ('inline', 'italics', '_', '_')
self.register_class ('inline', 'bold', '*', '*')
self.register_class ('inline', 'monospaced', '', '')
self.register_class ('inline', 'superscript', '', '')
self.register_class ('inline', 'subscript', '', '')
self.register_class ('inline', 'small-caps', '_', '_')
self.register_class ('inline', 'gesperrt', '_', '_')
self.register_class ('inline', 'antiqua', '_', '_')
self.register_class ('inline', 'larger', '', '')
self.register_class ('inline', 'smaller', '', '')
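        # Net effect of these registrations (illustrative): emphasis such as
        # italics, small-caps, gesperrt and antiqua is rendered as _text_,
        # bold as *text*, while monospaced, super-/subscript and size changes
        # carry no markers in the generated plain text.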
def translate (self, text):
""" Reduce the charset while keeping text a unicode string. """
# NOTE: there's an alternate approach in
# transforms.parts.CharsetTransform
if self.encoding != 'utf-8':
text = text.encode (self.encoding, 'unitame')
text = text.decode (self.encoding)
if self.in_literal:
text = text.translate (self.translate_map_literal)
else:
text = text.translate (self.translate_map)
return text
def visit_inner (self, node):
""" Try to remove duplicated PG highlight markers. """
if node.type == 'inline':
prefixes = self.get_prefix (node.type, node['classes'])
for prefix in prefixes:
if prefix == self.last_output_char:
self.backspace ()
else:
self.text (prefix)
else:
nroff.Translator.visit_inner (self, node)
def visit_inline (self, node):
if 'toc-pageref' in node['classes']:
maxlen = 3 # sensible default
while node.parent:
node = node.parent
if 'pageno_maxlen' in node:
maxlen = node['pageno_maxlen']
break
self.cmd (('linetabs 1',
r'ta (\n[.l]u - \n[.i]u - %dm) +%dmR' % (maxlen + 1, maxlen + 1),
r'lc .'))
self.text (chr (1) + '\t')
nroff.Translator.visit_inline (self, node)
def visit_section_title (self, node):
""" Implements PG-standard spacing before headers. """
self.sp (max (2, 5 - self.section_level))
def visit_figure (self, node):
self.sp (1)
self.push ()
def depart_figure (self, node):
self.pop ()
self.sp (1)
def visit_image (self, node):
# ignore alt attribute except for dropcaps
if 'dropcap' in node['classes']:
self.text (node.attributes.get ('alt', ''))
def visit_page (self, node):
if 'clearpage' in node['classes']:
self.sp (4)
elif 'cleardoublepage' in node['classes']:
self.sp (4)
else:
nroff.Translator.visit_page (self, node)
| gpl-3.0 |
rajadhva/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/example/abort_handshake_wsh.py | 465 | 1781 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import handshake
def web_socket_do_extra_handshake(request):
raise handshake.AbortedByUserException(
"Aborted in web_socket_do_extra_handshake")
def web_socket_transfer_data(request):
pass
# vi:sts=4 sw=4 et
| mpl-2.0 |
windyuuy/opera | chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py | 4 | 2380 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
_log = logging.getLogger(__name__)
class AbstractSequencedCommand(AbstractDeclarativeCommand):
steps = None
def __init__(self):
self._sequence = StepSequence(self.steps)
AbstractDeclarativeCommand.__init__(self, self._sequence.options())
def _prepare_state(self, options, args, tool):
return None
def execute(self, options, args, tool):
try:
state = self._prepare_state(options, args, tool)
except ScriptError, e:
_log.error(e.message_with_output())
self._exit(e.exit_code or 2)
self._sequence.run_and_handle_errors(tool, options, state)
| bsd-3-clause |
qutip/qutip-benchmark | continuous-benchmark/benchmarks/mesolve_8spin.py | 1 | 2364 | #
# qutip benchmark: mesolve 8 spin chain
#
import time
try:
from numpy import *
from qutip import *
except:
print("nan")
import sys
sys.exit(1)
def benchmark(runs=1):
"""
mesolver evolution of 8-spin chain
"""
test_name='8-spin ME [256]'
    N = 8  # number of spins
# uniform parameters
h = 1.0 * 2 * pi * ones(N)
Jz = 0.1 * 2 * pi * ones(N)
Jx = 0.1 * 2 * pi * ones(N)
Jy = 0.1 * 2 * pi * ones(N)
# dephasing rate
gamma = 0.01 * ones(N)
# intial state, first spin in state |1>, the rest in state |0>
psi_list = []
psi_list.append(basis(2,1))
for n in range(N-1):
psi_list.append(basis(2,0))
psi0 = tensor(psi_list)
tlist = linspace(0, 10, 200)
# Hamiltonian
si = qeye(2)
sx = sigmax()
sy = sigmay()
sz = sigmaz()
sx_list = []
sy_list = []
sz_list = []
for n in range(N):
op_list = []
for m in range(N):
op_list.append(si)
op_list[n] = sx
sx_list.append(tensor(op_list))
op_list[n] = sy
sy_list.append(tensor(op_list))
op_list[n] = sz
sz_list.append(tensor(op_list))
# construct the hamiltonian
H = 0
# energy splitting terms
for n in range(N):
H += - 0.5 * h[n] * sz_list[n]
# interaction terms
for n in range(N-1):
H += - 0.5 * Jx[n] * sx_list[n] * sx_list[n+1]
H += - 0.5 * Jy[n] * sy_list[n] * sy_list[n+1]
H += - 0.5 * Jz[n] * sz_list[n] * sz_list[n+1]
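    # The loops above assemble the spin-chain Hamiltonian
    #   H = -1/2 * sum_n h[n] * sz_n
    #       -1/2 * sum_n (Jx[n] sx_n sx_{n+1} + Jy[n] sy_n sy_{n+1} + Jz[n] sz_n sz_{n+1})
    # with uniform parameters defined at the top of benchmark().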
# collapse operators
c_op_list = []
# spin dephasing
for n in range(N):
c_op_list.append(sqrt(gamma[n]) * sz_list[n])
# evolve and calculate expectation values
tot_elapsed = 0
for n in range(runs):
tic = time.time()
mesolve(H, psi0, tlist, c_op_list, sz_list)
toc = time.time()
tot_elapsed += toc - tic
return tot_elapsed / runs
if __name__ == "__main__":
try:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--number-of-runs",
help="number of times to run the benchmark",
default=1, type=int)
args = parser.parse_args()
print(benchmark(args.number_of_runs))
except Exception as e:
print(e)
print("nan")
| bsd-3-clause |
LudditeLabs/query-reform | data/samples/Python/2.py | 1 | 9902 | #!/usr/bin/env python
# encoding: utf-8
# The benchmark is distributed under the Creative Commons,
# Attribution-NonCommercial-NoDerivatives. This license includes the benchmark database
# and its derivatives. For attribution, please cite this page, and our publications below.
# This data is provided free of charge for non-commercial and academic benchmarking and
# experimentation use. If you would like to contribute to the benchmark, please contact us.
# If you believe you intended usage may be restricted by the license,
# please contact us and we can discuss the possibilities.
"""
pytime
~~~~~~~~~~~~~
An easy-to-use module to solve datetime needs by string.
:copyright: (c) 2015 by Sinux <[email protected]>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser, str_tuple
from .exception import CanNotFormatError, UnexpectedTypeError
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
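# Illustrative usage (assuming BaseParser accepts this string form, as in the
# __main__ block at the bottom of this file): count('2015-01-03', '2015-01-01')
# yields a two-day timedelta, and mixing a date with a datetime works because
# the date operand is promoted to its midnight datetime first.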
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def daysrange(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def lastday(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
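# Note on before()/after(): both coerce a date argument to its midnight
# datetime, then apply the units returned by dp(diff) one at a time -- years
# and months by replacing fields (with month wrap-around), days/hours/minutes/
# seconds via datetime.timedelta. The accepted diff-string syntax itself is
# defined by filter.BaseParser.parse_diff, not here.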
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), lastday(_arg.year, _arg.month) if clean \
else lastday(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = lastday(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = lastday(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def newyear(year=None):
return datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
return datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
def christeve(year=None):
return yesterday(christmas(year))
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 3rd Sunday in June
:param year: int
:return: Father's day
"""
june_first = datetime.date(_year, 6, 1) if not year else datetime.date(int(year), 6, 1)
weekday_seq = june_first.weekday()
return datetime.date(june_first.year, 6, (21 - weekday_seq))
def halloween(year=None):
return lastday(month=10) if not year else lastday(year, 10)
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 - d))
def thanks(year=None):
"""
4rd Thursday in Nov
:param year: int
:return: Thanksgiving Day
"""
nov_first = datetime.date(_year, 11, 1) if not year else datetime.date(int(year), 11, 1)
weekday_seq = nov_first.weekday()
if weekday_seq > 3:
current_day = 32 - weekday_seq
else:
current_day = 25 - weekday_seq
return datetime.date(nov_first.year, 11, current_day)
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
| apache-2.0 |
kaostao/sx | src/obelisk/client.py | 2 | 6958 | import struct
from decimal import Decimal
from twisted.internet import reactor
from zmqbase import ClientBase
import bitcoin
import models
import serialize
import error_code
def unpack_error(data):
value = struct.unpack_from('<I', data, 0)[0]
return error_code.error_code.name_from_id(value)
def pack_block_index(index):
if type(index) == str:
assert len(index) == 32
return serialize.ser_hash(index)
elif type(index) == int:
return struct.pack('<I', index)
else:
raise ValueError("Unknown index type")
class ObeliskOfLightClient(ClientBase):
valid_messages = ['fetch_block_header', 'fetch_history', 'subscribe',
'fetch_last_height', 'fetch_transaction', 'fetch_spend',
'fetch_transaction_index', 'fetch_block_transaction_hashes',
'fetch_block_height', 'update', 'renew']
subscribed = 0
# Command implementations
def renew_address(self, address):
address_version, address_hash = \
bitcoin.bc_address_to_hash_160(address)
# prepare parameters
data = struct.pack('B', address_version) # address version
data += address_hash[::-1] # address
# run command
self.send_command('address.renew', data)
# renew triggered again on response
reactor.callLater(120, self.renew_address, address)
def subscribe_address(self, address, notification_cb=None, cb=None):
address_version, address_hash = \
bitcoin.bc_address_to_hash_160(address)
# prepare parameters
data = struct.pack('B', address_version) # address version
data += address_hash[::-1] # address
# run command
self.send_command('address.subscribe', data, cb)
if notification_cb:
self._subscriptions['address'][address_hash] = notification_cb
reactor.callLater(120, self.renew_address, address)
def fetch_block_header(self, index, cb):
"""Fetches the block header by height."""
data = pack_block_index(index)
self.send_command('blockchain.fetch_block_header', data, cb)
def fetch_history(self, address, cb, from_height=0):
"""Fetches the output points, output values, corresponding input point
spends and the block heights associated with a Bitcoin address.
The returned history is a list of rows with the following fields:
output
output_height
value
spend
spend_height
If an output is unspent then the input spend hash will be equivalent
to null_hash.
Summing the list of values for unspent outpoints gives the balance
for an address.
"""
address_version, address_hash = \
bitcoin.bc_address_to_hash_160(address)
# prepare parameters
data = struct.pack('B', address_version) # address version
data += address_hash[::-1] # address
data += struct.pack('<I', from_height) # from_height
# run command
self.send_command('address.fetch_history', data, cb)
def fetch_last_height(self, cb):
"""Fetches the height of the last block in our blockchain."""
self.send_command('blockchain.fetch_last_height', cb=cb)
def fetch_transaction(self, tx_hash, cb):
"""Fetches a transaction by hash."""
data = serialize.ser_hash(tx_hash)
self.send_command('blockchain.fetch_transaction', data, cb)
def fetch_spend(self, outpoint, cb):
"""Fetches a corresponding spend of an output."""
data = outpoint.serialize()
self.send_command('blockchain.fetch_spend', data, cb)
def fetch_transaction_index(self, tx_hash, cb):
"""Fetch the block height that contains a transaction and its index
within a block."""
data = serialize.ser_hash(tx_hash)
self.send_command('blockchain.fetch_transaction_index', data, cb)
def fetch_block_transaction_hashes(self, index, cb):
"""Fetches list of transaction hashes in a block by block hash."""
data = pack_block_index(index)
self.send_command('blockchain.fetch_block_transaction_hashes',
data, cb)
def fetch_block_height(self, blk_hash, cb):
"""Fetches the height of a block given its hash."""
data = serialize.ser_hash(blk_hash)
self.send_command('blockchain.fetch_block_height', data, cb)
# receive handlers
def _on_fetch_block_header(self, data):
error = unpack_error(data)
assert len(data[4:]) == 80
header = models.BlockHeader.deserialize(data[4:])
return (error, header)
def _on_fetch_history(self, data):
error = unpack_error(data)
# parse results
rows = self.unpack_table("<32sIIQ32sII", data, 4)
return (error, rows)
def _on_fetch_last_height(self, data):
error = unpack_error(data)
height = struct.unpack('<I', data[4:])[0]
return (error, height)
def _on_fetch_transaction(self, data):
error = unpack_error(data)
tx = serialize.deser_tx(data[4:])
return (error, tx)
def _on_fetch_spend(self, data):
error = unpack_error(data)
spend = serialize.deser_output_point(data[4:])
return (error, spend)
def _on_fetch_transaction_index(self, data):
error = unpack_error(data)
height, index = struct.unpack("<II", data[4:])
return (error, height, index)
def _on_fetch_block_transaction_hashes(self, data):
error = unpack_error(data)
rows = self.unpack_table("32s", data, 4)
hashes = [row[0][::-1] for row in rows]
return (error, hashes)
def _on_fetch_block_height(self, data):
error = unpack_error(data)
height = struct.unpack('<I', data[4:])[0]
return (error, height)
def _on_subscribe(self, data):
self.subscribed += 1
error = unpack_error(data)
if error:
print "Error subscribing"
if not self.subscribed%1000:
print "Subscribed ok", self.subscribed
return (error, True)
def _on_update(self, data):
address_version = struct.unpack_from('B', data, 0)[0]
address_hash = data[1:21][::-1]
address = bitcoin.hash_160_to_bc_address(address_hash, address_version)
height = struct.unpack_from('I', data, 21)[0]
block_hash = data[25:57]
tx = data[57:]
if address_hash in self._subscriptions['address']:
self._subscriptions['address'][address_hash](address_version, address_hash, height, block_hash, tx)
def _on_renew(self, data):
self.subscribed += 1
error = unpack_error(data)
if error:
print "Error subscribing"
if not self.subscribed%1000:
print "Renew ok", self.subscribed
| agpl-3.0 |
a-doumoulakis/tensorflow | tensorflow/contrib/rnn/python/ops/core_rnn_cell.py | 39 | 8591 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells that used to be in core.
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
RNNCell = rnn_cell_impl.RNNCell # pylint: disable=invalid-name
_linear = rnn_cell_impl._linear # pylint: disable=invalid-name, protected-access
_like_rnncell = rnn_cell_impl._like_rnncell # pylint: disable=invalid-name, protected-access
class EmbeddingWrapper(RNNCell):
"""Operator adding input embedding to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the embedding on this batch-concatenated sequence, then split it and
feed into your RNN.
"""
def __init__(self,
cell,
embedding_classes,
embedding_size,
initializer=None,
reuse=None):
"""Create a cell with an added input embedding.
Args:
cell: an RNNCell, an embedding will be put before its inputs.
embedding_classes: integer, how many symbols will be embedded.
embedding_size: integer, the size of the vectors we embed into.
initializer: an initializer to use when creating the embedding;
if None, the initializer from variable scope or a default one is used.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if embedding_classes is not positive.
"""
super(EmbeddingWrapper, self).__init__(_reuse=reuse)
if not _like_rnncell(cell):
raise TypeError("The parameter cell is not RNNCell.")
if embedding_classes <= 0 or embedding_size <= 0:
raise ValueError("Both embedding_classes and embedding_size must be > 0: "
"%d, %d." % (embedding_classes, embedding_size))
self._cell = cell
self._embedding_classes = embedding_classes
self._embedding_size = embedding_size
self._initializer = initializer
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def call(self, inputs, state):
"""Run the cell on embedded inputs."""
with ops.device("/cpu:0"):
if self._initializer:
initializer = self._initializer
elif vs.get_variable_scope().initializer:
initializer = vs.get_variable_scope().initializer
else:
# Default initializer for embeddings should have variance=1.
sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1.
initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
if isinstance(state, tuple):
data_type = state[0].dtype
else:
data_type = state.dtype
embedding = vs.get_variable(
"embedding", [self._embedding_classes, self._embedding_size],
initializer=initializer,
dtype=data_type)
embedded = embedding_ops.embedding_lookup(embedding,
array_ops.reshape(inputs, [-1]))
return self._cell(embedded, state)
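# A minimal usage sketch (not part of the original module). The wrapped cell
# type, vocabulary size and embedding size are illustrative assumptions, and
# `tf.float32` is assumed to be available in the caller's scope:
#
#   cell = EmbeddingWrapper(rnn_cell_impl.GRUCell(128),
#                           embedding_classes=10000,  # vocabulary size
#                           embedding_size=64)
#   # `ids` would be an int32 tensor of symbol ids, shape [batch_size, 1];
#   # the wrapper looks them up in an embedding table before calling GRUCell.
#   output, state = cell(ids, cell.zero_state(batch_size, dtype=tf.float32))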
class InputProjectionWrapper(RNNCell):
"""Operator adding an input projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your inputs in time,
do the projection on this batch-concatenated sequence, then split it.
"""
def __init__(self,
cell,
num_proj,
activation=None,
input_size=None,
reuse=None):
"""Create a cell with input projection.
Args:
cell: an RNNCell, a projection of inputs is added before it.
num_proj: Python integer. The dimension to project to.
activation: (optional) an optional activation function.
input_size: Deprecated and unused.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
TypeError: if cell is not an RNNCell.
"""
super(InputProjectionWrapper, self).__init__(_reuse=reuse)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
if not _like_rnncell(cell):
raise TypeError("The parameter cell is not RNNCell.")
self._cell = cell
self._num_proj = num_proj
self._activation = activation
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def call(self, inputs, state):
"""Run the input projection and then the cell."""
# Default scope: "InputProjectionWrapper"
projected = _linear(inputs, self._num_proj, True)
if self._activation:
projected = self._activation(projected)
return self._cell(projected, state)
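# Usage sketch (illustrative, not part of the original module): project
# high-dimensional inputs down to a smaller size before running the cell.
#
#   cell = InputProjectionWrapper(rnn_cell_impl.BasicLSTMCell(64), num_proj=64)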
class OutputProjectionWrapper(RNNCell):
"""Operator adding an output projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your outputs in time,
do the projection on this batch-concatenated sequence, then split it
if needed or directly feed into a softmax.
"""
def __init__(self, cell, output_size, activation=None, reuse=None):
"""Create a cell with output projection.
Args:
cell: an RNNCell, a projection to output_size is added to it.
output_size: integer, the size of the output after projection.
activation: (optional) an optional activation function.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if output_size is not positive.
"""
super(OutputProjectionWrapper, self).__init__(_reuse=reuse)
if not _like_rnncell(cell):
raise TypeError("The parameter cell is not RNNCell.")
if output_size < 1:
raise ValueError("Parameter output_size must be > 0: %d." % output_size)
self._cell = cell
self._output_size = output_size
self._activation = activation
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def call(self, inputs, state):
"""Run the cell and output projection on inputs, starting from state."""
output, res_state = self._cell(inputs, state)
projected = _linear(output, self._output_size, True)
if self._activation:
projected = self._activation(projected)
return projected, res_state
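# Usage sketch (illustrative, not part of the original module): project each
# cell output to vocabulary-sized logits that can feed a softmax directly.
#
#   cell = OutputProjectionWrapper(rnn_cell_impl.BasicLSTMCell(256),
#                                  output_size=10000)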
| apache-2.0 |
gavin-feng/odoo | addons/stock_account/__init__.py | 384 | 1060 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product
import stock_account
import stock
import wizard
import res_config
| agpl-3.0 |
RachaelT/UTDchess-RospyXbee | src/scripts/cmd_vel_listener.py | 1 | 4756 | #!/usr/bin/env python
import sys
import rospy
import serial
import struct
import binascii
import time
from teleop_twist_keyboard.msg import Command
from xbee import ZigBee
xbee = None
XBEE_ADDR_LONG = '\x00\x13\xA2\x00\x40\x86\x96\x4F'
XBEE_ADDR_SHORT = '\xFF\xFE'
DEVICE = '/dev/tty.usbserial-A603HA9K'
#Each bot will have an addr long, addr short, and id stored. Coordinator is always id 0.
bot_array = []
final_bot_array = []
def find_bots():
    #The coordinator broadcasts a "Node Discover" AT command and records the addresses received.
#Add coordinator first, then add on each bot as it responds.
#robot id command
global xbee
ser = serial.Serial(DEVICE, 57600)
xbee = ZigBee(ser)
try:
print("Searching for bots...")
xbee.at(
dest_addr_long = XBEE_ADDR_LONG,
dest_addr = XBEE_ADDR_SHORT,
command = 'ND'
)
timeout = time.time() + 30
num_of_robots = 0
while timeout > time.time():
dict = xbee.wait_read_frame()
if dict == None:
break
bot_array.append(parse_ND(dict))
print "Response: %r " % bot_array[num_of_robots]
num_of_robots += 1
    except (KeyboardInterrupt, serial.SerialException):
sys.exit(0)
def get_bot_id(addr_long, addr_short):
#STILL NEEDS WORK
data = struct.pack('c', str(unichr(2)))
for item in bot_array:
addr_long = hex_to_addr(item['addr_long'])
addr_short = hex_to_addr(item['addr_short'])
xbee.tx(
dest_addr_long = addr_long,
dest_addr = addr_short,
data=data,
)
ans = xbee.wait_read_frame()
if ans == None:
print "Could not retrieve bot id"
print ans[6:12]
def search_for_bot(id):
#Takes the xbee ID, returns the xbee's dictionary
for dict in bot_array:
if dict['id'] == id:
return dict
def prepare_move_cmd(msg):
code = str(unichr(4))
#Packages the command message as binary for the API Frame
move = str(unichr(msg.direction))
speed = str(unichr(msg.magnitude))
turn = str(unichr(msg.turn))
accel = str(unichr(msg.accel))
data = struct.pack('ccccc', code, move, speed, turn, accel)
return data
def cmd_vel_command(msg):
data = prepare_move_cmd(msg)
rospy.loginfo("Sending: %s" % binascii.hexlify(data))
#Sends the message
xbee.tx(
dest_addr_long = XBEE_ADDR_LONG,
dest_addr = XBEE_ADDR_SHORT,
data=data,
)
print parse(xbee.wait_read_frame())
def callback(msg):
rospy.loginfo("Received a /cmd_vel message!")
cmd_vel_command(msg)
def listener():
global xbee
ser = serial.Serial(DEVICE, 57600)
xbee = ZigBee(ser)
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
    # anonymous=True flag means that rospy will choose a unique
    # name for our 'cmd_vel_listener' node so that multiple listeners can
    # run simultaneously.
print "Ready to receive commands."
rospy.init_node('cmd_vel_listener', anonymous=True)
rospy.Subscriber("/cmd_hex", Command, callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
xbee.halt()
ser.close()
def parse(frame):
#Parses the transmit status for relevant info
info = {
'length': str(len(frame)/2),
'frame_id': frame[0:2],
'addr': frame[2:8],
'retry': frame[8:10],
'status': frame[10:12]
}
return info
def parse_ND(frame):
#parses the node discovery response for relevant info
info = {
'length': str(len(frame)/2),
'frame_id': frame[0:2],
'command': frame[2:6],
'status': frame[6:8],
'addr_short': frame[8:12],
'addr_long': frame[12:28],
'id': frame[39:40]
}
return info
def hex_to_addr(adhex):
#Changes the hex address given by the dictionary
#to a format usable by the xbee. Works on long and short.
address = binascii.unhexlify(adhex)
return address
def send_move_command(id, msg):
dict = search_for_bot(id)
data = prepare_move_cmd(msg)
rospy.loginfo("Sending: %s" % binascii.hexlify(data))
#Sends the message
xbee.tx(
        dest_addr_long = binascii.unhexlify(dict['addr_long']),
        dest_addr = binascii.unhexlify(dict['addr_short']),
data=data,
)
rospy.loginfo(" ")parse(xbee.wait_read_frame())
if __name__ == '__main__':
find_bots()
#print search_for_bot('1')
#listener()
get_bot_id(XBEE_ADDR_LONG, XBEE_ADDR_SHORT)
#tf package
| mit |
PlanTool/plantool | wrappingPlanners/Deterministic/LAMA/planning-lama(finished)/translate/search.py | 4 | 2619 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_search', [dirname(__file__)])
except ImportError:
import _search
return _search
if fp is not None:
try:
_mod = imp.load_module('_search', fp, pathname, description)
finally:
fp.close()
return _mod
_search = swig_import_helper()
del swig_import_helper
else:
import _search
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def oldmain(argc):
return _search.oldmain(argc)
oldmain = _search.oldmain
# This file is compatible with both classic and new-style classes.
| gpl-2.0 |
nickster5001/ctracker | flask/lib/python3.4/site-packages/pip/_vendor/html5lib/ihatexml.py | 1727 | 16581 | from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
letter = " | ".join([baseChar, ideographic])
# Without the ":" (i.e. the XML NameChar production minus the colon)
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
nameFirst = " | ".join([letter, "_"])
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
charRanges = [item.strip() for item in chars.split(" | ")]
rv = []
for item in charRanges:
foundMatch = False
for regexp in (reChar, reCharRange):
match = regexp.match(item)
if match is not None:
rv.append([hexToInt(item) for item in match.groups()])
if len(rv[-1]) == 1:
rv[-1] = rv[-1] * 2
foundMatch = True
break
if not foundMatch:
assert len(item) == 1
rv.append([ord(item)] * 2)
rv = normaliseCharList(rv)
return rv
def normaliseCharList(charList):
charList = sorted(charList)
for item in charList:
assert item[1] >= item[0]
rv = []
i = 0
while i < len(charList):
j = 1
rv.append(charList[i])
while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
rv[-1][1] = charList[i + j][1]
j += 1
i += j
return rv
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)
def missingRanges(charList):
rv = []
    if charList[0][0] != 0:
rv.append([0, charList[0][0] - 1])
for i, item in enumerate(charList[:-1]):
rv.append([item[1] + 1, charList[i + 1][0] - 1])
if charList[-1][1] != max_unicode:
rv.append([charList[-1][1] + 1, max_unicode])
return rv
def listToRegexpStr(charList):
rv = []
for item in charList:
if item[0] == item[1]:
rv.append(escapeRegexp(chr(item[0])))
else:
rv.append(escapeRegexp(chr(item[0])) + "-" +
escapeRegexp(chr(item[1])))
return "[%s]" % "".join(rv)
def hexToInt(hex_str):
return int(hex_str, 16)
def escapeRegexp(string):
specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
"[", "]", "|", "(", ")", "-")
for char in specialCharacters:
string = string.replace(char, "\\" + char)
return string
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# Simpler things
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
def __init__(self, replaceChars=None,
dropXmlnsLocalName=False,
dropXmlnsAttrNs=False,
preventDoubleDashComments=False,
preventDashAtCommentEnd=False,
replaceFormFeedCharacters=True,
preventSingleQuotePubid=False):
self.dropXmlnsLocalName = dropXmlnsLocalName
self.dropXmlnsAttrNs = dropXmlnsAttrNs
self.preventDoubleDashComments = preventDoubleDashComments
self.preventDashAtCommentEnd = preventDashAtCommentEnd
self.replaceFormFeedCharacters = replaceFormFeedCharacters
self.preventSingleQuotePubid = preventSingleQuotePubid
self.replaceCache = {}
def coerceAttribute(self, name, namespace=None):
if self.dropXmlnsLocalName and name.startswith("xmlns:"):
warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
return None
elif (self.dropXmlnsAttrNs and
namespace == "http://www.w3.org/2000/xmlns/"):
warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
return None
else:
return self.toXmlName(name)
def coerceElement(self, name, namespace=None):
return self.toXmlName(name)
def coerceComment(self, data):
if self.preventDoubleDashComments:
while "--" in data:
warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
data = data.replace("--", "- -")
return data
def coerceCharacters(self, data):
if self.replaceFormFeedCharacters:
for i in range(data.count("\x0C")):
warnings.warn("Text cannot contain U+000C", DataLossWarning)
data = data.replace("\x0C", " ")
# Other non-xml characters
return data
def coercePubid(self, data):
dataOutput = data
for char in nonPubidCharRegexp.findall(data):
warnings.warn("Coercing non-XML pubid", DataLossWarning)
replacement = self.getReplacementCharacter(char)
dataOutput = dataOutput.replace(char, replacement)
if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
warnings.warn("Pubid cannot contain single quote", DataLossWarning)
dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
return dataOutput
def toXmlName(self, name):
nameFirst = name[0]
nameRest = name[1:]
m = nonXmlNameFirstBMPRegexp.match(nameFirst)
if m:
warnings.warn("Coercing non-XML name", DataLossWarning)
nameFirstOutput = self.getReplacementCharacter(nameFirst)
else:
nameFirstOutput = nameFirst
nameRestOutput = nameRest
replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
for char in replaceChars:
warnings.warn("Coercing non-XML name", DataLossWarning)
replacement = self.getReplacementCharacter(char)
nameRestOutput = nameRestOutput.replace(char, replacement)
return nameFirstOutput + nameRestOutput
def getReplacementCharacter(self, char):
if char in self.replaceCache:
replacement = self.replaceCache[char]
else:
replacement = self.escapeChar(char)
return replacement
def fromXmlName(self, name):
for item in set(self.replacementRegexp.findall(name)):
name = name.replace(item, self.unescapeChar(item))
return name
def escapeChar(self, char):
replacement = "U%05X" % ord(char)
self.replaceCache[char] = replacement
return replacement
def unescapeChar(self, charcode):
return chr(int(charcode[1:], 16))
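# Usage sketch (not part of the original module); the inputs are made up, and
# the expected results follow from the methods defined above:
#
#   f = InfosetFilter(preventDoubleDashComments=True)
#   f.toXmlName("0bad")          # -> "U00030bad" (leading digit escaped)
#   f.fromXmlName("U00030bad")   # -> "0bad"
#   f.coerceComment("a--b")      # -> "a- -b"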
| mit |
jokimies/django-pj-portfolio | portfolio/forms.py | 1 | 1749 | from django import forms
from portfolio.models import Security, Transaction, Account
from currency_history.models import Currency
class BuyForm(forms.ModelForm):
#def __init__(self, *pa, **ka):
#super(BuyForm, self).__init__(*pa, **ka)
#self.fields['security'].queryset = Security.objects.all()
class Meta:
model = Transaction
exclude = ['account', 'cash_amount', 'sec_fee', 'split_ratio', ]
class DepositWithdrawForm(forms.ModelForm):
def __init__(self, *pa, **ka):
super(DepositWithdrawForm, self).__init__(*pa, **ka)
self.fields['security'].queryset = Security.objects.filter(name='$CASH')
class Meta:
model = Transaction
exclude = ['account', 'action', 'shares', 'price', 'commission',
'sec_fee', 'split_ratio', ]
class InterestForm(forms.Form):
date = forms.DateField()
amount = forms.DecimalField()
class DivForm(forms.ModelForm):
class Meta:
model = Transaction
exclude = ['account', 'action', 'shares', 'sec_fee', 'split_ratio', ]
class TxnBySecurityForm(forms.ModelForm):
class Meta:
model = Transaction
exclude = ['account', 'action', 'shares', 'sec_fee', 'split_ratio',
'cash_amount', 'commission', 'price', 'date',
'currency', 'exchange_rate']
class AccountForm(forms.ModelForm):
base_currency = forms.ModelChoiceField(queryset=Currency.objects.all(),initial={'base_currency':'USD'})
class Meta:
model = Account
fields = ['name', 'base_currency']
class TransactionDetailForm(forms.ModelForm):
class Meta:
model = Transaction
exclude = ['cash_amount', 'sec_fee', 'split_ratio']
| bsd-3-clause |
ojake/django | django/contrib/gis/gdal/__init__.py | 327 | 2635 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import ( # NOQA
GDALException, OGRException, OGRIndexError, SRSException, check_err,
)
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDALRaster', 'GDAL_VERSION', 'SpatialReference', 'CoordTransform',
'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
| bsd-3-clause |
edisonlz/fruit | web_project/base/site-packages/django_extensions/db/fields/__init__.py | 39 | 14535 | """
Django Extensions additional model fields
"""
import re
import six
import warnings
try:
import uuid
HAS_UUID = True
except ImportError:
HAS_UUID = False
try:
import shortuuid
HAS_SHORT_UUID = True
except ImportError:
HAS_SHORT_UUID = False
from django.core.exceptions import ImproperlyConfigured
from django.template.defaultfilters import slugify
from django.db.models import DateTimeField, CharField, SlugField
try:
from django.utils.timezone import now as datetime_now
assert datetime_now
except ImportError:
import datetime
datetime_now = datetime.datetime.now
try:
from django.utils.encoding import force_unicode # NOQA
except ImportError:
from django.utils.encoding import force_text as force_unicode # NOQA
class AutoSlugField(SlugField):
""" AutoSlugField
By default, sets editable=False, blank=True.
Required arguments:
populate_from
Specifies which field or list of fields the slug is populated from.
Optional arguments:
separator
Defines the used separator (default: '-')
overwrite
If set to True, overwrites the slug on every save (default: False)
Inspired by SmileyChris' Unique Slugify snippet:
http://www.djangosnippets.org/snippets/690/
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('blank', True)
kwargs.setdefault('editable', False)
populate_from = kwargs.pop('populate_from', None)
if populate_from is None:
raise ValueError("missing 'populate_from' argument")
else:
self._populate_from = populate_from
self.slugify_function = kwargs.pop('slugify_function', slugify)
self.separator = kwargs.pop('separator', six.u('-'))
self.overwrite = kwargs.pop('overwrite', False)
if not isinstance(self.overwrite, bool):
raise ValueError("'overwrite' argument must be True or False")
self.allow_duplicates = kwargs.pop('allow_duplicates', False)
if not isinstance(self.allow_duplicates, bool):
raise ValueError("'allow_duplicates' argument must be True or False")
super(AutoSlugField, self).__init__(*args, **kwargs)
def _slug_strip(self, value):
"""
Cleans up a slug by removing slug separator characters that occur at
the beginning or end of a slug.
If an alternate separator is used, it will also replace any instances
of the default '-' separator with the new separator.
"""
re_sep = '(?:-|%s)' % re.escape(self.separator)
value = re.sub('%s+' % re_sep, self.separator, value)
return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)
def get_queryset(self, model_cls, slug_field):
for field, model in model_cls._meta.get_fields_with_model():
if model and field == slug_field:
return model._default_manager.all()
return model_cls._default_manager.all()
def slugify_func(self, content):
if content:
return self.slugify_function(content)
return ''
def create_slug(self, model_instance, add):
# get fields to populate from and slug field to set
if not isinstance(self._populate_from, (list, tuple)):
self._populate_from = (self._populate_from, )
slug_field = model_instance._meta.get_field(self.attname)
if add or self.overwrite:
# slugify the original field content and set next step to 2
slug_for_field = lambda field: self.slugify_func(getattr(model_instance, field))
slug = self.separator.join(map(slug_for_field, self._populate_from))
next = 2
else:
# get slug from the current model instance
slug = getattr(model_instance, self.attname)
# model_instance is being modified, and overwrite is False,
# so instead of doing anything, just return the current slug
return slug
# strip slug depending on max_length attribute of the slug field
# and clean-up
slug_len = slug_field.max_length
if slug_len:
slug = slug[:slug_len]
slug = self._slug_strip(slug)
original_slug = slug
if self.allow_duplicates:
return slug
# exclude the current model instance from the queryset used in finding
# the next valid slug
queryset = self.get_queryset(model_instance.__class__, slug_field)
if model_instance.pk:
queryset = queryset.exclude(pk=model_instance.pk)
        # form a kwarg dict used to implement any unique_together constraints
kwargs = {}
for params in model_instance._meta.unique_together:
if self.attname in params:
for param in params:
kwargs[param] = getattr(model_instance, param, None)
kwargs[self.attname] = slug
# increases the number while searching for the next valid slug
# depending on the given slug, clean-up
while not slug or queryset.filter(**kwargs):
slug = original_slug
end = '%s%s' % (self.separator, next)
end_len = len(end)
if slug_len and len(slug) + end_len > slug_len:
slug = slug[:slug_len - end_len]
slug = self._slug_strip(slug)
slug = '%s%s' % (slug, end)
kwargs[self.attname] = slug
next += 1
return slug
def pre_save(self, model_instance, add):
value = force_unicode(self.create_slug(model_instance, add))
setattr(model_instance, self.attname, value)
return value
def get_internal_type(self):
return "SlugField"
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = '%s.AutoSlugField' % self.__module__
args, kwargs = introspector(self)
kwargs.update({
'populate_from': repr(self._populate_from),
'separator': repr(self.separator),
'overwrite': repr(self.overwrite),
'allow_duplicates': repr(self.allow_duplicates),
})
# That's our definition!
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(AutoSlugField, self).deconstruct()
kwargs['populate_from'] = self._populate_from
if not self.separator == six.u('-'):
kwargs['separator'] = self.separator
if self.overwrite is not False:
kwargs['overwrite'] = True
if self.allow_duplicates is not False:
kwargs['allow_duplicates'] = True
return name, path, args, kwargs
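# Usage sketch (hypothetical model, not part of this module): the slug is
# populated from `title` and de-duplicated with the configured separator.
#
#   class Article(models.Model):
#       title = models.CharField(max_length=100)
#       slug = AutoSlugField(populate_from='title')
#
#   # Saving two articles titled "Hello" yields slugs "hello" and "hello-2".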
class CreationDateTimeField(DateTimeField):
""" CreationDateTimeField
By default, sets editable=False, blank=True, default=datetime.now
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('editable', False)
kwargs.setdefault('blank', True)
kwargs.setdefault('default', datetime_now)
DateTimeField.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "DateTimeField"
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect ourselves, since we inherit.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.DateTimeField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(CreationDateTimeField, self).deconstruct()
if self.editable is not False:
kwargs['editable'] = True
if self.blank is not True:
kwargs['blank'] = False
if self.default is not datetime_now:
kwargs['default'] = self.default
return name, path, args, kwargs
class ModificationDateTimeField(CreationDateTimeField):
""" ModificationDateTimeField
By default, sets editable=False, blank=True, default=datetime.now
Sets value to datetime.now() on each save of the model.
"""
def pre_save(self, model, add):
value = datetime_now()
setattr(model, self.attname, value)
return value
def get_internal_type(self):
return "DateTimeField"
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect ourselves, since we inherit.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.DateTimeField"
args, kwargs = introspector(self)
return (field_class, args, kwargs)
class UUIDVersionError(Exception):
pass
class UUIDField(CharField):
""" UUIDField
By default uses UUID version 4 (randomly generated UUID).
    The field supports all uuid versions which are natively supported by the uuid python module, except version 2.
For more information see: http://docs.python.org/lib/module-uuid.html
"""
DEFAULT_MAX_LENGTH = 36
def __init__(self, verbose_name=None, name=None, auto=True, version=4, node=None, clock_seq=None, namespace=None, uuid_name=None, *args, **kwargs):
warnings.warn("Django 1.8 features a native UUIDField, this UUIDField will be removed after Django 1.7 becomes unsupported.", DeprecationWarning)
if not HAS_UUID:
raise ImproperlyConfigured("'uuid' module is required for UUIDField. (Do you have Python 2.5 or higher installed ?)")
kwargs.setdefault('max_length', self.DEFAULT_MAX_LENGTH)
if auto:
self.empty_strings_allowed = False
kwargs['blank'] = True
kwargs.setdefault('editable', False)
self.auto = auto
self.version = version
self.node = node
self.clock_seq = clock_seq
self.namespace = namespace
self.uuid_name = uuid_name or name
super(UUIDField, self).__init__(verbose_name=verbose_name, *args, **kwargs)
def create_uuid(self):
if not self.version or self.version == 4:
return uuid.uuid4()
elif self.version == 1:
return uuid.uuid1(self.node, self.clock_seq)
elif self.version == 2:
raise UUIDVersionError("UUID version 2 is not supported.")
elif self.version == 3:
return uuid.uuid3(self.namespace, self.uuid_name)
elif self.version == 5:
return uuid.uuid5(self.namespace, self.uuid_name)
else:
raise UUIDVersionError("UUID version %s is not valid." % self.version)
def pre_save(self, model_instance, add):
value = super(UUIDField, self).pre_save(model_instance, add)
if self.auto and add and value is None:
value = force_unicode(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
else:
if self.auto and not value:
value = force_unicode(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
def formfield(self, **kwargs):
if self.auto:
return None
return super(UUIDField, self).formfield(**kwargs)
def south_field_triple(self):
"Returns a suitable description of this field for South."
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.CharField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
def deconstruct(self):
name, path, args, kwargs = super(UUIDField, self).deconstruct()
if kwargs.get('max_length', None) == self.DEFAULT_MAX_LENGTH:
del kwargs['max_length']
if self.auto is not True:
kwargs['auto'] = self.auto
if self.version != 4:
kwargs['version'] = self.version
if self.node is not None:
kwargs['node'] = self.node
if self.clock_seq is not None:
kwargs['clock_seq'] = self.clock_seq
if self.namespace is not None:
kwargs['namespace'] = self.namespace
if self.uuid_name is not None:
kwargs['uuid_name'] = self.name
return name, path, args, kwargs
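# Usage sketch (hypothetical model): with auto=True the field generates a
# version-4 UUID string on first save and is excluded from forms.
#
#   class Ticket(models.Model):
#       uuid = UUIDField(auto=True, version=4)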
class PostgreSQLUUIDField(UUIDField):
def __init__(self, *args, **kwargs):
warnings.warn("Django 1.8 features a native UUIDField, this UUIDField will be removed after Django 1.7 becomes unsupported.", DeprecationWarning)
super(PostgreSQLUUIDField, self).__init__(*args, **kwargs)
def db_type(self, connection=None):
return "UUID"
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, six.integer_types):
value = uuid.UUID(int=value)
elif isinstance(value, (six.string_types, six.binary_type)):
if len(value) == 16:
value = uuid.UUID(bytes=value)
else:
value = uuid.UUID(value)
return super(PostgreSQLUUIDField, self).get_db_prep_value(
value, connection, prepared=False)
class ShortUUIDField(UUIDField):
""" ShortUUIDFied
Generates concise (22 characters instead of 36), unambiguous, URL-safe UUIDs.
Based on `shortuuid`: https://github.com/stochastic-technologies/shortuuid
"""
DEFAULT_MAX_LENGTH = 22
    def __init__(self, *args, **kwargs):
        if not HAS_SHORT_UUID:
            raise ImproperlyConfigured("'shortuuid' module is required for ShortUUIDField. (Do you have Python 2.5 or higher installed ?)")
        kwargs.setdefault('max_length', self.DEFAULT_MAX_LENGTH)
        super(ShortUUIDField, self).__init__(*args, **kwargs)
def create_uuid(self):
if not self.version or self.version == 4:
return shortuuid.uuid()
elif self.version == 1:
return shortuuid.uuid()
elif self.version == 2:
raise UUIDVersionError("UUID version 2 is not supported.")
elif self.version == 3:
raise UUIDVersionError("UUID version 3 is not supported.")
elif self.version == 5:
return shortuuid.uuid(name=self.namespace)
else:
raise UUIDVersionError("UUID version %s is not valid." % self.version)
| apache-2.0 |
stefanfoulis/django-multilingual | multilingual/admin.py | 2 | 8098 | from django.contrib import admin
from django.forms.models import BaseInlineFormSet
from django.forms.fields import BooleanField
from django.forms.formsets import DELETION_FIELD_NAME
from django.forms.util import ErrorDict
from django.utils.translation import ugettext as _
from multilingual.languages import *
from multilingual.utils import is_multilingual_model
def _translation_form_full_clean(self, previous_full_clean):
"""
There is a bug in Django that causes inline forms to be
validated even if they are marked for deletion.
This function fixes that by disabling validation
completely if the delete field is marked and only copying
the absolutely required fields: PK and FK to parent.
TODO: create a fix for Django, have it accepted into trunk and get
rid of this monkey patch.
"""
def cleaned_value(name):
field = self.fields[name]
val = field.widget.value_from_datadict(self.data, self.files,
self.add_prefix(name))
return field.clean(val)
delete = cleaned_value(DELETION_FIELD_NAME)
if delete:
# this object is to be skipped or deleted, so only
# construct the minimal cleaned_data
self.cleaned_data = {'DELETE': delete,
'id': cleaned_value('id')}
self._errors = ErrorDict()
else:
return previous_full_clean()
class TranslationInlineFormSet(BaseInlineFormSet):
def _construct_forms(self):
## set the right default values for language_ids of empty (new) forms
super(TranslationInlineFormSet, self)._construct_forms()
empty_forms = []
lang_id_list = get_language_id_list()
lang_to_form = dict(zip(lang_id_list, [None] * len(lang_id_list)))
for form in self.forms:
language_id = form.initial.get('language_id')
if language_id:
lang_to_form[language_id] = form
else:
empty_forms.append(form)
for language_id in lang_id_list:
form = lang_to_form[language_id]
if form is None:
form = empty_forms.pop(0)
form.initial['language_id'] = language_id
def add_fields(self, form, index):
super(TranslationInlineFormSet, self).add_fields(form, index)
previous_full_clean = form.full_clean
form.full_clean = lambda: _translation_form_full_clean(form, \
previous_full_clean)
class TranslationModelAdmin(admin.StackedInline):
template = "admin/edit_inline_translations_newforms.html"
fk_name = 'master'
extra = get_language_count()
max_num = get_language_count()
formset = TranslationInlineFormSet
class ModelAdminClass(admin.ModelAdmin.__metaclass__):
"""
A metaclass for ModelAdmin below.
"""
def __new__(cls, name, bases, attrs):
# Move prepopulated_fields somewhere where Django won't see
# them. We have to handle them ourselves.
prepopulated_fields = attrs.get('prepopulated_fields', {})
attrs['prepopulated_fields'] = {}
attrs['_dm_prepopulated_fields'] = prepopulated_fields
return super(ModelAdminClass, cls).__new__(cls, name, bases, attrs)
class ModelAdmin(admin.ModelAdmin):
"""
All model admins for multilingual models must inherit this class
instead of django.contrib.admin.ModelAdmin.
"""
__metaclass__ = ModelAdminClass
def _media(self):
media = super(ModelAdmin, self)._media()
if getattr(self.__class__, '_dm_prepopulated_fields', None):
from django.conf import settings
media.add_js(['%sjs/urlify.js' % (settings.ADMIN_MEDIA_PREFIX,)])
return media
media = property(_media)
def render_change_form(self, request, context, add=False, change=False,
form_url='', obj=None):
# I'm overriding render_change_form to inject information
# about prepopulated_fields
trans_model = self.model._meta.translation_model
trans_fields = trans_model._meta.translated_fields
adminform = context['adminform']
form = adminform.form
def field_name_to_fake_field(field_name):
"""
Return something that looks like a form field enough to
fool prepopulated_fields_js.html
For field_names of real fields in self.model this actually
returns a real form field.
"""
try:
field, language_id = trans_fields[field_name]
if language_id is None:
language_id = get_default_language()
# TODO: we have this mapping between language_id and
# field id in two places -- here and in
# edit_inline_translations_newforms.html
# It is not DRY.
field_idx = language_id - 1
ret = {'auto_id': 'id_translations-%d-%s' % \
(field_idx, field.name)}
except:
ret = form[field_name]
return ret
adminform.prepopulated_fields = [{
'field': field_name_to_fake_field(field_name),
'dependencies': [field_name_to_fake_field(f) for f in dependencies]
} for field_name, dependencies in self._dm_prepopulated_fields.items()]
return super(ModelAdmin, self).render_change_form(request, context,
add, change, form_url, obj)
def get_translation_modeladmin(cls, model):
if hasattr(cls, 'Translation'):
tr_cls = cls.Translation
if not issubclass(tr_cls, TranslationModelAdmin):
raise ValueError, ("%s.Translation must be a subclass " \
" of multilingual.TranslationModelAdmin.") % \
cls.name
else:
tr_cls = type("%s.Translation" % cls.__name__, (TranslationModelAdmin,), {})
tr_cls.model = model._meta.translation_model
return tr_cls
# TODO: multilingual_modeladmin_new should go away soon. The code will
# be split between the ModelAdmin class, its metaclass and validation
# code.
def multilingual_modeladmin_new(cls, model, admin_site, obj=None):
if is_multilingual_model(model):
if cls is admin.ModelAdmin:
# the model is being registered with the default
# django.contrib.admin.options.ModelAdmin. Replace it
# with our ModelAdmin, since it is safe to assume it is a
# simple call to admin.site.register without just model
# passed
# subclass it, because we need to set the inlines class
# attribute below
cls = type("%sAdmin" % model.__name__, (ModelAdmin,), {})
# make sure it subclasses multilingual.ModelAdmin
if not issubclass(cls, ModelAdmin):
from warnings import warn
warn("%s should be registered with a subclass of "
" of multilingual.ModelAdmin." % model, DeprecationWarning)
# if the inlines already contain a class for the
# translation model, use it and don't create another one
translation_modeladmin = None
for inline in getattr(cls, 'inlines', []):
if inline.model == model._meta.translation_model:
translation_modeladmin = inline
if not translation_modeladmin:
translation_modeladmin = get_translation_modeladmin(cls, model)
if cls.inlines:
cls.inlines = type(cls.inlines)((translation_modeladmin,)) + cls.inlines
else:
cls.inlines = [translation_modeladmin]
return admin.ModelAdmin._original_new_before_dm(cls, model, admin_site, obj)
def install_multilingual_modeladmin_new():
"""
Override ModelAdmin.__new__ to create automatic inline
editor for multilingual models.
"""
admin.ModelAdmin._original_new_before_dm = admin.ModelAdmin.__new__
admin.ModelAdmin.__new__ = staticmethod(multilingual_modeladmin_new)
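# Usage sketch (illustrative): once the patch is installed, registering a
# multilingual model with the default ModelAdmin picks up an automatic inline
# editor for its translations.
#
#   install_multilingual_modeladmin_new()
#   admin.site.register(MyMultilingualModel)   # hypothetical model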
| mit |
t794104/ansible | test/units/modules/network/f5/test_bigip_data_group.py | 16 | 16352 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_data_group import ModuleParameters
from library.modules.bigip_data_group import ModuleManager
from library.modules.bigip_data_group import ExternalManager
from library.modules.bigip_data_group import InternalManager
from library.modules.bigip_data_group import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_data_group import ModuleParameters
from ansible.modules.network.f5.bigip_data_group import ModuleManager
from ansible.modules.network.f5.bigip_data_group import ExternalManager
from ansible.modules.network.f5.bigip_data_group import InternalManager
from ansible.modules.network.f5.bigip_data_group import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
type='address',
delete_data_group_file=False,
internal=False,
records=[
dict(
key='10.10.10.10/32',
value='bar'
)
],
separator=':=',
state='present',
partition='Common'
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.type == 'ip'
assert p.delete_data_group_file is False
assert len(p.records) == 1
assert 'data' in p.records[0]
assert 'name' in p.records[0]
assert p.records[0]['data'] == 'bar'
assert p.records[0]['name'] == '10.10.10.10/32'
assert p.separator == ':='
assert p.state == 'present'
assert p.partition == 'Common'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_external_datagroup_type_string(self, *args):
set_module_args(dict(
name='foo',
delete_data_group_file=False,
internal=False,
records_src="{0}/data-group-string.txt".format(fixture_path),
separator=':=',
state='present',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
mm1 = ExternalManager(module=module, params=module.params)
mm1.exists = Mock(side_effect=[False, True])
mm1.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm0 = ModuleManager(module=module)
mm0.get_manager = Mock(return_value=mm1)
results = mm0.exec_module()
assert results['changed'] is True
def test_create_external_incorrect_address_data(self, *args):
set_module_args(dict(
name='foo',
delete_data_group_file=False,
internal=False,
type='address',
records_src="{0}/data-group-string.txt".format(fixture_path),
separator=':=',
state='present',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
mm1 = ExternalManager(module=module, params=module.params)
mm1.exists = Mock(side_effect=[False, True])
mm1.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm0 = ModuleManager(module=module)
mm0.get_manager = Mock(return_value=mm1)
with pytest.raises(F5ModuleError) as ex:
mm0.exec_module()
assert "When specifying an 'address' type, the value to the left of the separator must be an IP." == str(ex.value)
def test_create_external_incorrect_integer_data(self, *args):
set_module_args(dict(
name='foo',
delete_data_group_file=False,
internal=False,
type='integer',
records_src="{0}/data-group-string.txt".format(fixture_path),
separator=':=',
state='present',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
mm1 = ExternalManager(module=module, params=module.params)
mm1.exists = Mock(side_effect=[False, True])
mm1.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm0 = ModuleManager(module=module)
mm0.get_manager = Mock(return_value=mm1)
with pytest.raises(F5ModuleError) as ex:
mm0.exec_module()
assert "When specifying an 'integer' type, the value to the left of the separator must be a number." == str(ex.value)
def test_remove_data_group_keep_file(self, *args):
set_module_args(dict(
name='foo',
delete_data_group_file=False,
internal=False,
state='absent',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
mm1 = ExternalManager(module=module, params=module.params)
mm1.exists = Mock(side_effect=[True, False])
mm1.remove_from_device = Mock(return_value=True)
mm1.external_file_exists = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm0 = ModuleManager(module=module)
mm0.get_manager = Mock(return_value=mm1)
results = mm0.exec_module()
assert results['changed'] is True
def test_remove_data_group_remove_file(self, *args):
set_module_args(dict(
name='foo',
delete_data_group_file=True,
internal=False,
state='absent',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
mm1 = ExternalManager(module=module, params=module.params)
mm1.exists = Mock(side_effect=[True, False])
mm1.remove_from_device = Mock(return_value=True)
mm1.external_file_exists = Mock(return_value=True)
mm1.remove_data_group_file_from_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm0 = ModuleManager(module=module)
mm0.get_manager = Mock(return_value=mm1)
results = mm0.exec_module()
assert results['changed'] is True
def test_create_internal_datagroup_type_string(self, *args):
set_module_args(dict(
name='foo',
delete_data_group_file=False,
internal=True,
records_src="{0}/data-group-string.txt".format(fixture_path),
separator=':=',
state='present',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
mm1 = InternalManager(module=module, params=module.params)
mm1.exists = Mock(side_effect=[False, True])
mm1.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm0 = ModuleManager(module=module)
mm0.get_manager = Mock(return_value=mm1)
results = mm0.exec_module()
assert results['changed'] is True
def test_create_internal_incorrect_integer_data(self, *args):
set_module_args(dict(
name='foo',
delete_data_group_file=False,
internal=True,
type='integer',
records_src="{0}/data-group-string.txt".format(fixture_path),
separator=':=',
state='present',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
mm1 = InternalManager(module=module, params=module.params)
mm1.exists = Mock(side_effect=[False, True])
mm1.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm0 = ModuleManager(module=module)
mm0.get_manager = Mock(return_value=mm1)
with pytest.raises(F5ModuleError) as ex:
mm0.exec_module()
assert "When specifying an 'integer' type, the value to the left of the separator must be a number." == str(ex.value)
def test_create_internal_datagroup_type_integer(self, *args):
set_module_args(dict(
name='foo',
delete_data_group_file=False,
internal=True,
type='integer',
records_src="{0}/data-group-integer.txt".format(fixture_path),
separator=':=',
state='present',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
mm1 = InternalManager(module=module, params=module.params)
mm1.exists = Mock(side_effect=[False, True])
mm1.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm0 = ModuleManager(module=module)
mm0.get_manager = Mock(return_value=mm1)
results = mm0.exec_module()
assert results['changed'] is True
def test_create_internal_datagroup_type_address(self, *args):
set_module_args(dict(
name='foo',
delete_data_group_file=False,
internal=True,
type='address',
records_src="{0}/data-group-address.txt".format(fixture_path),
separator=':=',
state='present',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
mm1 = InternalManager(module=module, params=module.params)
mm1.exists = Mock(side_effect=[False, True])
mm1.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm0 = ModuleManager(module=module)
mm0.get_manager = Mock(return_value=mm1)
results = mm0.exec_module()
assert results['changed'] is True
def test_create_internal_datagroup_type_address_list(self, *args):
set_module_args(dict(
name='foo',
delete_data_group_file=False,
internal=True,
type='address',
records=[
dict(
key='10.0.0.0/8',
value='Network1'
),
dict(
key='172.16.0.0/12',
value='Network2'
),
dict(
key='192.168.20.1/16',
value='Network3'
),
dict(
key='192.168.20.1',
value='Host1'
),
dict(
key='172.16.1.1',
value='Host2'
)
],
separator=':=',
state='present',
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
mm1 = InternalManager(module=module, params=module.params)
mm1.exists = Mock(side_effect=[False, True])
mm1.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm0 = ModuleManager(module=module)
mm0.get_manager = Mock(return_value=mm1)
results = mm0.exec_module()
assert results['changed'] is True
| gpl-3.0 |
FreeAgent/djangoappengine-starter | django/contrib/admindocs/urls.py | 336 | 1089 | from django.conf.urls.defaults import *
from django.contrib.admindocs import views
urlpatterns = patterns('',
url('^$',
views.doc_index,
name='django-admindocs-docroot'
),
url('^bookmarklets/$',
views.bookmarklets,
name='django-admindocs-bookmarklets'
),
url('^tags/$',
views.template_tag_index,
name='django-admindocs-tags'
),
url('^filters/$',
views.template_filter_index,
name='django-admindocs-filters'
),
url('^views/$',
views.view_index,
name='django-admindocs-views-index'
),
url('^views/(?P<view>[^/]+)/$',
views.view_detail,
name='django-admindocs-views-detail'
),
url('^models/$',
views.model_index,
name='django-admindocs-models-index'
),
url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
views.model_detail,
name='django-admindocs-models-detail'
),
url('^templates/(?P<template>.*)/$',
views.template_detail,
name='django-admindocs-templates'
),
)
| bsd-3-clause |
hguemar/cinder | cinder/tests/test_ibmnas.py | 1 | 18917 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Nilesh Bhosale <[email protected]>
# Sasikanth Eda <[email protected]>
"""
Tests for the IBM NAS family (SONAS, Storwize V7000 Unified,
NAS based IBM GPFS Storage Systems).
"""
import mock
from oslo.utils import units
from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import ibmnas
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class FakeEnv(object):
fields = {}
def __setitem__(self, key, value):
self.fields[key] = value
def __getitem__(self, item):
return self.fields[item]
class IBMNASDriverTestCase(test.TestCase):
TEST_NFS_EXPORT = 'nfs-host1:/export'
TEST_SIZE_IN_GB = 1
TEST_EXTEND_SIZE_IN_GB = 2
TEST_MNT_POINT = '/mnt/nfs'
TEST_MNT_POINT_BASE = '/mnt'
TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
TEST_VOLUME_PATH = '/export/volume-123'
TEST_SNAP_PATH = '/export/snapshot-123'
def setUp(self):
super(IBMNASDriverTestCase, self).setUp()
self._driver = ibmnas.IBMNAS_NFSDriver(configuration=
conf.Configuration(None))
self._mock = mock.Mock()
self._def_flags = {'nas_ip': 'hostname',
'nas_login': 'user',
'nas_ssh_port': 22,
'nas_password': 'pass',
'nas_private_key': 'nas.key',
'ibmnas_platform_type': 'v7ku',
'nfs_shares_config': None,
'nfs_sparsed_volumes': True,
'nfs_used_ratio': 0.95,
'nfs_oversub_ratio': 1.0,
'nfs_mount_point_base':
self.TEST_MNT_POINT_BASE,
'nfs_mount_options': None}
self.context = context.get_admin_context()
self.context.user_id = 'fake'
self.context.project_id = 'fake'
def _set_flag(self, flag, value):
group = self._driver.configuration.config_group
self._driver.configuration.set_override(flag, value, group)
def _reset_flags(self):
self._driver.configuration.local_conf.reset()
for k, v in self._def_flags.iteritems():
self._set_flag(k, v)
def test_check_for_setup_error(self):
"""Check setup with bad parameters."""
drv = self._driver
required_flags = [
'nas_ip',
'nas_login',
'nas_ssh_port']
for flag in required_flags:
self._set_flag(flag, None)
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
self._set_flag('nas_password', None)
self._set_flag('nas_private_key', None)
self.assertRaises(exception.InvalidInput,
self._driver.check_for_setup_error)
self._set_flag('ibmnas_platform_type', None)
self.assertRaises(exception.InvalidInput,
self._driver.check_for_setup_error)
self._reset_flags()
def test_get_provider_location(self):
"""Check provider location for given volume id."""
mock = self._mock
volume = FakeEnv()
volume['id'] = '123'
mock.drv._get_provider_location.return_value = self.TEST_NFS_EXPORT
self.assertEqual(self.TEST_NFS_EXPORT,
mock.drv._get_provider_location(volume['id']))
def test_get_export_path(self):
"""Check export path for the given volume."""
mock = self._mock
volume = FakeEnv()
volume['id'] = '123'
mock.drv._get_export_path.return_value = self.TEST_NFS_EXPORT.\
split(':')[1]
self.assertEqual(self.TEST_NFS_EXPORT.split(':')[1],
mock.drv._get_export_path(volume['id']))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ensure_shares_mounted')
def test_update_volume_stats(self, mock_ensure):
"""Check update volume stats."""
drv = self._driver
mock_ensure.return_value = True
fake_avail = 80 * units.Gi
fake_size = 2 * fake_avail
fake_used = 10 * units.Gi
with mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_capacity_info',
return_value=(fake_avail, fake_size, fake_used)):
stats = drv.get_volume_stats()
self.assertEqual(stats['volume_backend_name'], 'IBMNAS_NFS')
self.assertEqual(stats['storage_protocol'], 'nfs')
self.assertEqual(stats['driver_version'], '1.1.0')
self.assertEqual(stats['vendor_name'], 'IBM')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver._run_ssh')
def test_ssh_operation(self, mock_ssh):
drv = self._driver
mock_ssh.return_value = None
self.assertEqual(None, drv._ssh_operation('ssh_cmd'))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver._run_ssh')
def test_ssh_operation_exception(self, mock_ssh):
drv = self._driver
mock_ssh.side_effect = (
exception.VolumeBackendAPIException(data='Failed'))
self.assertRaises(exception.VolumeBackendAPIException,
drv._ssh_operation, 'ssh_cmd')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
@mock.patch('oslo_concurrency.processutils.execute')
def test_create_ibmnas_snap_mount_point_provided(self, mock_execute,
mock_ssh):
"""Create ibmnas snap if mount point is provided."""
drv = self._driver
mock_ssh.return_value = True
mock_execute.return_value = True
self.assertEqual(None, drv._create_ibmnas_snap(self.TEST_VOLUME_PATH,
self.TEST_SNAP_PATH,
self.TEST_MNT_POINT))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
@mock.patch('oslo_concurrency.processutils.execute')
def test_create_ibmnas_snap_nas_gpfs(self, mock_execute, mock_ssh):
"""Create ibmnas snap if mount point is provided."""
drv = self._driver
drv.configuration.platform = 'gpfs-nas'
mock_ssh.return_value = True
mock_execute.return_value = True
self.assertEqual(None, drv._create_ibmnas_snap(self.TEST_VOLUME_PATH,
self.TEST_SNAP_PATH,
self.TEST_MNT_POINT))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
def test_create_ibmnas_snap_no_mount_point_provided(self, mock_ssh):
"""Create ibmnas snap if no mount point is provided."""
drv = self._driver
mock_ssh.return_value = True
self.assertEqual(None, drv._create_ibmnas_snap(self.TEST_VOLUME_PATH,
self.TEST_SNAP_PATH,
None))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
def test_create_ibmnas_snap_nas_gpfs_no_mount(self, mock_ssh):
"""Create ibmnas snap (gpfs-nas) if mount point is provided."""
drv = self._driver
drv.configuration.platform = 'gpfs-nas'
mock_ssh.return_value = True
drv._create_ibmnas_snap(self.TEST_VOLUME_PATH,
self.TEST_SNAP_PATH, None)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
def test_create_ibmnas_copy(self, mock_ssh):
"""Create ibmnas copy test case."""
drv = self._driver
TEST_DEST_SNAP = '/export/snapshot-123.snap'
TEST_DEST_PATH = '/export/snapshot-123'
mock_ssh.return_value = True
drv._create_ibmnas_copy(self.TEST_VOLUME_PATH,
TEST_DEST_PATH,
TEST_DEST_SNAP)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_ssh_operation')
def test_create_ibmnas_copy_nas_gpfs(self, mock_ssh):
"""Create ibmnas copy for gpfs-nas platform test case."""
drv = self._driver
TEST_DEST_SNAP = '/export/snapshot-123.snap'
TEST_DEST_PATH = '/export/snapshot-123'
drv.configuration.platform = 'gpfs-nas'
mock_ssh.return_value = True
drv._create_ibmnas_copy(self.TEST_VOLUME_PATH,
TEST_DEST_PATH,
TEST_DEST_SNAP)
@mock.patch('cinder.image.image_utils.resize_image')
def test_resize_volume_file(self, mock_size):
"""Resize volume file test case."""
drv = self._driver
mock_size.return_value = True
self.assertEqual(True, drv._resize_volume_file(self.TEST_LOCAL_PATH,
self.TEST_EXTEND_SIZE_IN_GB))
@mock.patch('cinder.image.image_utils.resize_image')
def test_resize_volume_exception(self, mock_size):
"""Resize volume file test case."""
drv = self._driver
mock_size.side_effect = (
exception.VolumeBackendAPIException(data='Failed'))
self.assertRaises(exception.VolumeBackendAPIException,
drv._resize_volume_file,
self.TEST_LOCAL_PATH,
self.TEST_EXTEND_SIZE_IN_GB)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_resize_volume_file')
def test_extend_volume(self, mock_resize, mock_local):
"""Extend volume to greater size test case."""
drv = self._driver
mock_local.return_value = self.TEST_LOCAL_PATH
mock_resize.return_value = True
volume = FakeEnv()
volume['name'] = 'vol-123'
drv.extend_volume(volume,
self.TEST_EXTEND_SIZE_IN_GB)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver._run_ssh')
@mock.patch('oslo_concurrency.processutils.execute')
def test_delete_snapfiles(self, mock_execute, mock_ssh):
"""Delete_snapfiles test case."""
drv = self._driver
expected = ('Parent Depth Parent inode'
'File name\n yes 0 /ibm/gpfs0/gshare/\n'
'volume-123\n EFSSG1000I The command'
'completed successfully.', '')
mock_ssh.return_value = expected
mock_execute.return_value = expected
drv._delete_snapfiles(self.TEST_VOLUME_PATH,
self.TEST_MNT_POINT)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver._run_ssh')
@mock.patch('oslo_concurrency.processutils.execute')
def test_delete_snapfiles_nas_gpfs(self, mock_execute, mock_ssh):
"""Delete_snapfiles for gpfs-nas platform test case."""
drv = self._driver
drv.configuration.platform = 'gpfs-nas'
expected = ('Parent Depth Parent inode'
'File name\n'
'------ ----- -------------'
'- ---------\n'
'yes 0\n'
'/ibm/gpfs0/gshare/volume-123', '')
mock_ssh.return_value = expected
mock_execute.return_value = expected
drv._delete_snapfiles(self.TEST_VOLUME_PATH,
self.TEST_MNT_POINT)
def test_delete_volume_no_provider_location(self):
"""Delete volume with no provider location specified."""
drv = self._driver
volume = FakeEnv()
volume['name'] = 'volume-123'
volume['provider_location'] = None
result = drv.delete_volume(volume)
self.assertIsNone(result)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_export_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_delete_snapfiles')
def test_delete_volume(self, mock_snap, mock_export):
"""Delete volume test case."""
drv = self._driver
mock_export.return_value = self.TEST_VOLUME_PATH
mock_snap.return_value = True
volume = FakeEnv()
volume['id'] = '123'
volume['name'] = '/volume-123'
volume['provider_location'] = self.TEST_VOLUME_PATH
self.assertEqual(None, drv.delete_volume(volume))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_export_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_provider_location')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_mount_point_for_share')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_create_ibmnas_snap')
def test_create_snapshot(self, mock_snap, mock_mount, mock_provider,
mock_export):
"""Create snapshot simple test case."""
drv = self._driver
mock_export.return_value = self.TEST_LOCAL_PATH
mock_provider.return_value = self.TEST_VOLUME_PATH
mock_mount.return_value = self.TEST_MNT_POINT
mock_snap.return_value = True
volume = FakeEnv()
volume['id'] = '123'
volume['name'] = 'volume-123'
snapshot = FakeEnv()
snapshot['volume_id'] = volume['id']
snapshot['volume_name'] = '/volume-123'
snapshot['name'] = '/snapshot-123'
drv.create_snapshot(snapshot)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_provider_location')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_mount_point_for_share')
@mock.patch('oslo_concurrency.processutils.execute')
def test_delete_snapshot(self, mock_execute, mock_mount, mock_provider):
"""Delete snapshot simple test case."""
drv = self._driver
mock_provider.return_value = self.TEST_VOLUME_PATH
mock_mount.return_value = self.TEST_LOCAL_PATH
mock_execute.return_value = True
volume = FakeEnv()
volume['id'] = '123'
volume['provider_location'] = self.TEST_NFS_EXPORT
snapshot = FakeEnv()
snapshot['volume_id'] = volume['id']
snapshot['volume_name'] = 'volume-123'
snapshot['name'] = 'snapshot-123'
drv.delete_snapshot(snapshot)
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_export_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_create_ibmnas_copy')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_find_share')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_set_rw_permissions_for_owner')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_resize_volume_file')
def test_create_cloned_volume(self, mock_resize, mock_rw, mock_local,
mock_find, mock_copy, mock_export):
"""Clone volume with equal size test case."""
drv = self._driver
mock_export.return_value = self.TEST_VOLUME_PATH
mock_copy.return_value = True
mock_find.return_value = self.TEST_LOCAL_PATH
mock_local.return_value = self.TEST_LOCAL_PATH
mock_rw.return_value = True
mock_resize.return_value = True
volume_src = FakeEnv()
volume_src['id'] = '123'
volume_src['name'] = '/volume-123'
volume_src.size = self.TEST_SIZE_IN_GB
volume_dest = FakeEnv()
volume_dest['id'] = '456'
volume_dest['name'] = '/volume-456'
volume_dest['size'] = self.TEST_SIZE_IN_GB
volume_dest.size = self.TEST_SIZE_IN_GB
self.assertEqual({'provider_location': self.TEST_LOCAL_PATH},
drv.create_cloned_volume(volume_dest, volume_src))
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_get_export_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_create_ibmnas_snap')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_find_share')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_set_rw_permissions_for_owner')
@mock.patch('cinder.volume.drivers.ibm.ibmnas.IBMNAS_NFSDriver.'
'_resize_volume_file')
def test_create_volume_from_snapshot(self, mock_resize, mock_rw,
mock_local, mock_find, mock_snap,
mock_export):
"""Create volume from snapshot test case."""
drv = self._driver
mock_export.return_value = '/export'
mock_snap.return_value = self.TEST_LOCAL_PATH
mock_find.return_value = self.TEST_LOCAL_PATH
mock_local.return_value = self.TEST_VOLUME_PATH
mock_rw.return_value = True
mock_resize.return_value = True
volume = FakeEnv()
volume['id'] = '123'
volume['name'] = '/volume-123'
volume['size'] = self.TEST_SIZE_IN_GB
snapshot = FakeEnv()
snapshot['volume_id'] = volume['id']
snapshot['volume_name'] = 'volume-123'
snapshot['volume_size'] = self.TEST_SIZE_IN_GB
snapshot.name = '/snapshot-123'
self.assertEqual({'provider_location': self.TEST_LOCAL_PATH},
drv.create_volume_from_snapshot(volume, snapshot))
| apache-2.0 |
CanonicalLtd/subiquity | subiquitycore/ui/actionmenu.py | 1 | 4904 | # Copyright 2018 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import attr
from urwid import (
ACTIVATE,
AttrWrap,
Button,
connect_signal,
LineBox,
PopUpLauncher,
SelectableIcon,
Text,
Widget,
)
from subiquitycore.ui.container import (
Columns,
ListBox,
WidgetWrap,
)
from subiquitycore.ui.utils import Color
class ActionBackButton(Button):
button_left = Text("\N{BLACK LEFT-POINTING SMALL TRIANGLE}")
button_right = Text("")
class ActionMenuOpenButton(Button):
button_left = Text("")
button_right = Text("\N{BLACK RIGHT-POINTING SMALL TRIANGLE}")
class ActionMenuButton(Button):
button_left = Text("")
button_right = Text("")
class _ActionMenuDialog(WidgetWrap):
"""A list of action buttons with a box around them."""
def __init__(self, parent):
self.parent = parent
close_text = "(close)"
close = ActionBackButton(close_text)
connect_signal(close, "click", self.close)
group = [Color.menu_button(close)]
width = len(close_text)
for i, action in enumerate(self.parent._actions):
if action.enabled:
if isinstance(action.label, Widget):
btn = action.label
elif action.opens_dialog:
btn = Color.menu_button(ActionMenuOpenButton(action.label))
else:
btn = Color.menu_button(ActionMenuButton(action.label))
width = max(width, len(btn.base_widget.label))
connect_signal(
btn.base_widget, 'click', self.click, action.value)
else:
label = action.label
if isinstance(label, Widget):
label = label.base_widget.label
width = max(width, len(label))
if action.opens_dialog:
rhs = "\N{BLACK RIGHT-POINTING SMALL TRIANGLE}"
else:
rhs = ""
btn = Columns([
('fixed', 1, Text("")),
Text(label),
('fixed', 1, Text(rhs)),
], dividechars=1)
btn = AttrWrap(btn, 'info_minor')
group.append(btn)
self.width = width
super().__init__(Color.body(LineBox(ListBox(group))))
def close(self, sender):
self.parent.close_pop_up()
def click(self, btn, value):
self.parent._action(value)
self.parent.close_pop_up()
def keypress(self, size, key):
if key == 'esc':
self.parent.close_pop_up()
else:
return super().keypress(size, key)
@attr.s
class Action:
# The label that is shown in the menu
label = attr.ib()
enabled = attr.ib()
# The value passed along with the 'action' signal
value = attr.ib()
# Actions that open a dialog get a > at the end.
opens_dialog = attr.ib(default=False)
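# A minimal sketch of how these pieces fit together (names in the example are
# hypothetical, not taken from this module):
#   menu = ActionMenu([Action(label="Edit", enabled=True, value="edit")])
#   connect_signal(menu, 'action', lambda menu, value: handle_action(value))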
class ActionMenu(PopUpLauncher):
signals = ['action', 'open', 'close']
def __init__(self, opts,
icon="\N{BLACK RIGHT-POINTING SMALL TRIANGLE}"):
self._actions = []
for opt in opts:
if not isinstance(opt, Action):
opt = Action(*opt)
self._actions.append(opt)
self.icon = icon
self._button = SelectableIcon(self.icon, 0)
super().__init__(self._button)
self._dialog = _ActionMenuDialog(self)
def get_natural_width(self):
return len(self.icon)
def keypress(self, size, key):
if self._command_map[key] != ACTIVATE:
return key
self.open_pop_up()
def _action(self, action):
self._emit("action", action)
def open_pop_up(self):
self._dialog._w.base_widget.focus_position = 0
self._emit("open")
super().open_pop_up()
def close_pop_up(self):
self._emit("close")
super().close_pop_up()
def create_pop_up(self):
return self._dialog
def get_pop_up_parameters(self):
width = self._dialog.width + 7
return {
'left': 1,
'top': -1,
'overlay_width': width,
'overlay_height': len(self._actions) + 3,
}
| agpl-3.0 |
inspirehep/json-merger | json_merger/version.py | 1 | 1199 | # -*- coding: utf-8 -*-
#
# This file is part of Inspirehep.
# Copyright (C) 2016, 2017, 2018 CERN.
#
# Inspirehep is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Inspirehep is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Inspirehep; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Version information for json-merger.
This file is imported by ``json_merger.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = "0.7.1"
| gpl-2.0 |
rbardak/espresso | samples/python/debye_hueckel.py | 2 | 7857 | #
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd._system as es
import espressomd
from espressomd import thermostat
from espressomd import code_info
from espressomd import analyze
from espressomd import integrate
from espressomd import electrostatics
import numpy
print("""
=======================================================
= debye_hueckel.py =
=======================================================
Program Information:""")
print(code_info.features())
dev = "cpu"
# Constants
#############################################################
N_A = 6.022e23
pi = 3.14159265359
# System parameters
#############################################################
box_l = 10
# Molar salt concentration
mol_dens = 0.1
# Number density of ions
num_dens = mol_dens * N_A
# Convert to MD units with lj_sig = 7.14 Angstrom
num_dens = num_dens * 3.64e-25
volume = box_l * box_l * box_l
n_part = int(volume * num_dens)
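# Rough arithmetic behind the numbers above (approximate): 0.1 mol/l * N_A is
# about 6.0e22 ions per litre; 3.64e-25 l is the volume of one sigma^3 for
# lj_sig = 0.714 nm, so num_dens is about 0.022 per sigma^3 and, with a
# 10x10x10 box, n_part works out to roughly 21 ions.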
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System()
system.time_step = 0.01
system.skin = 0.4
#es._espressoHandle.Tcl_Eval('thermostat langevin 1.0 1.0')
thermostat.Thermostat().set_langevin(1.0, 1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min__dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 10
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.non_bonded_inter.set_force_cap(lj_cap)
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
for i in range(n_part):
system.part.add(id=i, pos=numpy.random.random(3) * system.box_l)
for i in range(n_part / 2):
system.part[2 * i].q = -1.0
system.part[2 * i].type = 1
system.part[2 * i + 1].q = 1.0
system.part[2 * i + 1].type = 2
# for i in range(n_part-1):
# print("Particle {} has charge {} and is of type {}.".format(i,system.part[i].q,system.part[i].type))
# Activating the Debye-Hueckel interaction
# The Bjerrum length is set to one. Assuming the solvent is water, this
# means that lj_sig is 0.714 nm in SI units.
l_B = 1
# inverse Debye length (in nm^-1) for a 1:1 electrolyte in water at room temperature
dh_kappa = numpy.sqrt(mol_dens) / 0.304
# convert to MD units
dh_kappa = dh_kappa / 0.714
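# For mol_dens = 0.1 the two lines above give roughly sqrt(0.1)/0.304 ~ 1.04
# and, after the 1/0.714 rescaling, dh_kappa ~ 1.46 in reduced units
# (approximate values, shown only as an illustration).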
dh = electrostatics.DH(
bjerrum_length=l_B, kappa=dh_kappa, r_cut=int(5 / dh_kappa))
system.actors.add(dh)
print(system.actors)
analyze.distto(system, 0)
print("Simulate {} monovalent salt in a cubic simulation box {} at molar concentration {}."
.format(n_part, box_l, mol_dens).strip())
print("Interactions:\n")
act_min_dist = analyze.mindist(es)
print("Start with minimal distance {}".format(act_min_dist))
system.max_num_cells = 2744
#############################################################
# Warmup Integration #
#############################################################
# open Observable file
obs_file = open("pydebye_hueckel.obs", "w")
obs_file.write("# Time\tE_tot\tE_kin\tE_pot\n")
# set obs_file [open "$name$ident.obs" "w"]
# puts $obs_file "\# System: $name$ident"
# puts $obs_file "\# Time\tE_tot\tE_kin\t..."
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
integrate.integrate(warm_steps)
# Warmup criterion
act_min_dist = analyze.mindist(es)
# print("\rrun %d at time=%f (LJ cap=%f) min dist = %f\r" % (i,system.time,lj_cap,act_min_dist), end=' ')
i += 1
# write observables
# puts $obs_file "{ time [setmd time] } [analyze energy]"
# Increase LJ cap
lj_cap = lj_cap + 10
system.non_bonded_inter.set_force_cap(lj_cap)
# Just to see what else we may get from the c code
print("""
ro variables:
cell_grid {0.cell_grid}
cell_size {0.cell_size}
local_box_l {0.local_box_l}
max_cut {0.max_cut}
max_part {0.max_part}
max_range {0.max_range}
max_skin {0.max_skin}
n_nodes {0.n_nodes}
n_part {0.n_part}
n_part_types {0.n_part_types}
periodicity {0.periodicity}
transfer_rate {0.transfer_rate}
verlet_reuse {0.verlet_reuse}
""".format(system))
# write parameter file
# polyBlockWrite "$name$ident.set" {box_l time_step skin} ""
set_file = open("pydebye_hueckel.set", "w")
set_file.write("box_l %s\ntime_step %s\nskin %s\n" %
(box_l, system.time_step, system.skin))
#############################################################
# Integration #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# remove force capping
lj_cap = 0
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# print initial energies
#energies = es._espressoHandle.Tcl_Eval('analyze energy')
energies = analyze.energy(system=system)
print(energies)
j = 0
for i in range(0, int_n_times):
print("run %d at time=%f " % (i, system.time))
# es._espressoHandle.Tcl_Eval('integrate %d' % int_steps)
integrate.integrate(int_steps)
# energies = es._espressoHandle.Tcl_Eval('analyze energy')
energies = analyze.energy(system=system)
print(energies)
obs_file.write('{ time %s } %s\n' % (system.time, energies))
# write observables
# set energies [analyze energy]
# puts $obs_file "{ time [setmd time] } $energies"
# puts -nonewline "temp = [expr [lindex $energies 1 1]/(([degrees_of_freedom]/2.0)*[setmd n_part])]\r"
# flush stdout
# write intermediate configuration
# if { $i%10==0 } {
# polyBlockWrite "$name$ident.[format %04d $j]" {time box_l} {id pos type}
# incr j
# }
# write end configuration
end_file = open("pydebye_hueckel.end", "w")
end_file.write("{ time %f } \n { box_l %f }\n" % (system.time, box_l))
end_file.write("{ particles {type q pos} }")
for i in range(n_part - 1):
end_file.write("%s\t%s\t%s\n" %
(system.part[i].type, system.part[i].q, system.part[i].pos))
# id & type not working yet
obs_file.close()
set_file.close()
end_file.close()
# es._espressoHandle.die()
# terminate program
print("\nFinished.")
| gpl-3.0 |
piffey/ansible | lib/ansible/modules/cloud/ovirt/ovirt_mac_pools.py | 73 | 5430 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_mac_pools
short_description: Module to manage MAC pools in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "This module manage MAC pools in oVirt/RHV."
options:
name:
description:
- "Name of the MAC pool to manage."
required: true
description:
description:
- "Description of the MAC pool."
state:
description:
- "Should the mac pool be present or absent."
choices: ['present', 'absent']
default: present
allow_duplicates:
description:
- "If (true) allow a MAC address to be used multiple times in a pool."
- "Default value is set by oVirt/RHV engine to I(false)."
ranges:
description:
- "List of MAC ranges. The from and to should be split by comma."
- "For example: 00:1a:4a:16:01:51,00:1a:4a:16:01:61"
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create MAC pool:
- ovirt_mac_pools:
name: mymacpool
allow_duplicates: false
ranges:
- 00:1a:4a:16:01:51,00:1a:4a:16:01:61
- 00:1a:4a:16:02:51,00:1a:4a:16:02:61
# Remove MAC pool:
- ovirt_mac_pools:
state: absent
name: mymacpool
'''
RETURN = '''
id:
description: ID of the MAC pool which is managed
returned: On success if MAC pool is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
template:
description: "Dictionary of all the MAC pool attributes. MAC pool attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/mac_pool."
returned: On success if MAC pool is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
equal,
create_connection,
ovirt_full_argument_spec,
)
class MACPoolModule(BaseModule):
def build_entity(self):
return otypes.MacPool(
name=self._module.params['name'],
allow_duplicates=self._module.params['allow_duplicates'],
description=self._module.params['description'],
ranges=[
otypes.Range(
from_=mac_range.split(',')[0],
to=mac_range.split(',')[1],
)
for mac_range in self._module.params['ranges']
],
)
def _compare_ranges(self, entity):
if self._module.params['ranges'] is not None:
ranges = sorted([
'%s,%s' % (mac_range.from_, mac_range.to)
for mac_range in entity.ranges
])
return equal(sorted(self._module.params['ranges']), ranges)
return True
def update_check(self, entity):
return (
self._compare_ranges(entity) and
equal(self._module.params['allow_duplicates'], entity.allow_duplicates) and
equal(self._module.params['description'], entity.description)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None, required=True),
allow_duplicates=dict(default=None, type='bool'),
description=dict(default=None),
ranges=dict(default=None, type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
mac_pools_service = connection.system_service().mac_pools_service()
mac_pools_module = MACPoolModule(
connection=connection,
module=module,
service=mac_pools_service,
)
state = module.params['state']
if state == 'present':
ret = mac_pools_module.create()
elif state == 'absent':
ret = mac_pools_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
seanli9jan/tensorflow | tensorflow/python/kernel_tests/sparse_cross_op_test.py | 8 | 16655 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_cross_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseCrossOpTest(test.TestCase):
def test_simple(self):
"""Tests a simple scenario."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_dense(self):
"""Tests only dense inputs."""
op = sparse_ops.sparse_cross([
constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],
['batch2-FC1-F1', 'batch2-FC1-F2']],
dtypes.string),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',
'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'
], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_sparse(self):
"""Tests mixed type."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([[11], [333, 55555]]),
self._sparse_tensor([['batch1-FC2-F1'],
['batch2-FC2-F1', 'batch2-FC2-F2']])
])
expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1',
'55555_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_mixed_string_dense(self):
"""Tests mixed dense inputs."""
op = sparse_ops.sparse_cross([
constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor([[
'11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',
'333_X_batch1-FC2-F2'
], [
'55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',
'999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_sparse_cross_dense(self):
"""Tests sparse and dense inputs."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1'],
['batch2-FC1-F1', 'batch2-FC1-F2']]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [
'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',
'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_integer_sparse_input(self):
"""Tests mixed type sparse and dense inputs."""
op = sparse_ops.sparse_cross([
self._sparse_tensor([[11], [333, 5555]]),
constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],
['batch2-FC2-F1', 'batch2-FC2-F2']],
dtypes.string),
])
expected_out = self._sparse_tensor(
[['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [
'333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',
'5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x3x3(self):
"""Tests 3x3x3 permutation."""
op = sparse_ops.sparse_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor(
[['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),
self._sparse_tensor(
[['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_permutation_3x1x2(self):
"""Tests 3x1x2 permutation."""
op = sparse_ops.sparse_cross([
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'
]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_large_batch(self):
"""Tests with large batch size to force multithreading."""
batch_size = 5000
col1 = []
col2 = []
col3 = []
for b in range(batch_size):
col1.append(
['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])
col2.append(['batch%d-FC2-F1' % b])
col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])
op = sparse_ops.sparse_cross([
self._sparse_tensor(col1),
self._sparse_tensor(col2),
self._sparse_tensor(col3)
])
col_out = []
for b in range(batch_size):
col_out.append([
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),
'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)
])
expected_out = self._sparse_tensor(col_out)
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_one_column_empty(self):
"""Tests when one column is empty.
The crossed tensor should be empty.
"""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),
self._sparse_tensor([], 1),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
])
with self.cached_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_some_columns_empty(self):
"""Tests when more than one columns are empty.
Cross for the corresponding batch should be empty.
"""
op = sparse_ops.sparse_cross([
self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),
self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)
])
expected_out = self._sparse_tensor([[
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',
'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'
]], 2)
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_all_columns_empty(self):
"""Tests when all columns are empty.
The crossed tensor should be empty.
"""
op = sparse_ops.sparse_cross([
self._sparse_tensor([]),
self._sparse_tensor([]),
self._sparse_tensor([])
])
with self.cached_session() as sess:
self._assert_sparse_tensor_empty(sess.run(op))
def test_hashed_zero_bucket_no_hash_key(self):
op = sparse_ops.sparse_cross_hashed([
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
])
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[1971693436396284976]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_zero_bucket(self):
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[4847552627144134031]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
# TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed.
def test_hashed_no_hash_key(self):
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
num_buckets=100)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[83]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed_output(self):
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor([['batch1-FC1-F1']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1']])
],
num_buckets=100,
hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[31]])
with self.cached_session() as sess:
self._assert_sparse_tensor_equals(expected_out, sess.run(op))
def test_hashed__has_no_collision(self):
"""Tests that fingerprint concatenation has no collisions."""
# Even though the last 10 bits of 359 and 1024+359 are identical,
# fingerprint concatenation should keep all the crosses from colliding.
t1 = constant_op.constant([[359], [359 + 1024]])
t2 = constant_op.constant([list(range(10)), list(range(10))])
cross = sparse_ops.sparse_cross_hashed(
[t2, t1], num_buckets=1024, hash_key=sparse_ops._DEFAULT_HASH_KEY + 1)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = cross_dense.eval()
self.assertTrue(numpy.not_equal(values[0], values[1]).all())
def test_hashed_3x1x2(self):
"""Tests 3x1x2 permutation with hashed output."""
op = sparse_ops.sparse_cross_hashed(
[
self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),
self._sparse_tensor([['batch1-FC2-F1']]),
self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
],
num_buckets=1000)
with self.cached_session() as sess:
out = sess.run(op)
self.assertEqual(6, len(out.values))
self.assertAllEqual([[0, i] for i in range(6)], out.indices)
self.assertTrue(all(x < 1000 and x >= 0 for x in out.values))
all_values_are_different = len(out.values) == len(set(out.values))
self.assertTrue(all_values_are_different)
def _assert_sparse_tensor_empty(self, sp):
self.assertEquals(0, sp.indices.size)
self.assertEquals(0, sp.values.size)
# TODO(zakaria): check if we can ignore the first dim of the shape.
self.assertEquals(0, sp.dense_shape[1])
def _assert_sparse_tensor_equals(self, sp1, sp2):
self.assertAllEqual(sp1.indices.eval(), sp2.indices)
self.assertAllEqual(sp1.values.eval(), sp2.values)
self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)
def _sparse_tensor(self, data, batch_size=-1):
"""Generates a SparseTensor.
Args:
data: Should be a list of list of strings or int64. Each item of the outer
list represents a batch. Each item of the batch is a feature of a
specific feature column.
batch_size: optional batch size, especially for cases when data has no
entry for some batches.
Returns:
A SparseTensor.
"""
indices = []
values = []
max_col_count = 0
for batch, batch_ix in zip(data, range(len(data))):
for column, column_ix in zip(batch, range(len(batch))):
indices.append([batch_ix, column_ix])
values.append(column)
max_col_count = max(max_col_count, column_ix + 1)
shape = [batch_size if batch_size != -1 else len(data), max_col_count]
value_type = (dtypes.string if not values or isinstance(values[0], str) else
dtypes.int64)
return sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64, [len(indices), 2]),
constant_op.constant(values, value_type, [len(indices)]),
constant_op.constant(shape, dtypes.int64))
if __name__ == '__main__':
test.main()
| apache-2.0 |
aerickson/xbmc | lib/gtest/test/gtest_filter_unittest.py | 2826 | 21261 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
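# For reference, these regexes are intended to match output lines such as the
# following (illustrative examples, not captured from a real run):
#   [----------] 2 tests from FooTest      -> TEST_CASE_REGEX captures 'FooTest'
#   [ RUN      ] SeqP/ParamTest.TestX/0    -> TEST_REGEX captures 'TestX/0'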
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
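# Sketch of how sharding is exercised (hypothetical values): running the same
# binary three times with
#   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=0   (then 1, then 2)
# should produce three disjoint test lists whose union equals the unsharded
# run, which is what AssertPartitionIsValid verifies below.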
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
args : Arguments to pass to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-2.0 |
yfried/ansible | lib/ansible/modules/storage/netapp/na_ontap_net_ifgrp.py | 9 | 11332 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: na_ontap_net_ifgrp
short_description: NetApp Ontap modify network interface group
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team ([email protected])
description:
- Create, modify, destroy the network interface group
options:
state:
description:
- Whether the specified network interface group should exist or not.
choices: ['present', 'absent']
default: present
distribution_function:
description:
- Specifies the traffic distribution function for the ifgrp.
choices: ['mac', 'ip', 'sequential', 'port']
name:
description:
- Specifies the interface group name.
required: true
mode:
description:
- Specifies the link policy for the ifgrp.
node:
description:
- Specifies the name of node.
required: true
port:
description:
- Adds the specified port.
"""
EXAMPLES = """
- name: create ifgrp
na_ontap_net_ifgrp:
state=present
username={{ netapp_username }}
password={{ netapp_password }}
hostname={{ netapp_hostname }}
distribution_function=ip
name=a0c
port=e0d
mode=multimode
node={{ Vsim node name }}
- name: delete ifgrp
na_ontap_net_ifgrp:
state=absent
username={{ netapp_username }}
password={{ netapp_password }}
hostname={{ netapp_hostname }}
name=a0c
node={{ Vsim node name }}
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapIfGrp(object):
"""
Creates, modifies and destroys an IfGrp
"""
def __init__(self):
"""
Initialize the Ontap IfGrp class
"""
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
distribution_function=dict(required=False, type='str', choices=['mac', 'ip', 'sequential', 'port']),
name=dict(required=True, type='str'),
mode=dict(required=False, type='str'),
node=dict(required=True, type='str'),
port=dict(required=False, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['distribution_function', 'mode'])
],
supports_check_mode=True
)
parameters = self.module.params
# set up state variables
self.state = parameters['state']
self.distribution_function = parameters['distribution_function']
self.name = parameters['name']
self.mode = parameters['mode']
self.node = parameters['node']
self.port = parameters['port']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
return
def get_if_grp(self):
"""
Return details about the if_group
:param:
name : Name of the if_group
:return: Details about the if_group. None if not found.
:rtype: dict
"""
if_group_iter = netapp_utils.zapi.NaElement('net-port-get-iter')
if_group_info = netapp_utils.zapi.NaElement('net-port-info')
if_group_info.add_new_child('port', self.name)
if_group_info.add_new_child('port-type', 'if_group')
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(if_group_info)
if_group_iter.add_child_elem(query)
result = self.server.invoke_successfully(if_group_iter, True)
return_value = None
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
if_group_attributes = result.get_child_by_name('attributes-list').get_child_by_name('net-port-info')
distribution_function = if_group_attributes.get_child_content('ifgrp-distribution-function')
name = if_group_attributes.get_child_content('port')
mode = if_group_attributes.get_child_content('ifgrp-mode')
ports = if_group_attributes.get_child_content('ifgrp-port')
node = if_group_attributes.get_child_content('node')
return_value = {
'name': name,
'distribution_function': distribution_function,
'mode': mode,
'node': node,
'ports': ports
}
return return_value
def get_if_grp_ports(self):
"""
Return ports of the if_group
:param:
name : Name of the if_group
:return: Ports of the if_group. None if not found.
:rtype: dict
"""
if_group_iter = netapp_utils.zapi.NaElement('net-port-ifgrp-get')
if_group_iter.add_new_child('ifgrp-name', self.name)
if_group_iter.add_new_child('node', self.node)
result = self.server.invoke_successfully(if_group_iter, True)
return_value = None
if result.get_child_by_name('attributes'):
if_group_attributes = result.get_child_by_name('attributes').get_child_by_name('net-ifgrp-info')
name = if_group_attributes.get_child_content('ifgrp-name')
mode = if_group_attributes.get_child_content('mode')
port_list = []
if if_group_attributes.get_child_by_name('ports'):
ports = if_group_attributes.get_child_by_name('ports').get_children()
for each in ports:
port_list.append(each.get_content())
node = if_group_attributes.get_child_content('node')
return_value = {
'name': name,
'mode': mode,
'node': node,
'ports': port_list
}
return return_value
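# Illustrative shape of the value returned above (hypothetical names/ports):
#   {'name': 'a0c', 'mode': 'multimode', 'node': 'node-01', 'ports': ['e0c', 'e0d']}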
def create_if_grp(self):
"""
Creates a new ifgrp
"""
route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-create")
route_obj.add_new_child("distribution-function", self.distribution_function)
route_obj.add_new_child("ifgrp-name", self.name)
route_obj.add_new_child("mode", self.mode)
route_obj.add_new_child("node", self.node)
try:
self.server.invoke_successfully(route_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating if_group %s: %s' % (self.name, to_native(error)),
exception=traceback.format_exc())
def delete_if_grp(self):
"""
Deletes an ifgrp
"""
route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-destroy")
route_obj.add_new_child("ifgrp-name", self.name)
route_obj.add_new_child("node", self.node)
try:
self.server.invoke_successfully(route_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting if_group %s: %s' % (self.name, to_native(error)),
exception=traceback.format_exc())
def add_port_to_if_grp(self):
"""
Adds a port to an ifgrp
"""
route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-add-port")
route_obj.add_new_child("ifgrp-name", self.name)
route_obj.add_new_child("port", self.port)
route_obj.add_new_child("node", self.node)
try:
self.server.invoke_successfully(route_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error adding port %s to if_group %s: %s' %
(self.port, self.name, to_native(error)),
exception=traceback.format_exc())
def remove_port_to_if_grp(self):
"""
Removes a port from an ifgrp
"""
route_obj = netapp_utils.zapi.NaElement("net-port-ifgrp-remove-port")
route_obj.add_new_child("ifgrp-name", self.name)
route_obj.add_new_child("port", self.port)
route_obj.add_new_child("node", self.node)
try:
self.server.invoke_successfully(route_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error removing port %s from if_group %s: %s' %
(self.port, self.name, to_native(error)),
exception=traceback.format_exc())
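# Summary of the idempotency logic implemented in apply() below (derived from
# the code itself; the concrete names are only examples):
#   state=present, ifgrp missing -> create_if_grp(), then add the port if one is given
#   state=present, ifgrp exists, port not yet a member -> add_port_to_if_grp()
#   state=absent, ifgrp exists -> remove the port if it is a member, then delete_if_grp()
#   anything else -> no change reported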
def apply(self):
changed = False
ifgroup_exists = False
add_ports_exists = True
remove_ports_exists = False
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_net_ifgrp", cserver)
if_group_detail = self.get_if_grp()
if if_group_detail:
ifgroup_exists = True
ifgrp_ports_detail = self.get_if_grp_ports()
if self.state == 'absent':
changed = True
if self.port:
if self.port in ifgrp_ports_detail['ports']:
remove_ports_exists = True
elif self.state == 'present':
if self.port:
if not ifgrp_ports_detail['ports']:
add_ports_exists = False
changed = True
else:
if self.port not in ifgrp_ports_detail['ports']:
add_ports_exists = False
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not ifgroup_exists:
self.create_if_grp()
if self.port:
self.add_port_to_if_grp()
else:
if not add_ports_exists:
self.add_port_to_if_grp()
elif self.state == 'absent':
if remove_ports_exists:
self.remove_port_to_if_grp()
self.delete_if_grp()
self.module.exit_json(changed=changed)
def main():
"""
Creates the NetApp Ontap IfGrp object and runs the correct play task
"""
obj = NetAppOntapIfGrp()
obj.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
pandeyadarsh/sympy | sympy/liealgebras/cartan_type.py | 80 | 1775 | from __future__ import print_function, division
from sympy.core import Basic
class CartanType_generator(Basic):
"""
Factory that maps a Cartan type specification such as "A4" to the corresponding concrete type class
"""
def __call__(self, *args):
c = args[0]
c = list(c)
letter, n = c[0], int(c[1])
if n < 0:
raise ValueError("Lie algebra rank cannot be negative")
if letter == "A":
if n >= 0:
from . import type_a
return type_a.TypeA(n)
if letter == "B":
if n >= 0:
from . import type_b
return type_b.TypeB(n)
if letter == "C":
if n >= 0:
from . import type_c
return type_c.TypeC(n)
if letter == "D":
if n >= 0:
from . import type_d
return type_d.TypeD(n)
if letter == "E":
if n >= 6 and n <= 8:
from . import type_e
return type_e.TypeE(n)
if letter == "F":
if n == 4:
from . import type_f
return type_f.TypeF(n)
if letter == "G":
if n == 2:
from . import type_g
return type_g.TypeG(n)
CartanType = CartanType_generator()
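# Minimal usage sketch (assumes the concrete type_* classes implement the
# Standard_Cartan interface defined below):
#   CartanType("A4")        # -> type_a.TypeA(4)
#   CartanType("A4").rank() # -> 4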
class Standard_Cartan(Basic):
"""
Concrete base class for Cartan types such as A4, etc
"""
def __new__(cls, series, n):
obj = Basic.__new__(cls, series, n)
obj.n = n
obj.series = series
return obj
def rank(self):
"""
Returns the rank of the Lie algebra
"""
return self.n
def series(self):
"""
Returns the type of the Lie algebra
"""
return self.series
| bsd-3-clause |
Kazade/NeHe-Website | google_appengine/lib/django_1_2/django/templatetags/i18n.py | 60 | 9958 | import re
from django.template import Node, Variable, VariableNode, _render_value_in_context
from django.template import TemplateSyntaxError, TokenParser, Library
from django.template import TOKEN_TEXT, TOKEN_VAR
from django.utils import translation
from django.utils.encoding import force_unicode
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
from django.conf import settings
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop):
self.noop = noop
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, basestring):
self.filter_expression.var = Variable(u"'%s'" % self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
output = self.filter_expression.resolve(context)
return _render_value_in_context(output, context)
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append(u'%%(%s)s' % token.contents)
vars.append(token.contents)
return ''.join(result), vars
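# Illustrative example (hypothetical token stream): a text token 'There is '
# followed by a variable token 'count' renders to
#   (u'There is %(count)s', ['count'])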
def render(self, context):
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.render(context)
# Update() works like a push(), so the corresponding context.pop() is at
# the end of this function.
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
result = translation.ugettext(singular)
# Escape all isolated '%' before substituting in the context.
result = re.sub(u'%(?!\()', u'%%', result)
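# e.g. (illustrative) u'100% of %(count)s items' becomes
# u'100%% of %(count)s items', so the literal '%' survives the
# 'result % data' formatting step below.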
data = dict([(v, _render_value_in_context(context[v], context)) for v in vars])
context.pop()
return result % data
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
put its value into the ``language`` context
variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by mark-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
"""
class TranslateParser(TokenParser):
def top(self):
value = self.value()
# Backwards compatibility fix:
# FilterExpression does not support single-quoted strings,
# so we make a cheap localized fix in order to maintain
# backwards compatibility with existing uses of ``trans``
# where single quote use is supported.
if value[0] == "'":
pos = None
m = re.match("^'([^']+)'(\|.*$)",value)
if m:
value = '"%s"%s' % (m.group(1).replace('"','\\"'),m.group(2))
elif value[-1] == "'":
value = '"%s"' % value[1:-1].replace('"','\\"')
if self.more():
if self.tag() == 'noop':
noop = True
else:
raise TemplateSyntaxError("only option for 'trans' is 'noop'")
else:
noop = False
return (value, noop)
value, noop = TranslateParser(token.contents).top()
return TranslateNode(parser.compile_filter(value), noop)
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count var|length as count %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
"""
class BlockTranslateParser(TokenParser):
def top(self):
countervar = None
counter = None
extra_context = {}
while self.more():
tag = self.tag()
if tag == 'with' or tag == 'and':
value = self.value()
if self.tag() != 'as':
raise TemplateSyntaxError("variable bindings in 'blocktrans' must be 'with value as variable'")
extra_context[self.tag()] = VariableNode(
parser.compile_filter(value))
elif tag == 'count':
counter = parser.compile_filter(self.value())
if self.tag() != 'as':
raise TemplateSyntaxError("counter specification in 'blocktrans' must be 'count value as variable'")
countervar = self.tag()
else:
raise TemplateSyntaxError("unknown subtag %s for 'blocktrans' found" % tag)
return (countervar, counter, extra_context)
countervar, counter, extra_context = BlockTranslateParser(token.contents).top()
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter)
register.tag('get_available_languages', do_get_available_languages)
register.tag('get_current_language', do_get_current_language)
register.tag('get_current_language_bidi', do_get_current_language_bidi)
register.tag('trans', do_translate)
register.tag('blocktrans', do_block_translate)
| bsd-3-clause |
rlugojr/rekall | rekall-agent/rekall_agent/ui/interactive.py | 1 | 6116 | # Rekall Memory Forensics
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Plugins for interactively examining the state of the deployment."""
import json
import arrow
from rekall import yaml_utils
from rekall.plugins.addrspaces import standard
from rekall_agent import common
from rekall_agent import result_collections
from rekall_agent.ui import renderers
class AgentControllerShowFile(common.AbstractControllerCommand):
"""Display information about a file."""
name = "view"
__args = [
dict(name="path", positional=True, required=True,
help="A path to the object to display."),
dict(name="query",
help="If this is a collection, issue this query"),
dict(name="limit",
help="Limit result to this many rows."),
dict(name="mode", type="Choices", choices=["text", "hex"],
help="Mode for dumping files"),
dict(name="encoding", default="ascii",
help="Possible encodings we try for text mode detection."),
dict(name="offset", type="IntParser",
default=0, help="An offset to hexdump."),
]
MAX_SIZE = 10*1024*1024
offset = None
def render(self, renderer):
# Starting offset.
if self.offset is None:
self.offset = self.plugin_args.offset
location = self._config.server.location_from_path_for_server(
self.plugin_args.path)
local_filename = location.get_local_filename()
# Map the location as a file because it could be very large.
address_space = standard.FileAddressSpace(
filename=local_filename, session=self.session)
if address_space.read(0, 13) == "SQLite format":
return self.render_sqlite(location, renderer)
if (address_space.end() < self.MAX_SIZE and
address_space.read(0, 1) in "{["):
try:
data = json.loads(address_space.read(
0, min(self.MAX_SIZE, address_space.end())))
return self.render_json(data, renderer)
except Exception:
pass
# Auto detect the mode.
sample = address_space.read(
0, min(1024 * 1024, address_space.end()))
try:
data = sample.decode(self.plugin_args.encoding)
return self.render_text(
local_filename, self.plugin_args.encoding, renderer)
except UnicodeError:
pass
# Fall back to hexdump
result = self.session.plugins.dump(
rows=self.plugin_args.limit, offset=self.offset,
address_space=address_space)
result.render(renderer)
self.offset = result.offset
return result
def render_text(self, local_filename, encoding, renderer):
renderer.table_header([dict(name="File Contents")])
with open(local_filename, "rb") as fd:
for line in fd:
self.offset = fd.tell()
try:
renderer.table_row(line.decode(encoding))
except UnicodeError:
continue
def render_json(self, data, renderer):
renderer.table_header([dict(name="Message")], auto_widths=True)
renderer.table_row(yaml_utils.safe_dump(data))
def render_sqlite(self, location, renderer):
with result_collections.GenericSQLiteCollection.load_from_location(
location, session=self.session) as collection:
for table in collection.tables:
types = []
headers = []
for column in table.columns:
col_spec = dict(name=column.name)
if column.type == "int":
col_spec["align"] = "r"
if column.type == "epoch":
types.append(arrow.Arrow.fromtimestamp)
else:
types.append(lambda x: x)
headers.append(col_spec)
# If the table is too large we can't wait to auto-width it.
auto_widths = max(
self.plugin_args.limit, len(collection)) < 1000
renderer.table_header(headers, auto_widths=auto_widths)
for row in collection.query(
table=table.name, query=self.plugin_args.query,
limit=self.plugin_args.limit):
renderer.table_row(
*[fn(x or 0) for fn, x in zip(types, row)])
class AgentControllerStoreLs(common.AbstractControllerCommand):
"""Show files within the storage bucket."""
name = "bucket_ls"
__args = [
dict(name="path", positional=True,
help="A path to the object to display."),
dict(name="limit", type="IntParser", default=100,
help="Total results to display"),
]
table_header = [
dict(name="Size", width=10),
dict(name="Created", width=25),
dict(name="Name"),
]
def collect(self):
location = self._config.server.location_from_path_for_server(
self.plugin_args.path)
for stat in location.list_files(max_results=self.plugin_args.limit):
yield dict(Name=renderers.UILink("gs", stat.location.to_path()),
Size=stat.size,
Created=stat.created)
| gpl-2.0 |
brainelectronics/towerdefense | pyglet/image/codecs/bmp.py | 25 | 12418 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Decoder for BMP files.
Currently supports version 3 and 4 bitmaps with BI_RGB and BI_BITFIELDS
encoding. Alpha channel is supported for 32-bit BI_RGB only.
'''
# Official docs are at
# http://msdn2.microsoft.com/en-us/library/ms532311.aspx
#
# But some details including alignment and bit/byte order are omitted; see
# http://www.fileformat.info/format/bmp/egff.htm
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from pyglet.image import ImageData
from pyglet.image.codecs import ImageDecoder, ImageDecodeException
BYTE = ctypes.c_ubyte
WORD = ctypes.c_uint16
DWORD = ctypes.c_uint32
LONG = ctypes.c_int32
FXPT2DOT30 = ctypes.c_uint32
BI_RGB = 0
BI_RLE8 = 1
BI_RLE4 = 2
BI_BITFIELDS = 3
class BITMAPFILEHEADER(ctypes.LittleEndianStructure):
_pack_ = 1
_fields_ = [
('bfType', WORD),
('bfSize', DWORD),
('bfReserved1', WORD),
('bfReserved2', WORD),
('bfOffBits', DWORD)
]
class BITMAPINFOHEADER(ctypes.LittleEndianStructure):
_pack_ = 1
_fields_ = [
('biSize', DWORD),
('biWidth', LONG),
('biHeight', LONG),
('biPlanes', WORD),
('biBitCount', WORD),
('biCompression', DWORD),
('biSizeImage', DWORD),
('biXPelsPerMeter', LONG),
('biYPelsPerMeter', LONG),
('biClrUsed', DWORD),
('biClrImportant', DWORD)
]
CIEXYZTRIPLE = FXPT2DOT30 * 9
class BITMAPV4HEADER(ctypes.LittleEndianStructure):
_pack_ = 1
_fields_ = [
('biSize', DWORD),
('biWidth', LONG),
('biHeight', LONG),
('biPlanes', WORD),
('biBitCount', WORD),
('biCompression', DWORD),
('biSizeImage', DWORD),
('biXPelsPerMeter', LONG),
('biYPelsPerMeter', LONG),
('biClrUsed', DWORD),
('biClrImportant', DWORD),
('bV4RedMask', DWORD),
('bV4GreenMask', DWORD),
('bV4BlueMask', DWORD),
('bV4AlphaMask', DWORD),
('bV4CSType', DWORD),
('bV4Endpoints', CIEXYZTRIPLE),
('bV4GammaRed', DWORD),
('bV4GammaGreen', DWORD),
('bV4GammaBlue', DWORD),
]
class RGBFields(ctypes.LittleEndianStructure):
_pack_ = 1
_fields_ = [
('red', DWORD),
('green', DWORD),
('blue', DWORD),
]
class RGBQUAD(ctypes.LittleEndianStructure):
_pack_ = 1
_fields_ = [
('rgbBlue', BYTE),
('rgbGreen', BYTE),
('rgbRed', BYTE),
('rgbReserved', BYTE)
]
def __repr__(self):
return '<%d, %d, %d>' % (self.rgbRed, self.rgbGreen, self.rgbBlue)
def ptr_add(ptr, offset):
address = ctypes.addressof(ptr.contents) + offset
return ctypes.pointer(type(ptr.contents).from_address(address))
def to_ctypes(buffer, offset, type):
if offset + ctypes.sizeof(type) > len(buffer):
raise ImageDecodeException('BMP file is truncated')
ptr = ptr_add(ctypes.pointer(buffer), offset)
return ctypes.cast(ptr, ctypes.POINTER(type)).contents
class BMPImageDecoder(ImageDecoder):
def get_file_extensions(self):
return ['.bmp']
def decode(self, file, filename):
if not file:
file = open(filename, 'rb')
bytes = file.read()
buffer = ctypes.c_buffer(bytes)
if bytes[:2] != b'BM':
raise ImageDecodeException(
'Not a Windows bitmap file: %r' % (filename or file))
file_header = to_ctypes(buffer, 0, BITMAPFILEHEADER)
bits_offset = file_header.bfOffBits
info_header_offset = ctypes.sizeof(BITMAPFILEHEADER)
info_header = to_ctypes(buffer, info_header_offset, BITMAPINFOHEADER)
palette_offset = info_header_offset + info_header.biSize
if info_header.biSize < ctypes.sizeof(BITMAPINFOHEADER):
raise ImageDecodeException(
'Unsupported BMP type: %r' % (filename or file))
width = info_header.biWidth
height = info_header.biHeight
if width <= 0 or info_header.biPlanes != 1:
raise ImageDecodeException(
'BMP file has corrupt parameters: %r' % (filename or file))
pitch_sign = height < 0 and -1 or 1
height = abs(height)
compression = info_header.biCompression
if compression not in (BI_RGB, BI_BITFIELDS):
raise ImageDecodeException(
'Unsupported compression: %r' % (filename or file))
clr_used = 0
bitcount = info_header.biBitCount
if bitcount == 1:
pitch = (width + 7) // 8
bits_type = ctypes.c_ubyte
decoder = decode_1bit
elif bitcount == 4:
pitch = (width + 1) // 2
bits_type = ctypes.c_ubyte
decoder = decode_4bit
elif bitcount == 8:
bits_type = ctypes.c_ubyte
pitch = width
decoder = decode_8bit
elif bitcount == 16:
pitch = width * 2
bits_type = ctypes.c_uint16
decoder = decode_bitfields
elif bitcount == 24:
pitch = width * 3
bits_type = ctypes.c_ubyte
decoder = decode_24bit
elif bitcount == 32:
pitch = width * 4
if compression == BI_RGB:
decoder = decode_32bit_rgb
bits_type = ctypes.c_ubyte
elif compression == BI_BITFIELDS:
decoder = decode_bitfields
bits_type = ctypes.c_uint32
else:
raise ImageDecodeException(
'Unsupported compression: %r' % (filename or file))
else:
raise ImageDecodeException(
'Unsupported bit count %d: %r' % (bitcount, filename or file))
pitch = (pitch + 3) & ~3
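# BMP rows are padded to a 4-byte boundary; e.g. a 3-pixel-wide 24-bit image
# (illustrative values) has a raw pitch of 9 bytes, rounded up to 12 here.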
packed_width = pitch // ctypes.sizeof(bits_type)
if bitcount < 16 and compression == BI_RGB:
clr_used = info_header.biClrUsed or (1 << bitcount)
palette = to_ctypes(buffer, palette_offset, RGBQUAD * clr_used)
bits = to_ctypes(buffer, bits_offset,
bits_type * packed_width * height)
return decoder(bits, palette, width, height, pitch, pitch_sign)
elif bitcount >= 16 and compression == BI_RGB:
bits = to_ctypes(buffer, bits_offset,
bits_type * (packed_width * height))
return decoder(bits, None, width, height, pitch, pitch_sign)
elif compression == BI_BITFIELDS:
if info_header.biSize >= ctypes.sizeof(BITMAPV4HEADER):
info_header = to_ctypes(buffer, info_header_offset,
BITMAPV4HEADER)
r_mask = info_header.bV4RedMask
g_mask = info_header.bV4GreenMask
b_mask = info_header.bV4BlueMask
else:
fields_offset = info_header_offset + \
ctypes.sizeof(BITMAPINFOHEADER)
fields = to_ctypes(buffer, fields_offset, RGBFields)
r_mask = fields.red
g_mask = fields.green
b_mask = fields.blue
class _BitsArray(ctypes.LittleEndianStructure):
_pack_ = 1
_fields_ = [
('data', bits_type * packed_width * height),
]
bits = to_ctypes(buffer, bits_offset, _BitsArray).data
return decoder(bits, r_mask, g_mask, b_mask,
width, height, pitch, pitch_sign)
def decode_1bit(bits, palette, width, height, pitch, pitch_sign):
rgb_pitch = (((pitch << 3) + 7) & ~0x7) * 3
buffer = (ctypes.c_ubyte * (height * rgb_pitch))()
i = 0
for row in bits:
for packed in row:
for _ in range(8):
rgb = palette[(packed & 0x80) >> 7]
buffer[i] = rgb.rgbRed
buffer[i + 1] = rgb.rgbGreen
buffer[i + 2] = rgb.rgbBlue
i += 3
packed <<= 1
return ImageData(width, height, 'RGB', buffer, pitch_sign * rgb_pitch)
def decode_4bit(bits, palette, width, height, pitch, pitch_sign):
rgb_pitch = (((pitch << 1) + 1) & ~0x1) * 3
buffer = (ctypes.c_ubyte * (height * rgb_pitch))()
i = 0
for row in bits:
for packed in row:
for index in ((packed & 0xf0) >> 4, packed & 0xf):
rgb = palette[index]
buffer[i] = rgb.rgbRed
buffer[i + 1] = rgb.rgbGreen
buffer[i + 2] = rgb.rgbBlue
i += 3
return ImageData(width, height, 'RGB', buffer, pitch_sign * rgb_pitch)
def decode_8bit(bits, palette, width, height, pitch, pitch_sign):
rgb_pitch = pitch * 3
buffer = (ctypes.c_ubyte * (height * rgb_pitch))()
i = 0
for row in bits:
for index in row:
rgb = palette[index]
buffer[i] = rgb.rgbRed
buffer[i + 1] = rgb.rgbGreen
buffer[i + 2] = rgb.rgbBlue
i += 3
return ImageData(width, height, 'RGB', buffer, pitch_sign * rgb_pitch)
def decode_24bit(bits, palette, width, height, pitch, pitch_sign):
buffer = (ctypes.c_ubyte * (height * pitch))()
ctypes.memmove(buffer, bits, len(buffer))
return ImageData(width, height, 'BGR', buffer, pitch_sign * pitch)
def decode_32bit_rgb(bits, palette, width, height, pitch, pitch_sign):
buffer = (ctypes.c_ubyte * (height * pitch))()
ctypes.memmove(buffer, bits, len(buffer))
return ImageData(width, height, 'BGRA', buffer, pitch_sign * pitch)
def get_shift(mask):
if not mask:
return 0
# Shift down
shift = 0
while not (1 << shift) & mask:
shift += 1
# Shift up
shift_up = 0
while (mask >> shift) >> shift_up:
shift_up += 1
s = shift - (8 - shift_up)
if s < 0:
return 0, -s
else:
return s, 0
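# Worked example for the common 16-bit 5-6-5 layout (illustrative masks, not
# read from a file): get_shift(0xF800) == (8, 0), get_shift(0x07E0) == (3, 0),
# get_shift(0x001F) == (0, 3), i.e. each masked component ends up scaled to a
# full 8-bit channel in decode_bitfields below.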
def decode_bitfields(bits, r_mask, g_mask, b_mask,
width, height, pitch, pitch_sign):
r_shift1, r_shift2 = get_shift(r_mask)
g_shift1, g_shift2 = get_shift(g_mask)
b_shift1, b_shift2 = get_shift(b_mask)
rgb_pitch = 3 * len(bits[0])
buffer = (ctypes.c_ubyte * (height * rgb_pitch))()
i = 0
for row in bits:
for packed in row:
buffer[i] = (packed & r_mask) >> r_shift1 << r_shift2
buffer[i+1] = (packed & g_mask) >> g_shift1 << g_shift2
buffer[i+2] = (packed & b_mask) >> b_shift1 << b_shift2
i += 3
return ImageData(width, height, 'RGB', buffer, pitch_sign * rgb_pitch)
def get_decoders():
return [BMPImageDecoder()]
def get_encoders():
return []
| bsd-3-clause |
SLongofono/448_Project4 | Documentation/Prototype/Compare_Songs.py | 1 | 5320 | ## @file Compare_Songs.py
# Compare Songs
# @brief Functions associated with the collection of tracks to compare against
# @details This file describes the methods by which we collect songs to process
# against our user profile.
import sys
import spotipy
import spotipy.util as util
import Assemble_Profile
import config_obj
# Get user credentials object
user = config_obj.get_user()
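# The credentials object is used below as a dict with the keys 'username',
# 'client_id', 'client_secret' and 'redirect_uri'; an illustrative shape
# (placeholder values, not real credentials):
#   {'username': 'someuser', 'client_id': 'xxxx', 'client_secret': 'yyyy',
#    'redirect_uri': 'http://localhost:8888/callback'}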
## @var scope
# @brief describes the permissions associated with the authorization token
scope = 'user-library-read'
## compareFeatured
# @brief Gets songs from featured playlists and returns a list of audio features
# @param user a user to establish a usageToken
# @param lim the number of playlists to retrieve
# @param debug prints debug info if true
# @return A list of vectorized versions of audio features
# @details Grabs a number of spotify's featured playlists, as specified by the lim
# parameter. For each track within each playlist, it gets the audio features,
# converts it to a vector, and finally returns a list of all the vectors
#
def compareFeatured(user, lim=20, debug=True):
usageToken = util.prompt_for_user_token(username=user['username'],
client_id=user['client_id'],
client_secret=user['client_secret'],
redirect_uri=user['redirect_uri'],
scope=scope)
if usageToken:
sp = spotipy.Spotify(auth=usageToken)
results = sp.featured_playlists(limit=lim)
vectors = []
for playlist in results['playlists']['items']:
results = sp.user_playlist_tracks(user="spotify", playlist_id=playlist['id'])
for item in results['items']:
track = item['track']
try:
featureVector = Assemble_Profile.getVectorFromTrack(sp, sp.audio_features([track['id']])[0], track['artists'])
vectors.append(featureVector)
except: pass
if debug:
print vectors
return vectors
## compareNewReleases
# @brief Compares new releases with the user's aggregated preferences
# @param lim the number of new releases to compare
# @return A list of vectorized versions of audio features
# @details Grabs a number of spotify's new releases, as specified by the lim
# parameter. For each track, it gets the audio features, converts it
# to a vector, and finally returns a list of all the vectors
#
def compareNewReleases(user, lim=20, debug=False):
usageToken = util.prompt_for_user_token(username=user['username'],
client_id=user['client_id'],
client_secret=user['client_secret'],
redirect_uri=user['redirect_uri'],
scope=scope)
if usageToken:
sp = spotipy.Spotify(auth=usageToken)
results = sp.new_releases(limit=lim)
vectors = []
for album in results['albums']['items']:
tracks = sp.album_tracks(album['id'])
for track in tracks['items']:
try:
featureVector = Assemble_Profile.getVectorFromTrack(sp, sp.audio_features([track['id']])[0], track['artists'])
vectors.append(featureVector)
except: pass
if debug:
print vectors
return vectors
## compareSearch
# @brief Gets songs from a search query and returns a list of audio features
# @param user a user to establish a usageToken
# @param query a string to be used with the spotify search feature
# @param lim the number of search results to retrieve
# @param debug prints debug info if true
# @return A list of vectorized versions of audio features
# @details Searches spotify with the given query, and gets as many results as specified
# by the lim parameter. For each track, it gets the audio features, converts it
# to a vector, and finally returns a list of all the vectors
#
def compareSearch(user, query, lim=20, debug=False):
usageToken = util.prompt_for_user_token(username=user['username'],
client_id=user['client_id'],
client_secret=user['client_secret'],
redirect_uri=user['redirect_uri'],
scope=scope)
if usageToken:
sp = spotipy.Spotify(auth=usageToken)
results = sp.search(query, limit=lim)
vectors = []
for track in results['tracks']['items']:
try:
featureVector = Assemble_Profile.getVectorFromTrack(sp, sp.audio_features([track['id']])[0], track['artists'])
vectors.append(featureVector)
except: pass
if debug:
print vectors
return vectors
# a menu system to test the various comparison methods
if __name__ == '__main__':
command = raw_input("Enter the number associated with the desired command:\n 1. Compare with New Releases\n 2. Compare with Featured Playlists\n 3. Compare with Search\n")
if command == "1":
lim = int(raw_input("How many new releases would you like to compare (Max 50)? "))
if lim <= 50 and lim > 0:
compareNewReleases(user, lim, True)
else:
print "Invalid input"
elif command == "2":
lim = int(raw_input("How many new playlists would you like to compare (Max 10)?\n(Note: Playlists can be very long, requesting more than 1 playlist may take a long time)\n"))
if lim <= 10 and lim > 0:
compareFeatured(user, lim, True)
else:
print "Invalid input"
elif command == "3":
query = raw_input("Please enter search query: ")
lim = int(raw_input("How many new releases would you like to compare (Max 50)? "))
if lim <= 50 and lim > 0:
compareSearch(user, query, lim, True)
else:
print "Invalid input"
else:
print "Invalid command"
| mit |
vveerava/Openstack | neutron/plugins/ml2/rpc.py | 7 | 9218 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo import messaging
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import constants as q_const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.extensions import portbindings
from neutron.i18n import _LW
from neutron import manager
from neutron.openstack.common import log
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
# REVISIT(kmestery): Allow the type and mechanism drivers to supply the
# mixins and eventually remove the direct dependencies on type_tunnel.
LOG = log.getLogger(__name__)
class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
# history
# 1.0 Initial version (from openvswitch/linuxbridge)
# 1.1 Support Security Group RPC
# 1.2 Support get_devices_details_list
# 1.3 get_device_details rpc signature upgrade to obtain 'host' and
# return value to include fixed_ips and device_owner for
# the device port
target = messaging.Target(version='1.3')
def __init__(self, notifier, type_manager):
self.setup_tunnel_callback_mixin(notifier, type_manager)
super(RpcCallbacks, self).__init__()
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
LOG.debug("Device %(device)s details requested by agent "
"%(agent_id)s with host %(host)s",
{'device': device, 'agent_id': agent_id, 'host': host})
plugin = manager.NeutronManager.get_plugin()
port_id = plugin._device_to_port_id(device)
port_context = plugin.get_bound_port_context(rpc_context,
port_id,
host)
if not port_context:
LOG.warning(_LW("Device %(device)s requested by agent "
"%(agent_id)s not found in database"),
{'device': device, 'agent_id': agent_id})
return {'device': device}
segment = port_context.bound_segment
port = port_context.current
if not segment:
LOG.warning(_LW("Device %(device)s requested by agent "
"%(agent_id)s on network %(network_id)s not "
"bound, vif_type: %(vif_type)s"),
{'device': device,
'agent_id': agent_id,
'network_id': port['network_id'],
'vif_type': port[portbindings.VIF_TYPE]})
return {'device': device}
new_status = (q_const.PORT_STATUS_BUILD if port['admin_state_up']
else q_const.PORT_STATUS_DOWN)
if port['status'] != new_status:
plugin.update_port_status(rpc_context,
port_id,
new_status,
host)
entry = {'device': device,
'network_id': port['network_id'],
'port_id': port_id,
'mac_address': port['mac_address'],
'admin_state_up': port['admin_state_up'],
'network_type': segment[api.NETWORK_TYPE],
'segmentation_id': segment[api.SEGMENTATION_ID],
'physical_network': segment[api.PHYSICAL_NETWORK],
'fixed_ips': port['fixed_ips'],
'device_owner': port['device_owner'],
'profile': port[portbindings.PROFILE]}
LOG.debug("Returning: %s", entry)
return entry
def get_devices_details_list(self, rpc_context, **kwargs):
return [
self.get_device_details(
rpc_context,
device=device,
**kwargs
)
for device in kwargs.pop('devices', [])
]
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
# TODO(garyk) - live migration and port status
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
LOG.debug("Device %(device)s no longer exists at agent "
"%(agent_id)s",
{'device': device, 'agent_id': agent_id})
plugin = manager.NeutronManager.get_plugin()
port_id = plugin._device_to_port_id(device)
port_exists = True
if (host and not plugin.port_bound_to_host(rpc_context,
port_id, host)):
LOG.debug("Device %(device)s not bound to the"
" agent host %(host)s",
{'device': device, 'host': host})
return {'device': device,
'exists': port_exists}
port_exists = bool(plugin.update_port_status(rpc_context, port_id,
q_const.PORT_STATUS_DOWN,
host))
return {'device': device,
'exists': port_exists}
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
LOG.debug("Device %(device)s up at agent %(agent_id)s",
{'device': device, 'agent_id': agent_id})
plugin = manager.NeutronManager.get_plugin()
port_id = plugin._device_to_port_id(device)
if (host and not plugin.port_bound_to_host(rpc_context,
port_id, host)):
LOG.debug("Device %(device)s not bound to the"
" agent host %(host)s",
{'device': device, 'host': host})
return
port_id = plugin.update_port_status(rpc_context, port_id,
q_const.PORT_STATUS_ACTIVE,
host)
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if (l3plugin and
utils.is_extension_supported(l3plugin,
q_const.L3_DISTRIBUTED_EXT_ALIAS)):
try:
port = plugin._get_port(rpc_context, port_id)
l3plugin.dvr_vmarp_table_update(rpc_context, port, "add")
except exceptions.PortNotFound:
LOG.debug('Port %s not found during ARP update', port_id)
class AgentNotifierApi(dvr_rpc.DVRAgentRpcApiMixin,
sg_rpc.SecurityGroupAgentRpcApiMixin,
type_tunnel.TunnelAgentRpcApiMixin):
"""Agent side of the openvswitch rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
update_dhcp_port, and removed get_dhcp_port methods.
"""
def __init__(self, topic):
self.topic = topic
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
target = messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def network_delete(self, context, network_id):
cctxt = self.client.prepare(topic=self.topic_network_delete,
fanout=True)
cctxt.cast(context, 'network_delete', network_id=network_id)
def port_update(self, context, port, network_type, segmentation_id,
physical_network):
cctxt = self.client.prepare(topic=self.topic_port_update,
fanout=True)
cctxt.cast(context, 'port_update', port=port,
network_type=network_type, segmentation_id=segmentation_id,
physical_network=physical_network)
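    # Illustrative sketch, not part of the original module: how the plugin
    # side might drive this notifier. The topic constant and the context/port
    # values are assumptions for the example; only the methods defined above
    # are called.
    #
    #     notifier = AgentNotifierApi(topics.AGENT)
    #     notifier.network_delete(context, network_id)
    #     notifier.port_update(context, port, 'vxlan', 1001, None)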
| apache-2.0 |
curtisblack/R2D2 | BluetoothSerial.py | 1 | 1624 | import os
import logging
import serial
from RPi.GPIO import *
class BluetoothSerial:
def __init__(self, usb, pin):
self.usb = usb
self.pin = pin
self.port = None
self.buffer = ""
setmode(BCM)
setup(self.pin, IN)
self.Connected = False
self.JustConnected = False
self.JustDisconnected = False
def Update(self):
        c = True if input(self.pin) else False  # read the status pin configured in __init__
if self.port == None:
if os.path.exists("/dev/ttyUSB" + str(self.usb)):
self.port = serial.Serial("/dev/ttyUSB" + str(self.usb), 38400)
self.port.flushInput()
else:
c = False
return
self.JustConnected = False
self.JustDisconnected = False
if c != self.Connected:
self.Connected = c
if c:
self.JustConnected = True
else:
self.JustDisconnected = True
self.port.flushInput()
logging.info("Bluetooth Serial " + str(self.usb) + " " + ("Connected" if c else "Disconnected"))
if self.Connected:
a = self.port.inWaiting()
for b in range(a):
c = self.port.read()
if c == '\r' or c == '\n':
if len(self.buffer) > 0:
logging.info("Bluetooth Serial " + str(self.usb) + " Message: " + self.buffer)
self.buffer = ""
else:
self.buffer += c
def Write(self, data):
if self.Connected:
self.port.write(data)
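# Illustrative usage sketch, not part of the original module. The USB index
# and status GPIO pin below are assumptions for the example; a caller polls
# Update() in its main loop and sends data with Write() once connected.
#
#     bt = BluetoothSerial(0, 22)
#     while True:
#         bt.Update()
#         if bt.JustConnected:
#             bt.Write("ready\r\n")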
| mit |
theyaa/Impala | thirdparty/thrift-0.9.0/test/py/TestSocket.py | 99 | 2888 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, glob
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--genpydir', type='string', dest='genpydir', default='gen-py')
options, args = parser.parse_args()
del sys.argv[1:] # clean up hack so unittest doesn't complain
sys.path.insert(0, options.genpydir)
sys.path.insert(0, glob.glob('../../lib/py/build/lib.*')[0])
from ThriftTest import ThriftTest
from ThriftTest.ttypes import *
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
import unittest
import time
import socket
import random
from optparse import OptionParser
class TimeoutTest(unittest.TestCase):
def setUp(self):
for i in xrange(50):
try:
# find a port we can use
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = random.randint(10000, 30000)
self.listen_sock.bind(('localhost', self.port))
self.listen_sock.listen(5)
break
except:
if i == 49:
raise
def testConnectTimeout(self):
starttime = time.time()
try:
leaky = []
for i in xrange(100):
socket = TSocket.TSocket('localhost', self.port)
socket.setTimeout(10)
socket.open()
leaky.append(socket)
except:
self.assert_(time.time() - starttime < 5.0)
def testWriteTimeout(self):
starttime = time.time()
try:
socket = TSocket.TSocket('localhost', self.port)
socket.setTimeout(10)
socket.open()
lsock = self.listen_sock.accept()
while True:
socket.write("hi" * 100)
except:
self.assert_(time.time() - starttime < 5.0)
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TimeoutTest))
testRunner = unittest.TextTestRunner(verbosity=2)
testRunner.run(suite)
| apache-2.0 |
michigraber/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
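# Illustrative addition, not part of the original example: the iterative
# maximization of the marginal log-likelihood also infers the noise precision
# ``alpha_`` and the weight precision ``lambda_``, which can be compared with
# the values used to simulate the data above.
print("Estimated alpha (noise precision): %.2f (data generated with %.2f)"
      % (clf.alpha_, alpha_))
print("Estimated lambda (weight precision): %.2f (relevant weights drawn with %.2f)"
      % (clf.lambda_, lambda_))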
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
agancsos/misc | crawler.py | 1 | 2905 | #!/bin/python
import os
import sys
import re
import urllib2
import random
import datetime
dbfile="";
def getSQLiteRows(sql):
key=str(random.randint(0,9999));
os.system("sqlite3 \""+dbfile+"\" \""+sql+"\" > db_file_"+key);
all_content="";
if(os.path.exists("db_file_"+key)):
all_content=open("db_file_"+key,"r").read();
os.system("rm db_file_"+key);
temp_array=all_content.split("\n");
final_array=[];
i=0;
while i<len(temp_array)-1:
final_array.append(temp_array[i]);
i+=1;
return final_array;
def right_pad(mstr,mlen,mpad):
final="";
i=len(mstr);
while i<mlen:
final=final+mpad;
i+=1;
return mstr+final;
def about():
print right_pad("",80,"=");
print "Welcome to the master crawler";
print "Author : Abel Gancsos";
print "Version : 1.0.0";
print "Description: This crawls sites from the"
print " links table for more 411."
print "Table : links";
print right_pad("",80,"=");
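# Illustrative note, not part of the original script: the SQL below assumes a
# SQLite ``links`` table roughly shaped like the following. The column types
# and default are assumptions; only ``url`` and ``last_updated`` are actually
# referenced by the queries in this file.
#
#     CREATE TABLE links (
#         url          TEXT,
#         last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP
#     );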
class crawler:
id=None;
timestamp=None;
links=None;
start_url=None;
def __init__(self):
self.id=right_pad(str(random.randint(0,99999)),10,"0");
self.timestamp=str(datetime.datetime.now());
self.links=getSQLiteRows("select url from links order by last_updated desc");
def run(self):
print right_pad("",80,"=");
print "ID: "+self.id+"\t Timestamp: "+self.timestamp;
print "Running....";
print right_pad("",80,"=");
i=0;
while(i<len(self.links)):
request=urllib2.Request(self.links[i]);
handler=urllib2.urlopen(request);
content=handler.read();
for url in re.findall('''href=["']http://(.[^"']+)["']''',content, re.I):
url=url.replace("https://","");
temp_rows=getSQLiteRows("select* from links where url='http://"+url.replace("https://","")+"'");
if(len(temp_rows)==0):
os.system("sqlite3 \"db/hypo_db.hypo\" \"insert into links (url) values ('http://"+url.replace("https://","")+"')\"");
request2=urllib2.Request("http://"+url);
handler2=urllib2.urlopen(request2);
content2=handler2.read();
for url2 in re.findall('''href=["'](.[^"']+)["']''', content2, re.I):
url2=url2.replace("https://","");
temp_rows=getSQLiteRows("select* from links where url='http://"+url2.replace("https://","")+"'");
if(len(temp_rows)==0):
if(url2.replace("http://","")==url2):
os.system("sqlite3 \"db/hypo_db.hypo\" \"insert into links (url) values ('http://"+url2.replace("http://","")+"')\"");
else:
temp_rows=getSQLiteRows("select* from links where url='http://"+url.replace("https://","")+"/"+url2.replace("https://","")+"'");
if(len(temp_rows)==0):
os.system("sqlite3 \"db/hypo_db.hypo\" \"insert into links (url) values ('http://"+url.replace("https://","")+"/"+url2.replace("https://","")+"')\"");
i+=1;
print right_pad("",80,"=");
if len(sys.argv)>1 and sys.argv[1]=="-h":
about();
else:
session=crawler();
session.run();
print "\n";
| mit |
Jionglun/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/draw.py | 603 | 6456 | from javascript import console
from browser import timer
import math
class Queue:
def __init__(self):
self._list=[]
def empty(self):
return len(self._list) == 0
def put(self, element):
self._list.append(element)
def get(self):
if len(self._list) == 0:
            raise IndexError("get from an empty Queue")
_element=self._list[0]
if len(self._list) == 1:
self._list=[]
else:
self._list=self._list[1:]
return _element
dm={}
def aaline(canvas, color, startpos, endpos, width, outline, blend=1):
#console.log("aaline")
if canvas not in dm:
dm[canvas]=DrawManager(canvas)
dm[canvas].process()
_dl=DrawLine(startpos[0], startpos[1], endpos[0], endpos[1], color,
width, outline, speed=10)
dm[canvas].add_line(_dl) #color, startpos, endpos, width, outline)
def aapolygon(canvas, color, coordinates, width, outline, blend=1):
#console.log("aapolygon")
if canvas not in dm:
dm[canvas]=DrawManager(canvas)
dm[canvas].process()
_dp=DrawPolygon(coordinates, color, width, outline, speed=10)
dm[canvas].add_polygon(_dp)
def aapolygon_bg(canvas, shape):
if canvas not in dm:
dm[canvas]=DrawManager(canvas)
dm[canvas].process()
dm[canvas].add_polygon_bg(shape)
class DrawPolygon:
def __init__(self, coordinates, color, width, outline, speed=10):
self.moveTo=coordinates[0]
self.segments=coordinates[1:]
self.color=color
self.width=width
self.outline=outline
class DrawLine:
def __init__(self, x0, y0, x1, y1, color, width, outline, speed=None):
self._type='LINE'
self._x0=x0
self._x1=x1
self._y0=y0
self._y1=y1
self._speed=speed
self._color=color
self._width=width
self._outline=outline
def get_segments(self):
if self._speed==0: #no animate since speed is 0 (return one segment)
return [{'type': self._type, 'x0':self._x0, 'y0': self._y0,
'x1': self._x1, 'y1': self._y1, 'color': self._color}]
#need to figure out how to translate speed into pixels, etc
#maybe speed is pixels per ms? 10 = 10 pixels per millisecond?
_x=(self._x1 - self._x0)
_x*=_x
_y=(self._y1 - self._y0)
_y*=_y
_distance=math.sqrt(_x + _y)
if _distance < self._speed: # we can do this in one segment
return [{'type': self._type, 'x0':self._x0, 'y0': self._y0,
'x1': self._x1, 'y1': self._y1, 'color': self._color}]
_segments=[]
_num_segments=math.floor(_distance/self._speed)
_pos_x=self._x0
_pos_y=self._y0
_x_diff=self._x1 - self._x0
_y_diff=self._y1 - self._y0
for _i in range(1,_num_segments+1):
_x=self._x0 + _i/_num_segments * _x_diff
_y=self._y0 + _i/_num_segments * _y_diff
            _segments.append({'type': 'LINE', 'x0': _pos_x, 'y0': _pos_y,
                              'x1': _x, 'y1': _y, 'color': self._color})
_pos_x=_x
_pos_y=_y
if _pos_x != self._x1 or _pos_y != self._y1:
            # final segment closes the remaining gap to the true endpoint
            _segments.append({'type': 'LINE', 'x0': _pos_x, 'y0': _pos_y,
                              'x1': self._x1, 'y1': self._y1,
                              'color': self._color})
return _segments
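    # Worked example (illustrative): if speed is read as pixels advanced per
    # timer tick, a line from (0, 0) to (30, 40) has length 50, so speed=10
    # gives floor(50 / 10) = 5 segments, each moving one fifth of the way to
    # the endpoint; speed=0 returns the whole line as a single segment.
    #
    #     DrawLine(0, 0, 30, 40, '#fff', 1, None, speed=10).get_segments()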
class DrawManager:
def __init__(self, canvas):
self._queue=Queue()
self._canvas=canvas
self._ctx=canvas.getContext('2d')
self._interval=None
self._bg=None #used to capture bg before polygon is drawn
def __del__(self):
if self._interval is not None:
            timer.clear_interval(self._interval)
self._interval=None
del self._queue
def rect_from_shape(self, points):
_width=self._canvas.width
_height=self._canvas.height
_min_x=_width
_max_x=0
_min_y=_height
_max_y=0
for _point in points:
_x, _y = _point
_min_x=min(_min_x, _x)
_min_y=min(_min_y, _y)
_max_x=max(_max_x, _x)
_max_y=max(_max_y, _y)
_w2=_width/2
_h2=_height/2
return math.floor(_min_x-0.5)+_w2, math.floor(_min_y-0.5+_h2), \
math.ceil(_max_x+0.5)+_w2, math.ceil(_max_y+0.5+_h2)
def __interval(self):
if not self._queue.empty():
_dict=self._queue.get()
if _dict['type'] == 'LINE':
self._ctx.beginPath()
self._ctx.moveTo(_dict['x0'], _dict['y0'])
self._ctx.lineTo(_dict['x1'], _dict['y1'])
#if _dict['outline'] is not None:
# self._ctx.strokeStyle=_dict['outline'] #set line color
if _dict['color'] is not None:
self._ctx.fillStyle=_dict['color']
self._ctx.stroke()
elif _dict['type'] == 'POLYGON':
if self._bg is not None:
self._ctx.putImageData(self._bg[0], self._bg[1], self._bg[2])
console.log(self._bg[0])
self._bg=None
self._ctx.beginPath()
_moveTo=_dict['moveTo']
self._ctx.moveTo(_moveTo[0], _moveTo[1])
for _segment in _dict['segments']:
self._ctx.lineTo(_segment[0], _segment[1])
if _dict['width']:
self._ctx.lineWidth=_dict['width']
if _dict['outline']:
self._ctx.strokeStyle=_dict['outline']
if _dict['color']:
self._ctx.fillStyle=_dict['color']
self._ctx.fill()
self._ctx.closePath()
self._ctx.stroke()
elif _dict['type'] == 'POLYGON_BG':
_x0,_y0,_x1,_y1=self.rect_from_shape(_dict['shape'])
console.log(_x0,_y0,_x1, _y1)
self._bg=[]
self._bg.append(self._ctx.getImageData(_x0,_y0,abs(_x1)-abs(_x0),abs(_y1)-abs(_y0)))
self._bg.append(_x0)
self._bg.append(_y0)
def process(self):
self._interval=timer.set_interval(self.__interval, 10)
def add_line(self, dl): #color, startpos, endpos, width, outline, speed=None):
for _segment in dl.get_segments():
self._queue.put(_segment)
def add_polygon(self, dp):
self._queue.put({'type': 'POLYGON', 'moveTo': dp.moveTo,
'segments': dp.segments, 'color': dp.color,
'outline': dp.outline, 'width': dp.width})
def add_polygon_bg(self, shape):
self._queue.put({'type': 'POLYGON_BG', 'shape': shape})
| gpl-3.0 |
oneminot/audacity | lib-src/lv2/sord/waflib/Tools/dbus.py | 318 | 1142 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Task,Errors
from waflib.TaskGen import taskgen_method,before_method
@taskgen_method
def add_dbus_file(self,filename,prefix,mode):
if not hasattr(self,'dbus_lst'):
self.dbus_lst=[]
if not'process_dbus'in self.meths:
self.meths.append('process_dbus')
self.dbus_lst.append([filename,prefix,mode])
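# Illustrative wscript usage, a sketch rather than part of this tool file; the
# target name, the 'use' library and the 'glib-server' mode are assumptions
# made for the example:
#
#   def build(bld):
#       tg = bld.program(source='main.c', target='app', use='GLIB2')
#       tg.add_dbus_file('test.xml', 'test_prefix', 'glib-server')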
@before_method('apply_core')
def process_dbus(self):
for filename,prefix,mode in getattr(self,'dbus_lst',[]):
node=self.path.find_resource(filename)
if not node:
raise Errors.WafError('file not found '+filename)
tsk=self.create_task('dbus_binding_tool',node,node.change_ext('.h'))
tsk.env.DBUS_BINDING_TOOL_PREFIX=prefix
tsk.env.DBUS_BINDING_TOOL_MODE=mode
class dbus_binding_tool(Task.Task):
color='BLUE'
ext_out=['.h']
run_str='${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}'
shell=True
def configure(conf):
dbus_binding_tool=conf.find_program('dbus-binding-tool',var='DBUS_BINDING_TOOL')
| gpl-2.0 |
promptworks/keystone | keystone/policy/core.py | 5 | 4225 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Policy service."""
import abc
from oslo_config import cfg
import six
from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone import notifications
CONF = cfg.CONF
@dependency.provider('policy_api')
class Manager(manager.Manager):
"""Default pivot point for the Policy backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
_POLICY = 'policy'
def __init__(self):
super(Manager, self).__init__(CONF.policy.driver)
def create_policy(self, policy_id, policy, initiator=None):
ref = self.driver.create_policy(policy_id, policy)
notifications.Audit.created(self._POLICY, policy_id, initiator)
return ref
def get_policy(self, policy_id):
try:
return self.driver.get_policy(policy_id)
except exception.NotFound:
raise exception.PolicyNotFound(policy_id=policy_id)
def update_policy(self, policy_id, policy, initiator=None):
if 'id' in policy and policy_id != policy['id']:
raise exception.ValidationError('Cannot change policy ID')
try:
ref = self.driver.update_policy(policy_id, policy)
except exception.NotFound:
raise exception.PolicyNotFound(policy_id=policy_id)
notifications.Audit.updated(self._POLICY, policy_id, initiator)
return ref
@manager.response_truncated
def list_policies(self, hints=None):
# NOTE(henry-nash): Since the advantage of filtering or list limiting
# of policies at the driver level is minimal, we leave this to the
# caller.
return self.driver.list_policies()
def delete_policy(self, policy_id, initiator=None):
try:
ret = self.driver.delete_policy(policy_id)
except exception.NotFound:
raise exception.PolicyNotFound(policy_id=policy_id)
notifications.Audit.deleted(self._POLICY, policy_id, initiator)
return ret
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
def _get_list_limit(self):
return CONF.policy.list_limit or CONF.list_limit
@abc.abstractmethod
def enforce(self, context, credentials, action, target):
"""Verify that a user is authorized to perform action.
For more information on a full implementation of this see:
`keystone.policy.backends.rules.Policy.enforce`
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def create_policy(self, policy_id, policy):
"""Store a policy blob.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_policies(self):
"""List all policies."""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_policy(self, policy_id):
"""Retrieve a specific policy blob.
:raises: keystone.exception.PolicyNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def update_policy(self, policy_id, policy):
"""Update a policy blob.
:raises: keystone.exception.PolicyNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_policy(self, policy_id):
"""Remove a policy blob.
:raises: keystone.exception.PolicyNotFound
"""
raise exception.NotImplemented() # pragma: no cover
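# Minimal in-memory sketch of the Driver contract above (illustrative only,
# not shipped with Keystone; the reference implementation lives in
# keystone.policy.backends.rules.Policy). The dict-based storage is an
# assumption made for brevity.
class _ExampleInMemoryDriver(Driver):
    def __init__(self):
        self._blobs = {}
    def enforce(self, context, credentials, action, target):
        # The sketch accepts every request; a real driver evaluates rules.
        return True
    def create_policy(self, policy_id, policy):
        self._blobs[policy_id] = policy
        return policy
    def list_policies(self):
        return list(self._blobs.values())
    def get_policy(self, policy_id):
        try:
            return self._blobs[policy_id]
        except KeyError:
            raise exception.PolicyNotFound(policy_id=policy_id)
    def update_policy(self, policy_id, policy):
        if policy_id not in self._blobs:
            raise exception.PolicyNotFound(policy_id=policy_id)
        self._blobs[policy_id].update(policy)
        return self._blobs[policy_id]
    def delete_policy(self, policy_id):
        if self._blobs.pop(policy_id, None) is None:
            raise exception.PolicyNotFound(policy_id=policy_id)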
| apache-2.0 |
nandhp/youtube-dl | youtube_dl/extractor/pornovoisines.py | 15 | 3390 | # coding: utf-8
from __future__ import unicode_literals
import re
import random
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class PornoVoisinesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?pornovoisines\.com/showvideo/(?P<id>\d+)/(?P<display_id>[^/]+)'
_VIDEO_URL_TEMPLATE = 'http://stream%d.pornovoisines.com' \
'/static/media/video/transcoded/%s-640x360-1000-trscded.mp4'
_SERVER_NUMBERS = (1, 2)
_TEST = {
'url': 'http://www.pornovoisines.com/showvideo/1285/recherche-appartement/',
'md5': '5ac670803bc12e9e7f9f662ce64cf1d1',
'info_dict': {
'id': '1285',
'display_id': 'recherche-appartement',
'ext': 'mp4',
'title': 'Recherche appartement',
'description': 'md5:819ea0b785e2a04667a1a01cdc89594e',
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20140925',
'duration': 120,
'view_count': int,
'average_rating': float,
'categories': ['Débutantes', 'Scénario', 'Sodomie'],
'age_limit': 18,
}
}
@classmethod
def build_video_url(cls, num):
return cls._VIDEO_URL_TEMPLATE % (random.choice(cls._SERVER_NUMBERS), num)
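    # For example (illustrative values), build_video_url('1285') returns
    #   http://stream1.pornovoisines.com/static/media/video/transcoded/1285-640x360-1000-trscded.mp4
    # or the same path on stream2, depending on the randomly chosen server.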
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, video_id)
video_url = self.build_video_url(video_id)
title = self._html_search_regex(
r'<h1>(.+?)</h1>', webpage, 'title', flags=re.DOTALL)
description = self._html_search_regex(
r'<article id="descriptif">(.+?)</article>',
webpage, 'description', fatal=False, flags=re.DOTALL)
thumbnail = self._search_regex(
r'<div id="mediaspace%s">\s*<img src="/?([^"]+)"' % video_id,
webpage, 'thumbnail', fatal=False)
if thumbnail:
thumbnail = 'http://www.pornovoisines.com/%s' % thumbnail
upload_date = unified_strdate(self._search_regex(
r'Publié le ([\d-]+)', webpage, 'upload date', fatal=False))
duration = int_or_none(self._search_regex(
'Durée (\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._search_regex(
r'(\d+) vues', webpage, 'view count', fatal=False))
average_rating = self._search_regex(
r'Note\s*:\s*(\d+(?:,\d+)?)', webpage, 'average rating', fatal=False)
if average_rating:
average_rating = float_or_none(average_rating.replace(',', '.'))
categories = self._html_search_meta(
'keywords', webpage, 'categories', fatal=False)
if categories:
categories = [category.strip() for category in categories.split(',')]
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'average_rating': average_rating,
'categories': categories,
'age_limit': 18,
}
| unlicense |
patchew-project/patchew | tests/test_ansi2html.py | 2 | 22373 | # Test conversion of ANSI sequences into HTML
#
# Copyright (C) 2018 Red Hat, Inc.
#
# Author: Paolo Bonzini <[email protected]>
import unittest
from patchew.logviewer import ansi2html, ansi2text, ANSI2TextConverter
class ANSI2HTMLTest(unittest.TestCase):
def assertAnsi(self, test, expected, **kwargs):
self.assertEqual(
"".join(ansi2html(test, **kwargs)),
'<pre class="ansi">%s</pre>' % expected,
repr(test),
)
def assertBlackBg(self, test, expected):
self.assertAnsi(test, expected)
def assertWhiteBg(self, test, expected):
self.assertAnsi(test, expected, white_bg=True)
# basic formatting tests
def test_basic(self):
self.assertBlackBg("\tb", " b")
self.assertBlackBg("\t\ta", " a")
self.assertBlackBg("a\tb", "a b")
self.assertBlackBg("ab\tc", "ab c")
self.assertBlackBg("a\nbc", "a\nbc")
self.assertBlackBg("a\f", "a\n<hr>")
self.assertBlackBg("a\n\f", "a\n\n<hr>")
self.assertBlackBg("<", "<")
self.assertBlackBg("\x07", "🔔")
# backspace and carriage return
def test_set_pos(self):
self.assertBlackBg("abc\b\bBC", "aBC")
self.assertBlackBg("a\b<", "<")
self.assertBlackBg("<\ba", "a")
self.assertBlackBg("a\b\bbc", "bc")
self.assertBlackBg("a\rbc", "bc")
self.assertBlackBg("a\nb\bc", "a\nc")
self.assertBlackBg("a\t\bb", "a b")
self.assertBlackBg("a\tb\b\bc", "a cb")
self.assertBlackBg("01234567\r\tb", "01234567b")
# Escape sequences
def test_esc_parsing(self):
self.assertBlackBg("{\x1b%}", "{}")
self.assertBlackBg("{\x1b[0m}", "{}")
self.assertBlackBg("{\x1b[m}", "{}")
self.assertBlackBg("{\x1b[0;1;7;0m}", "{}")
self.assertBlackBg("{\x1b[1;7m\x1b[m}", "{}")
self.assertBlackBg("{\x1b]test\x1b\\}", "{}")
self.assertBlackBg("{\x1b]test\x07}", "{}")
self.assertBlackBg("{\x1b]test\x1b[0m\x07}", "{}")
self.assertBlackBg("{\x1b]test\x1b[7m\x07}", "{}")
# ESC [2J
def test_clear_screen(self):
self.assertBlackBg("a\n\x1b[2Jb", "a\n<hr>b")
self.assertBlackBg("a\x1b[2J", "a<hr> ")
self.assertBlackBg("a\x1b[2Jb", "a<hr> b")
# ESC [C and ESC [D
def test_horiz_movement(self):
self.assertBlackBg("abc\x1b[2DB", "aBc")
self.assertBlackBg("abc\x1b[3CD", "abc D")
self.assertBlackBg("abcd\x1b[3DB\x1b[1CD", "aBcD")
self.assertBlackBg("abc\x1b[0CD", "abc D")
self.assertBlackBg("abc\x1b[CD", "abc D")
# ESC [K
def test_clear_line(self):
self.assertBlackBg("\x1b[Kabcd", "abcd")
self.assertBlackBg("abcd\r\x1b[K", "")
self.assertBlackBg("abcd\b\x1b[K", "abc")
self.assertBlackBg("abcd\r\x1b[KDef", "Def")
self.assertBlackBg("abcd\b\x1b[KDef", "abcDef")
self.assertBlackBg("abcd\r\x1b[0K", "")
self.assertBlackBg("abcd\b\x1b[0K", "abc")
self.assertBlackBg("abcd\r\x1b[1K", "abcd")
self.assertBlackBg("abcd\b\x1b[1K", " d")
self.assertBlackBg("abcd\r\x1b[2K", "")
self.assertBlackBg("abcd\b\x1b[2K", " ")
self.assertBlackBg("abcd\r\x1b[2KDef", "Def")
self.assertBlackBg("abcd\b\x1b[2KDef", " Def")
# basic style formatting and bold
def test_basic_styles(self):
self.assertBlackBg("\x1b[0m", "")
self.assertWhiteBg("\x1b[0m", "")
self.assertBlackBg("A\x1b[0mBC", "ABC")
self.assertWhiteBg("A\x1b[0mBC", "ABC")
self.assertBlackBg("\x1b[30;41m", "")
self.assertWhiteBg("\x1b[30;41m", "")
self.assertBlackBg("\x1b[1mABC", '<span class="HIW BOLD">ABC</span>')
self.assertWhiteBg("\x1b[1mABC", '<span class="BOLD">ABC</span>')
self.assertBlackBg("A\x1b[1mBC", 'A<span class="HIW BOLD">BC</span>')
self.assertWhiteBg("A\x1b[1mBC", 'A<span class="BOLD">BC</span>')
self.assertBlackBg("\x1b[1mAB\x1b[0mC", '<span class="HIW BOLD">AB</span>C')
self.assertWhiteBg("\x1b[1mAB\x1b[0mC", '<span class="BOLD">AB</span>C')
self.assertBlackBg("A\x1b[1mB\x1b[0mC", 'A<span class="HIW BOLD">B</span>C')
self.assertWhiteBg("A\x1b[1mB\x1b[0mC", 'A<span class="BOLD">B</span>C')
self.assertBlackBg(
"A\x1b[1mB\x1b[0m\x1b[1mC", 'A<span class="HIW BOLD">BC</span>'
)
self.assertWhiteBg("A\x1b[1mB\x1b[0m\x1b[1mC", 'A<span class="BOLD">BC</span>')
# basic dim and dim+bold tests
def test_dim_bold(self):
self.assertBlackBg("\x1b[2mABC", '<span class="HIK">ABC</span>')
self.assertWhiteBg("\x1b[2mABC", '<span class="HIK">ABC</span>')
self.assertBlackBg("\x1b[2;1mABC", '<span class="HIK BOLD">ABC</span>')
self.assertWhiteBg("\x1b[2;1mABC", '<span class="HIK BOLD">ABC</span>')
self.assertBlackBg("\x1b[1;21mABC", "ABC")
self.assertWhiteBg("\x1b[1;21mABC", "ABC")
self.assertBlackBg("\x1b[2;21mABC", "ABC")
self.assertWhiteBg("\x1b[2;21mABC", "ABC")
self.assertBlackBg("\x1b[1;22mABC", "ABC")
self.assertWhiteBg("\x1b[1;22mABC", "ABC")
self.assertBlackBg("\x1b[2;22mABC", "ABC")
self.assertWhiteBg("\x1b[2;22mABC", "ABC")
# background and foreground colors
def test_colors(self):
self.assertBlackBg("\x1b[31mABC", '<span class="HIR">ABC</span>')
self.assertWhiteBg("\x1b[31mABC", '<span class="HIR">ABC</span>')
self.assertBlackBg("\x1b[31;1mABC", '<span class="HIR BOLD">ABC</span>')
self.assertWhiteBg("\x1b[31;1mABC", '<span class="HIR BOLD">ABC</span>')
self.assertBlackBg("\x1b[31;2mABC", '<span class="RED">ABC</span>')
self.assertWhiteBg("\x1b[31;2mABC", '<span class="RED">ABC</span>')
self.assertBlackBg("\x1b[31;2;1mABC", '<span class="RED BOLD">ABC</span>')
self.assertWhiteBg("\x1b[31;2;1mABC", '<span class="RED BOLD">ABC</span>')
self.assertBlackBg("\x1b[31mAB\x1b[39mC", '<span class="HIR">AB</span>C')
self.assertWhiteBg("\x1b[31mAB\x1b[39mC", '<span class="HIR">AB</span>C')
self.assertBlackBg("\x1b[30mABC", '<span class="BLK">ABC</span>')
self.assertWhiteBg("\x1b[30mABC", "ABC")
self.assertBlackBg("\x1b[30;1mABC", '<span class="BLK BOLD">ABC</span>')
self.assertWhiteBg("\x1b[30;1mABC", '<span class="BOLD">ABC</span>')
self.assertBlackBg("\x1b[30;2mABC", '<span class="BLK">ABC</span>')
self.assertWhiteBg("\x1b[30;2mABC", '<span class="HIK">ABC</span>')
self.assertBlackBg("\x1b[30;2;1mABC", '<span class="BLK BOLD">ABC</span>')
self.assertWhiteBg("\x1b[30;2;1mABC", '<span class="HIK BOLD">ABC</span>')
self.assertBlackBg("\x1b[37mABC", "ABC")
self.assertWhiteBg("\x1b[37mABC", '<span class="WHI">ABC</span>')
self.assertBlackBg("\x1b[37;1mABC", '<span class="HIW BOLD">ABC</span>')
self.assertWhiteBg("\x1b[37;1mABC", '<span class="HIW BOLD">ABC</span>')
self.assertBlackBg("\x1b[37;2mABC", '<span class="HIK">ABC</span>')
self.assertWhiteBg("\x1b[37;2mABC", '<span class="WHI">ABC</span>')
self.assertBlackBg("\x1b[37;2;1mABC", '<span class="HIK BOLD">ABC</span>')
self.assertWhiteBg("\x1b[37;2;1mABC", '<span class="WHI BOLD">ABC</span>')
self.assertBlackBg("\x1b[46mABC", '<span class="BHIC">ABC</span>')
self.assertWhiteBg("\x1b[46mABC", '<span class="BHIC">ABC</span>')
self.assertBlackBg("\x1b[46mAB\x1b[49mC", '<span class="BHIC">AB</span>C')
self.assertWhiteBg("\x1b[46mAB\x1b[49mC", '<span class="BHIC">AB</span>C')
self.assertBlackBg("\x1b[46;31mABC", '<span class="HIR BHIC">ABC</span>')
self.assertWhiteBg("\x1b[46;31mABC", '<span class="HIR BHIC">ABC</span>')
self.assertBlackBg("\x1b[46;31;1mABC", '<span class="HIR BHIC BOLD">ABC</span>')
self.assertWhiteBg("\x1b[46;31;1mABC", '<span class="HIR BHIC BOLD">ABC</span>')
self.assertBlackBg("\x1b[46;31;2mABC", '<span class="RED BHIC">ABC</span>')
self.assertWhiteBg("\x1b[46;31;2mABC", '<span class="RED BHIC">ABC</span>')
self.assertBlackBg(
"\x1b[46;31;2;1mABC", '<span class="RED BHIC BOLD">ABC</span>'
)
self.assertWhiteBg(
"\x1b[46;31;2;1mABC", '<span class="RED BHIC BOLD">ABC</span>'
)
self.assertBlackBg("\x1b[46;37mABC", '<span class="BHIC">ABC</span>')
self.assertWhiteBg("\x1b[46;37mABC", '<span class="WHI BHIC">ABC</span>')
self.assertBlackBg("\x1b[46;37;1mABC", '<span class="HIW BHIC BOLD">ABC</span>')
self.assertWhiteBg("\x1b[46;37;1mABC", '<span class="HIW BHIC BOLD">ABC</span>')
self.assertBlackBg("\x1b[46;37;2mABC", '<span class="HIK BHIC">ABC</span>')
self.assertWhiteBg("\x1b[46;37;2mABC", '<span class="WHI BHIC">ABC</span>')
self.assertBlackBg(
"\x1b[46;37;2;1mABC", '<span class="HIK BHIC BOLD">ABC</span>'
)
self.assertWhiteBg(
"\x1b[46;37;2;1mABC", '<span class="WHI BHIC BOLD">ABC</span>'
)
# more colors
def test_colors_extra(self):
self.assertBlackBg("\x1b[90mABC", '<span class="HIK">ABC</span>')
self.assertWhiteBg("\x1b[90mABC", '<span class="HIK">ABC</span>')
self.assertBlackBg("\x1b[90;1mABC", '<span class="HIK BOLD">ABC</span>')
self.assertWhiteBg("\x1b[90;1mABC", '<span class="HIK BOLD">ABC</span>')
self.assertBlackBg("\x1b[90;2mABC", '<span class="HIK">ABC</span>')
self.assertWhiteBg("\x1b[90;2mABC", '<span class="HIK">ABC</span>')
self.assertBlackBg("\x1b[90;2;1mABC", '<span class="HIK BOLD">ABC</span>')
self.assertWhiteBg("\x1b[90;2;1mABC", '<span class="HIK BOLD">ABC</span>')
self.assertBlackBg("\x1b[97;1mABC", '<span class="HIW BOLD">ABC</span>')
self.assertWhiteBg("\x1b[97;1mABC", '<span class="HIW BOLD">ABC</span>')
self.assertBlackBg("\x1b[97;2mABC", "ABC")
self.assertWhiteBg("\x1b[97;2mABC", '<span class="WHI">ABC</span>')
self.assertBlackBg("\x1b[97;2;1mABC", '<span class="BOLD">ABC</span>')
self.assertWhiteBg("\x1b[97;2;1mABC", '<span class="WHI BOLD">ABC</span>')
self.assertBlackBg("\x1b[100mABC", '<span class="BHIK">ABC</span>')
self.assertWhiteBg("\x1b[100mABC", '<span class="BHIK">ABC</span>')
self.assertBlackBg("\x1b[38;5;120mABC", '<span class="f120">ABC</span>')
self.assertWhiteBg("\x1b[38;5;120mABC", '<span class="f120">ABC</span>')
self.assertBlackBg("\x1b[38;5;120;2mABC", '<span class="df120">ABC</span>')
self.assertWhiteBg("\x1b[38;5;120;2mABC", '<span class="df120">ABC</span>')
self.assertBlackBg("\x1b[48;5;120mABC", '<span class="b120">ABC</span>')
self.assertWhiteBg("\x1b[48;5;120mABC", '<span class="b120">ABC</span>')
self.assertBlackBg("\x1b[48;5;120;2mABC", '<span class="HIK b120">ABC</span>')
self.assertWhiteBg("\x1b[48;5;120;2mABC", '<span class="HIK b120">ABC</span>')
# italic, underline, strikethrough
def test_text_variants(self):
self.assertBlackBg("\x1b[3mABC", '<span class="ITA">ABC</span>')
self.assertWhiteBg("\x1b[3mABC", '<span class="ITA">ABC</span>')
self.assertBlackBg("\x1b[3;31mABC", '<span class="HIR ITA">ABC</span>')
self.assertWhiteBg("\x1b[3;31mABC", '<span class="HIR ITA">ABC</span>')
self.assertBlackBg("\x1b[3mAB\x1b[23mC", '<span class="ITA">AB</span>C')
self.assertWhiteBg("\x1b[3mAB\x1b[23mC", '<span class="ITA">AB</span>C')
self.assertBlackBg(
"\x1b[3;31mAB\x1b[23mC",
'<span class="HIR ITA">AB</span><span class="HIR">C</span>',
)
self.assertWhiteBg(
"\x1b[3;31mAB\x1b[23mC",
'<span class="HIR ITA">AB</span><span class="HIR">C</span>',
)
self.assertBlackBg("\x1b[4mABC", '<span class="UND">ABC</span>')
self.assertWhiteBg("\x1b[4mABC", '<span class="UND">ABC</span>')
self.assertBlackBg("\x1b[4;31mABC", '<span class="HIR UND">ABC</span>')
self.assertWhiteBg("\x1b[4;31mABC", '<span class="HIR UND">ABC</span>')
self.assertBlackBg("\x1b[4mAB\x1b[24mC", '<span class="UND">AB</span>C')
self.assertWhiteBg("\x1b[4mAB\x1b[24mC", '<span class="UND">AB</span>C')
self.assertBlackBg(
"\x1b[4;31mAB\x1b[24mC",
'<span class="HIR UND">AB</span><span class="HIR">C</span>',
)
self.assertWhiteBg(
"\x1b[4;31mAB\x1b[24mC",
'<span class="HIR UND">AB</span><span class="HIR">C</span>',
)
self.assertBlackBg("\x1b[9mABC", '<span class="STR">ABC</span>')
self.assertWhiteBg("\x1b[9mABC", '<span class="STR">ABC</span>')
self.assertBlackBg("\x1b[9mAB\x1b[29mC", '<span class="STR">AB</span>C')
self.assertWhiteBg("\x1b[9mAB\x1b[29mC", '<span class="STR">AB</span>C')
self.assertBlackBg(
"\x1b[9;31mAB\x1b[29mC",
'<span class="HIR STR">AB</span><span class="HIR">C</span>',
)
self.assertWhiteBg(
"\x1b[9;31mAB\x1b[29mC",
'<span class="HIR STR">AB</span><span class="HIR">C</span>',
)
self.assertBlackBg("\x1b[9;31mABC", '<span class="HIR STR">ABC</span>')
self.assertWhiteBg("\x1b[9;31mABC", '<span class="HIR STR">ABC</span>')
self.assertBlackBg("\x1b[4;9mABC", '<span class="UNDSTR">ABC</span>')
self.assertWhiteBg("\x1b[4;9mABC", '<span class="UNDSTR">ABC</span>')
self.assertBlackBg("\x1b[4;9;31mABC", '<span class="HIR UNDSTR">ABC</span>')
self.assertWhiteBg("\x1b[4;9;31mABC", '<span class="HIR UNDSTR">ABC</span>')
self.assertBlackBg(
"\x1b[4;9mAB\x1b[24mC",
'<span class="UNDSTR">AB</span><span class="STR">C</span>',
)
self.assertWhiteBg(
"\x1b[4;9mAB\x1b[24mC",
'<span class="UNDSTR">AB</span><span class="STR">C</span>',
)
self.assertBlackBg(
"\x1b[4;9mAB\x1b[29mC",
'<span class="UNDSTR">AB</span><span class="UND">C</span>',
)
self.assertWhiteBg(
"\x1b[4;9mAB\x1b[29mC",
'<span class="UNDSTR">AB</span><span class="UND">C</span>',
)
# invert
def test_invert(self):
self.assertBlackBg("\x1b[7mABC", '<span class="BLK BWHI">ABC</span>')
self.assertWhiteBg("\x1b[7mABC", '<span class="WHI BBLK">ABC</span>')
self.assertBlackBg("\x1b[7mABC\r", '<span class="BLK BWHI">ABC</span>')
self.assertWhiteBg("\x1b[7mABC\r", '<span class="WHI BBLK">ABC</span>')
self.assertBlackBg("\x1b[30;7mABC", '<span class="BLK">ABC</span>')
self.assertWhiteBg("\x1b[30;7mABC", '<span class="WHI BBLK">ABC</span>')
self.assertBlackBg("\x1b[30;1;7mABC", '<span class="BLK BOLD">ABC</span>')
self.assertWhiteBg("\x1b[30;1;7mABC", '<span class="HIW BBLK BOLD">ABC</span>')
self.assertBlackBg("\x1b[37;7mABC", '<span class="BLK BWHI">ABC</span>')
self.assertWhiteBg("\x1b[37;7mABC", '<span class="WHI">ABC</span>')
self.assertBlackBg("\x1b[37;1;7mABC", '<span class="BLK BWHI BOLD">ABC</span>')
self.assertWhiteBg("\x1b[37;1;7mABC", '<span class="HIW BOLD">ABC</span>')
self.assertBlackBg("\x1b[46;7mABC", '<span class="HIC BWHI">ABC</span>')
self.assertWhiteBg("\x1b[46;7mABC", '<span class="HIC BBLK">ABC</span>')
self.assertBlackBg("\x1b[46;1;7mABC", '<span class="HIC BWHI BOLD">ABC</span>')
self.assertWhiteBg("\x1b[46;1;7mABC", '<span class="HIC BBLK BOLD">ABC</span>')
self.assertBlackBg("\x1b[46;31;7mABC", '<span class="HIC BHIR">ABC</span>')
self.assertWhiteBg("\x1b[46;31;7mABC", '<span class="HIC BHIR">ABC</span>')
self.assertBlackBg(
"\x1b[46;31;7mAB\x1b[27mC",
'<span class="HIC BHIR">AB</span><span class="HIR BHIC">C</span>',
)
self.assertWhiteBg(
"\x1b[46;31;7mAB\x1b[27mC",
'<span class="HIC BHIR">AB</span><span class="HIR BHIC">C</span>',
)
self.assertBlackBg(
"\x1b[36;47;1;7mABC", '<span class="HIW BHIC BOLD">ABC</span>'
)
self.assertWhiteBg(
"\x1b[36;47;1;7mABC", '<span class="HIW BHIC BOLD">ABC</span>'
)
self.assertBlackBg("\x1b[36;47;2;7mABC", '<span class="BCYN">ABC</span>')
self.assertWhiteBg("\x1b[36;47;2;7mABC", '<span class="WHI BCYN">ABC</span>')
self.assertBlackBg("\x1b[36;47;2;1;7mABC", '<span class="BCYN BOLD">ABC</span>')
self.assertWhiteBg(
"\x1b[36;47;2;1;7mABC", '<span class="WHI BCYN BOLD">ABC</span>'
)
self.assertBlackBg("\x1b[90;7mABC", '<span class="BLK BHIK">ABC</span>')
self.assertWhiteBg("\x1b[90;7mABC", '<span class="WHI BHIK">ABC</span>')
self.assertBlackBg("\x1b[90;1;7mABC", '<span class="BLK BHIK BOLD">ABC</span>')
self.assertWhiteBg("\x1b[90;1;7mABC", '<span class="HIW BHIK BOLD">ABC</span>')
self.assertBlackBg("\x1b[100;7mABC", '<span class="HIK BWHI">ABC</span>')
self.assertWhiteBg("\x1b[100;7mABC", '<span class="HIK BBLK">ABC</span>')
self.assertBlackBg("\x1b[100;1;7mABC", '<span class="HIK BWHI BOLD">ABC</span>')
self.assertWhiteBg("\x1b[100;1;7mABC", '<span class="HIK BBLK BOLD">ABC</span>')
self.assertBlackBg("\x1b[38;5;120;7mABC", '<span class="BLK b120">ABC</span>')
self.assertWhiteBg("\x1b[38;5;120;7mABC", '<span class="WHI b120">ABC</span>')
self.assertBlackBg(
"\x1b[38;5;120;2;7mABC", '<span class="BLK db120">ABC</span>'
)
self.assertWhiteBg(
"\x1b[38;5;120;2;7mABC", '<span class="WHI db120">ABC</span>'
)
self.assertBlackBg("\x1b[48;5;120;7mABC", '<span class="f120 BWHI">ABC</span>')
self.assertWhiteBg("\x1b[48;5;120;7mABC", '<span class="f120 BBLK">ABC</span>')
self.assertBlackBg(
"\x1b[48;5;120;2;7mABC", '<span class="f120 BHIK">ABC</span>'
)
self.assertWhiteBg(
"\x1b[48;5;120;2;7mABC", '<span class="f120 BHIK">ABC</span>'
)
# vte uses BHIK here??
self.assertBlackBg("\x1b[47;30;1;7mABC", '<span class="HIW BOLD">ABC</span>')
self.assertWhiteBg(
"\x1b[47;30;1;7mABC", '<span class="HIW BBLK BOLD">ABC</span>'
)
# combining cursor movement and formatting
def test_movement_and_formatting(self):
self.assertBlackBg("\x1b[42m\tabc", ' <span class="BHIG">abc</span>')
self.assertWhiteBg("\x1b[42m\tabc", ' <span class="BHIG">abc</span>')
self.assertBlackBg("abc\x1b[42m\x1b[1Kabc", ' <span class="BHIG">abc</span>')
self.assertWhiteBg("abc\x1b[42m\x1b[1Kabc", ' <span class="BHIG">abc</span>')
self.assertBlackBg("\x1b[7m\tabc", ' <span class="BLK BWHI">abc</span>')
self.assertWhiteBg("\x1b[7m\tabc", ' <span class="WHI BBLK">abc</span>')
self.assertBlackBg(
"abc\x1b[7m\x1b[1Kabc", ' <span class="BLK BWHI">abc</span>'
)
self.assertWhiteBg(
"abc\x1b[7m\x1b[1Kabc", ' <span class="WHI BBLK">abc</span>'
)
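    # Putting the pieces together (illustrative, mirroring the assertions
    # above): assertAnsi joins the generator output and compares it against
    # the <pre> wrapper, so a direct call looks roughly like
    #
    #     "".join(ansi2html("\x1b[31mError\x1b[0m ok"))
    #     # -> '<pre class="ansi"><span class="HIR">Error</span> ok</pre>'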
class ANSI2TextTest(unittest.TestCase):
def assertAnsi(self, test, expected, **kwargs):
self.assertEqual("".join(ansi2text(test, **kwargs)), expected, repr(test))
# basic formatting tests
def test_basic(self):
self.assertAnsi("\tb", " b")
self.assertAnsi("\t\ta", " a")
self.assertAnsi("a\tb", "a b")
self.assertAnsi("ab\tc", "ab c")
self.assertAnsi("a\nbc", "a\nbc")
self.assertAnsi("a\f", "a\n" + ANSI2TextConverter.FF)
self.assertAnsi("a\n\f", "a\n\n" + ANSI2TextConverter.FF)
self.assertAnsi("<", "<")
        self.assertAnsi("\x07", "\U0001F514")
# backspace and carriage return
def test_set_pos(self):
self.assertAnsi("abc\b\bBC", "aBC")
self.assertAnsi("a\b<", "<")
self.assertAnsi("<\ba", "a")
self.assertAnsi("a\b\bbc", "bc")
self.assertAnsi("a\rbc", "bc")
self.assertAnsi("a\nb\bc", "a\nc")
self.assertAnsi("a\t\bb", "a b")
self.assertAnsi("a\tb\b\bc", "a cb")
self.assertAnsi("01234567\r\tb", "01234567b")
# Escape sequences
def test_esc_parsing(self):
self.assertAnsi("{\x1b%}", "{}")
self.assertAnsi("{\x1b[0m}", "{}")
self.assertAnsi("{\x1b[m}", "{}")
self.assertAnsi("{\x1b[0;1;7;0m}", "{}")
self.assertAnsi("{\x1b[1;7m\x1b[m}", "{}")
self.assertAnsi("{\x1b]test\x1b\\}", "{}")
self.assertAnsi("{\x1b]test\x07}", "{}")
self.assertAnsi("{\x1b]test\x1b[0m\x07}", "{}")
self.assertAnsi("{\x1b]test\x1b[7m\x07}", "{}")
# ESC [C and ESC [D
def test_horiz_movement(self):
self.assertAnsi("abc\x1b[2DB", "aBc")
self.assertAnsi("abc\x1b[3CD", "abc D")
self.assertAnsi("abcd\x1b[3DB\x1b[1CD", "aBcD")
self.assertAnsi("abc\x1b[0CD", "abc D")
self.assertAnsi("abc\x1b[CD", "abc D")
# ESC [K
def test_clear_line(self):
self.assertAnsi("\x1b[Kabcd", "abcd")
self.assertAnsi("abcd\r\x1b[K", "")
self.assertAnsi("abcd\b\x1b[K", "abc")
self.assertAnsi("abcd\r\x1b[KDef", "Def")
self.assertAnsi("abcd\b\x1b[KDef", "abcDef")
self.assertAnsi("abcd\r\x1b[0K", "")
self.assertAnsi("abcd\b\x1b[0K", "abc")
self.assertAnsi("abcd\r\x1b[1K", "abcd")
self.assertAnsi("abcd\b\x1b[1K", " d")
self.assertAnsi("abcd\r\x1b[2K", "")
self.assertAnsi("abcd\b\x1b[2K", " ")
self.assertAnsi("abcd\r\x1b[2KDef", "Def")
self.assertAnsi("abcd\b\x1b[2KDef", " Def")
# combining cursor movement and formatting
def test_movement_and_formatting(self):
self.assertAnsi("\x1b[42m\tabc", " abc")
self.assertAnsi("abc\x1b[42m\x1b[1Kabc", " abc")
self.assertAnsi("\x1b[7m\tabc", " abc")
self.assertAnsi("abc\x1b[7m\x1b[1Kabc", " abc")
if __name__ == "__main__":
unittest.main()
| mit |
gicking/STM8_templates | Projects/General_Examples/I2C_read/build_upload.py | 16 | 3419 | #!/usr/bin/python
'''
Script for building and uploading a STM8 project with dependency auto-detection
'''
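# Typical invocations (illustrative; the flags are declared by the argparse
# section below, and port/tool settings come from the constants that follow):
#   python build_upload.py                               # build, flash and open terminal
#   python build_upload.py --skipupload --skipterminal   # compile only, no flash
#   python build_upload.py --skipbuild                    # re-flash the last build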
# set general options
UPLOAD = 'BSL' # select 'BSL' or 'SWIM'
TERMINAL = True # set True to open terminal after upload
RESET = 1 # STM8 reset: 0=skip, 1=manual, 2=DTR line (RS232), 3=send 'Re5eT!' @ 115.2kBaud, 4=Arduino pin 8, 5=Raspi pin 12
OPTIONS = '' # e.g. device for SPL ('-DSTM8S105', see stm8s.h)
# set path to root of STM8 templates
ROOT_DIR = '../../../'
LIB_ROOT = ROOT_DIR + 'Library/'
TOOL_DIR = ROOT_DIR + 'Tools/'
OBJDIR = 'output'
TARGET = 'main.ihx'
# set OS specific
import platform
if platform.system() == 'Windows':
PORT = 'COM10'
SWIM_PATH = 'C:/Programme/STMicroelectronics/st_toolset/stvp/'
SWIM_TOOL = 'ST-LINK'
SWIM_NAME = 'STM8S105x6' # STM8 Discovery
#SWIM_NAME = 'STM8S208xB' # muBoard
MAKE_TOOL = 'mingw32-make.exe'
else:
PORT = '/dev/ttyUSB0'
SWIM_TOOL = 'stlink'
SWIM_NAME = 'stm8s105c6' # STM8 Discovery
#SWIM_NAME = 'stm8s208?b' # muBoard
MAKE_TOOL = 'make'
# import required modules
import sys
import os
import platform
import argparse
sys.path.insert(0,TOOL_DIR) # assert that TOOL_DIR is searched first
import misc
from buildProject import createMakefile, buildProject
from uploadHex import stm8gal, stm8flash, STVP
##################
# main program
##################
# commandline parameters with defaults
parser = argparse.ArgumentParser(description="compile and upload STM8 project")
parser.add_argument("--skipmakefile", default=False, action="store_true" , help="skip creating Makefile")
parser.add_argument("--skipbuild", default=False, action="store_true" , help="skip building project")
parser.add_argument("--skipupload", default=False, action="store_true" , help="skip uploading hexfile")
parser.add_argument("--skipterminal", default=False, action="store_true" , help="skip opening terminal")
parser.add_argument("--skippause", default=False, action="store_true" , help="skip pause before exit")
args = parser.parse_args()
# create Makefile
if args.skipmakefile == False:
createMakefile(workdir='.', libroot=LIB_ROOT, outdir=OBJDIR, target=TARGET, options=OPTIONS)
# build target
if args.skipbuild == False:
buildProject(workdir='.', make=MAKE_TOOL)
# upload code via UART bootloader
if args.skipupload == False:
if UPLOAD == 'BSL':
stm8gal(tooldir=TOOL_DIR, port=PORT, outdir=OBJDIR, target=TARGET, reset=RESET)
# upload code via SWIM. Use stm8flash on Linux, STVP on Windows (due to libusb issues)
if UPLOAD == 'SWIM':
if platform.system() == 'Windows':
STVP(tooldir=SWIM_PATH, device=SWIM_NAME, hardware=SWIM_TOOL, outdir=OBJDIR, target=TARGET)
else:
stm8flash(tooldir=TOOL_DIR, device=SWIM_NAME, hardware=SWIM_TOOL, outdir=OBJDIR, target=TARGET)
# if specified open serial console after upload
if args.skipterminal == False:
if TERMINAL == True:
cmd = 'python '+TOOL_DIR+'terminal.py -p '+PORT
exitcode = os.system(cmd)
if (exitcode != 0):
sys.stderr.write('error '+str(exitcode)+'\n\n')
misc.Exit(exitcode)
# wait for return, then close window
if args.skippause == False:
if (sys.version_info.major == 3):
input("\npress return to exit ... ")
else:
raw_input("\npress return to exit ... ")
sys.stdout.write('\n\n')
# END OF MODULE
| apache-2.0 |
nuagenetworks/vspk-python | vspk/v6/nusapegressqosprofile.py | 1 | 14126 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUSAPEgressQoSProfile(NURESTObject):
""" Represents a SAPEgressQoSProfile in the VSD
Notes:
7x50 SAP Egress QoS profile
"""
__rest_name__ = "sapegressqosprofile"
__resource_name__ = "sapegressqosprofiles"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_BLOB_TYPE_SR_LINUX = "SR_LINUX"
def __init__(self, **kwargs):
""" Initializes a SAPEgressQoSProfile instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> sapegressqosprofile = NUSAPEgressQoSProfile(id=u'xxxx-xxx-xxx-xxx', name=u'SAPEgressQoSProfile')
>>> sapegressqosprofile = NUSAPEgressQoSProfile(data=my_dict)
"""
super(NUSAPEgressQoSProfile, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._description = None
self._blob_text = None
self._blob_type = None
self._embedded_metadata = None
self._entity_scope = None
self._creation_date = None
self._assoc_entity_type = None
self._customer_id = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="blob_text", remote_name="blobText", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="blob_type", remote_name="blobType", attribute_type=str, is_required=False, is_unique=False, choices=[u'SR_LINUX'])
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="assoc_entity_type", remote_name="assocEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="customer_id", remote_name="customerID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
A unique name of the Egress QoS Profile entity.
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
A unique name of the Egress QoS Profile entity.
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def description(self):
""" Get description value.
Notes:
A detailed description of the Egress QoS Profile entity.
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A detailed description of the Egress QoS Profile entity.
"""
self._description = value
@property
def blob_text(self):
""" Get blob_text value.
Notes:
Data definitions for pre-defined sets of attributes defined by blobType.
This attribute is named `blobText` in VSD API.
"""
return self._blob_text
@blob_text.setter
def blob_text(self, value):
""" Set blob_text value.
Notes:
Data definitions for pre-defined sets of attributes defined by blobType.
This attribute is named `blobText` in VSD API.
"""
self._blob_text = value
@property
def blob_type(self):
""" Get blob_type value.
Notes:
Content type for blob text.
This attribute is named `blobType` in VSD API.
"""
return self._blob_type
@blob_type.setter
def blob_type(self, value):
""" Set blob_type value.
Notes:
Content type for blob text.
This attribute is named `blobType` in VSD API.
"""
self._blob_type = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def assoc_entity_type(self):
""" Get assoc_entity_type value.
Notes:
                Type of the entity to which the Profile belongs.
This attribute is named `assocEntityType` in VSD API.
"""
return self._assoc_entity_type
@assoc_entity_type.setter
def assoc_entity_type(self, value):
""" Set assoc_entity_type value.
Notes:
                Type of the entity to which the Profile belongs.
This attribute is named `assocEntityType` in VSD API.
"""
self._assoc_entity_type = value
@property
def customer_id(self):
""" Get customer_id value.
Notes:
The customer ID given to parent enterprise. This is used by Netconf/Config manager.
This attribute is named `customerID` in VSD API.
"""
return self._customer_id
@customer_id.setter
def customer_id(self, value):
""" Set customer_id value.
Notes:
The customer ID given to parent enterprise. This is used by Netconf/Config manager.
This attribute is named `customerID` in VSD API.
"""
self._customer_id = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| bsd-3-clause |
openstack/ceilometer | ceilometer/tests/unit/test_event_pipeline.py | 1 | 14208 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import traceback
import uuid
import fixtures
from ceilometer.event import models
from ceilometer.pipeline import base as pipeline
from ceilometer.pipeline import event
from ceilometer import publisher
from ceilometer.publisher import test as test_publisher
from ceilometer import service
from ceilometer.tests import base
class EventPipelineTestCase(base.BaseTestCase):
def get_publisher(self, conf, url, namespace=''):
fake_drivers = {'test://': test_publisher.TestPublisher,
'new://': test_publisher.TestPublisher,
'except://': self.PublisherClassException}
return fake_drivers[url](conf, url)
class PublisherClassException(publisher.ConfigPublisherBase):
def publish_samples(self, samples):
pass
def publish_events(self, events):
raise Exception()
def setUp(self):
super(EventPipelineTestCase, self).setUp()
self.CONF = service.prepare_service([], [])
self.test_event = models.Event(
message_id=uuid.uuid4(),
event_type='a',
generated=datetime.datetime.utcnow(),
traits=[
models.Trait('t_text', 1, 'text_trait'),
models.Trait('t_int', 2, 'int_trait'),
models.Trait('t_float', 3, 'float_trait'),
models.Trait('t_datetime', 4, 'datetime_trait')
],
raw={'status': 'started'}
)
self.test_event2 = models.Event(
message_id=uuid.uuid4(),
event_type='b',
generated=datetime.datetime.utcnow(),
traits=[
models.Trait('t_text', 1, 'text_trait'),
models.Trait('t_int', 2, 'int_trait'),
models.Trait('t_float', 3, 'float_trait'),
models.Trait('t_datetime', 4, 'datetime_trait')
],
raw={'status': 'stopped'}
)
self.useFixture(fixtures.MockPatchObject(
publisher, 'get_publisher', side_effect=self.get_publisher))
self._setup_pipeline_cfg()
self._reraise_exception = True
self.useFixture(fixtures.MockPatch(
'ceilometer.pipeline.base.LOG.exception',
side_effect=self._handle_reraise_exception))
def _handle_reraise_exception(self, *args, **kwargs):
if self._reraise_exception:
raise Exception(traceback.format_exc())
def _setup_pipeline_cfg(self):
"""Setup the appropriate form of pipeline config."""
source = {'name': 'test_source',
'events': ['a'],
'sinks': ['test_sink']}
sink = {'name': 'test_sink',
'publishers': ['test://']}
self.pipeline_cfg = {'sources': [source], 'sinks': [sink]}
def _augment_pipeline_cfg(self):
"""Augment the pipeline config with an additional element."""
self.pipeline_cfg['sources'].append({
'name': 'second_source',
'events': ['b'],
'sinks': ['second_sink']
})
self.pipeline_cfg['sinks'].append({
'name': 'second_sink',
'publishers': ['new://'],
})
def _break_pipeline_cfg(self):
"""Break the pipeline config with a malformed element."""
self.pipeline_cfg['sources'].append({
'name': 'second_source',
'events': ['b'],
'sinks': ['second_sink']
})
self.pipeline_cfg['sinks'].append({
'name': 'second_sink',
'publishers': ['except'],
})
def _dup_pipeline_name_cfg(self):
"""Break the pipeline config with duplicate pipeline name."""
self.pipeline_cfg['sources'].append({
'name': 'test_source',
'events': ['a'],
'sinks': ['test_sink']
})
def _set_pipeline_cfg(self, field, value):
if field in self.pipeline_cfg['sources'][0]:
self.pipeline_cfg['sources'][0][field] = value
else:
self.pipeline_cfg['sinks'][0][field] = value
def _extend_pipeline_cfg(self, field, value):
if field in self.pipeline_cfg['sources'][0]:
self.pipeline_cfg['sources'][0][field].extend(value)
else:
self.pipeline_cfg['sinks'][0][field].extend(value)
def _unset_pipeline_cfg(self, field):
if field in self.pipeline_cfg['sources'][0]:
del self.pipeline_cfg['sources'][0][field]
else:
del self.pipeline_cfg['sinks'][0][field]
def _build_and_set_new_pipeline(self):
name = self.cfg2file(self.pipeline_cfg)
self.CONF.set_override('event_pipeline_cfg_file', name)
def _exception_create_pipelinemanager(self):
self._build_and_set_new_pipeline()
self.assertRaises(pipeline.PipelineException,
event.EventPipelineManager, self.CONF)
def test_no_events(self):
self._unset_pipeline_cfg('events')
self._exception_create_pipelinemanager()
def test_no_name(self):
self._unset_pipeline_cfg('name')
self._exception_create_pipelinemanager()
def test_name(self):
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
for pipe in pipeline_manager.pipelines:
self.assertTrue(pipe.name.startswith('event:'))
def test_no_publishers(self):
self._unset_pipeline_cfg('publishers')
self._exception_create_pipelinemanager()
def test_check_events_include_exclude_same(self):
event_cfg = ['a', '!a']
self._set_pipeline_cfg('events', event_cfg)
self._exception_create_pipelinemanager()
def test_check_events_include_exclude(self):
event_cfg = ['a', '!b']
self._set_pipeline_cfg('events', event_cfg)
self._exception_create_pipelinemanager()
def test_check_events_wildcard_included(self):
event_cfg = ['a', '*']
self._set_pipeline_cfg('events', event_cfg)
self._exception_create_pipelinemanager()
def test_check_publishers_invalid_publisher(self):
publisher_cfg = ['test_invalid']
self._set_pipeline_cfg('publishers', publisher_cfg)
def test_multiple_included_events(self):
event_cfg = ['a', 'b']
self._set_pipeline_cfg('events', event_cfg)
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
with pipeline_manager.publisher() as p:
p([self.test_event])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.events))
with pipeline_manager.publisher() as p:
p([self.test_event2])
self.assertEqual(2, len(publisher.events))
self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
self.assertEqual('b', getattr(publisher.events[1], 'event_type'))
def test_event_non_match(self):
event_cfg = ['nomatch']
self._set_pipeline_cfg('events', event_cfg)
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
with pipeline_manager.publisher() as p:
p([self.test_event])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(0, len(publisher.events))
self.assertEqual(0, publisher.calls)
def test_wildcard_event(self):
event_cfg = ['*']
self._set_pipeline_cfg('events', event_cfg)
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
with pipeline_manager.publisher() as p:
p([self.test_event])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.events))
self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
def test_wildcard_excluded_events(self):
event_cfg = ['*', '!a']
self._set_pipeline_cfg('events', event_cfg)
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
pipe = pipeline_manager.pipelines[0]
self.assertFalse(pipe.source.support_event('a'))
def test_wildcard_excluded_events_not_excluded(self):
event_cfg = ['*', '!b']
self._set_pipeline_cfg('events', event_cfg)
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
with pipeline_manager.publisher() as p:
p([self.test_event])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.events))
self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
def test_all_excluded_events_not_excluded(self):
event_cfg = ['!b', '!c']
self._set_pipeline_cfg('events', event_cfg)
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
with pipeline_manager.publisher() as p:
p([self.test_event])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.events))
self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
def test_all_excluded_events_excluded(self):
event_cfg = ['!a', '!c']
self._set_pipeline_cfg('events', event_cfg)
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
pipe = pipeline_manager.pipelines[0]
self.assertFalse(pipe.source.support_event('a'))
self.assertTrue(pipe.source.support_event('b'))
self.assertFalse(pipe.source.support_event('c'))
def test_wildcard_and_excluded_wildcard_events(self):
event_cfg = ['*', '!compute.*']
self._set_pipeline_cfg('events', event_cfg)
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
pipe = pipeline_manager.pipelines[0]
self.assertFalse(pipe.source.
support_event('compute.instance.create.start'))
self.assertTrue(pipe.source.support_event('identity.user.create'))
def test_included_event_and_wildcard_events(self):
event_cfg = ['compute.instance.create.start', 'identity.*']
self._set_pipeline_cfg('events', event_cfg)
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
pipe = pipeline_manager.pipelines[0]
self.assertTrue(pipe.source.support_event('identity.user.create'))
self.assertTrue(pipe.source.
support_event('compute.instance.create.start'))
self.assertFalse(pipe.source.
support_event('compute.instance.create.stop'))
def test_excluded_event_and_excluded_wildcard_events(self):
event_cfg = ['!compute.instance.create.start', '!identity.*']
self._set_pipeline_cfg('events', event_cfg)
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
pipe = pipeline_manager.pipelines[0]
self.assertFalse(pipe.source.support_event('identity.user.create'))
self.assertFalse(pipe.source.
support_event('compute.instance.create.start'))
self.assertTrue(pipe.source.
support_event('compute.instance.create.stop'))
def test_multiple_pipeline(self):
self._augment_pipeline_cfg()
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
with pipeline_manager.publisher() as p:
p([self.test_event, self.test_event2])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.events))
self.assertEqual(1, publisher.calls)
self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
new_publisher = pipeline_manager.pipelines[1].publishers[0]
self.assertEqual(1, len(new_publisher.events))
self.assertEqual(1, new_publisher.calls)
self.assertEqual('b', getattr(new_publisher.events[0], 'event_type'))
def test_multiple_publisher(self):
self._set_pipeline_cfg('publishers', ['test://', 'new://'])
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
with pipeline_manager.publisher() as p:
p([self.test_event])
publisher = pipeline_manager.pipelines[0].publishers[0]
new_publisher = pipeline_manager.pipelines[0].publishers[1]
self.assertEqual(1, len(publisher.events))
self.assertEqual(1, len(new_publisher.events))
self.assertEqual('a', getattr(new_publisher.events[0], 'event_type'))
self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
def test_multiple_publisher_isolation(self):
self._reraise_exception = False
self._set_pipeline_cfg('publishers', ['except://', 'new://'])
self._build_and_set_new_pipeline()
pipeline_manager = event.EventPipelineManager(self.CONF)
with pipeline_manager.publisher() as p:
p([self.test_event])
publisher = pipeline_manager.pipelines[0].publishers[1]
self.assertEqual(1, len(publisher.events))
self.assertEqual('a', getattr(publisher.events[0], 'event_type'))
def test_unique_pipeline_names(self):
self._dup_pipeline_name_cfg()
self._exception_create_pipelinemanager()
| apache-2.0 |
saeki-masaki/glance | glance/db/sqlalchemy/migrate_repo/versions/002_add_image_properties_table.py | 9 | 3450 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import (
Column, ForeignKey, Index, MetaData, Table, UniqueConstraint)
from glance.db.sqlalchemy.migrate_repo.schema import (
Boolean, DateTime, Integer, String, Text, create_tables, drop_tables,
from_migration_import) # noqa
def define_image_properties_table(meta):
(define_images_table,) = from_migration_import(
'001_add_images_table', ['define_images_table'])
images = define_images_table(meta) # noqa
    # NOTE(dperaza) DB2: specifying the UniqueConstraint option when creating
    # the table causes an index to be created, so pass the index name here and
    # skip the step of creating another index with the same columns.
    # The index name is needed so it can be dropped and re-created later on.
constr_kwargs = {}
if meta.bind.name == 'ibm_db_sa':
constr_kwargs['name'] = 'ix_image_properties_image_id_key'
image_properties = Table('image_properties',
meta,
Column('id',
Integer(),
primary_key=True,
nullable=False),
Column('image_id',
Integer(),
ForeignKey('images.id'),
nullable=False,
index=True),
Column('key', String(255), nullable=False),
Column('value', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted',
Boolean(),
nullable=False,
default=False,
index=True),
UniqueConstraint('image_id', 'key',
**constr_kwargs),
mysql_engine='InnoDB',
mysql_charset='utf8',
extend_existing=True)
if meta.bind.name != 'ibm_db_sa':
Index('ix_image_properties_image_id_key',
image_properties.c.image_id,
image_properties.c.key)
return image_properties
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_image_properties_table(meta)]
create_tables(tables)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
tables = [define_image_properties_table(meta)]
drop_tables(tables)
| apache-2.0 |
MichalMaM/ella-galleries | ella_galleries/south_migrations/0008_auto__add_field_galleryitem_slug.py | 2 | 11553 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GalleryItem.slug'
db.add_column('ella_galleries_galleryitem', 'slug',
self.gf('django.db.models.fields.SlugField')(max_length=255, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'GalleryItem.slug'
db.delete_column('ella_galleries_galleryitem', 'slug')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.author': {
'Meta': {'object_name': 'Author'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photos.Photo']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'core.category': {
'Meta': {'unique_together': "(('site', 'tree_path'),)", 'object_name': 'Category'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
'content': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'category.html'", 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Category']", 'null': 'True', 'blank': 'True'}),
'tree_path': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'core.publishable': {
'Meta': {'object_name': 'Publishable'},
'announced': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Author']", 'symmetrical': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Category']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photos.Photo']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'publish_from': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2999, 12, 31, 0, 0)', 'db_index': 'True'}),
'publish_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Source']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'static': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'core.source': {
'Meta': {'object_name': 'Source'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'ella_galleries.gallery': {
'Meta': {'object_name': 'Gallery', '_ormbases': ['core.Publishable']},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'publishable_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Publishable']", 'unique': 'True', 'primary_key': 'True'})
},
'ella_galleries.galleryitem': {
'Meta': {'object_name': 'GalleryItem'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
'gallery': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ella_galleries.Gallery']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photos.Photo']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'photos.photo': {
'Meta': {'object_name': 'Photo'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'photo_set'", 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255'}),
'important_bottom': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'important_left': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'important_right': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'important_top': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Source']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['ella_galleries'] | bsd-3-clause |
weimingtom/python-for-android | python-modules/twisted/twisted/internet/tksupport.py | 61 | 1716 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module integrates Tkinter with twisted.internet's mainloop.
Maintainer: Itamar Shtull-Trauring
To use, do::
| tksupport.install(rootWidget)
and then run your reactor as usual - do *not* call Tk's mainloop(),
use Twisted's regular mechanism for running the event loop.
Likewise, to stop your program you will need to stop Twisted's
event loop. For example, if you want closing your root widget to
stop Twisted::
| root.protocol('WM_DELETE_WINDOW', reactor.stop)
"""
# system imports
import Tkinter, tkSimpleDialog, tkMessageBox
# twisted imports
from twisted.python import log
from twisted.internet import task
_task = None
def install(widget, ms=10, reactor=None):
"""Install a Tkinter.Tk() object into the reactor."""
installTkFunctions()
global _task
_task = task.LoopingCall(widget.update)
_task.start(ms / 1000.0, False)
def uninstall():
"""Remove the root Tk widget from the reactor.
Call this before destroy()ing the root widget.
"""
global _task
_task.stop()
_task = None
def installTkFunctions():
import twisted.python.util
twisted.python.util.getPassword = getPassword
def getPassword(prompt = '', confirm = 0):
while 1:
try1 = tkSimpleDialog.askstring('Password Dialog', prompt, show='*')
if not confirm:
return try1
try2 = tkSimpleDialog.askstring('Password Dialog', 'Confirm Password', show='*')
if try1 == try2:
return try1
else:
tkMessageBox.showerror('Password Mismatch', 'Passwords did not match, starting over')
__all__ = ["install", "uninstall"]
| apache-2.0 |
deadmau6/Prog5 | network.py | 1 | 9349 | '''
Created on Oct 12, 2016
@author: mwitt_000
'''
import queue
import threading
## wrapper class for a queue of packets
class Interface:
## @param maxsize - the maximum size of the queue storing packets
# @param cost - of the interface used in routing
# @param capacity - the capacity of the link in bps
def __init__(self, cost=0, maxsize=0, capacity=500):
self.in_queue = queue.Queue(maxsize);
self.out_queue = queue.Queue(maxsize);
self.cost = cost
self.capacity = capacity #serialization rate
self.next_avail_time = 0 #the next time the interface can transmit a packet
##get packet from the queue interface
# @param in_or_out - use 'in' or 'out' interface
def get(self, in_or_out):
try:
if in_or_out == 'in':
pkt_S = self.in_queue.get(False)
# if pkt_S is not None:
# print('getting packet from the IN queue')
return pkt_S
else:
pkt_S = self.out_queue.get(False)
# if pkt_S is not None:
# print('getting packet from the OUT queue')
return pkt_S
except queue.Empty:
return None
##put the packet into the interface queue
# @param pkt - Packet to be inserted into the queue
# @param in_or_out - use 'in' or 'out' interface
# @param block - if True, block until room in queue, if False may throw queue.Full exception
def put(self, pkt, in_or_out, block=False):
if in_or_out == 'out':
# print('putting packet in the OUT queue')
self.out_queue.put(pkt, block)
else:
# print('putting packet in the IN queue')
self.in_queue.put(pkt, block)
## Implements a network layer packet (different from the RDT packet
# from programming assignment 2).
# NOTE: This class will need to be extended for the packet to include
# the fields necessary for the completion of this assignment.
class NetworkPacket:
## packet encoding lengths
dst_addr_S_length = 5
prot_S_length = 1
##@param dst_addr: address of the destination host
# @param data_S: packet payload
# @param prot_S: upper layer protocol for the packet (data, or control)
def __init__(self, dst_addr, prot_S, data_S):
self.dst_addr = dst_addr
self.data_S = data_S
self.prot_S = prot_S
## called when printing the object
def __str__(self):
return self.to_byte_S()
## convert packet to a byte string for transmission over links
def to_byte_S(self):
byte_S = str(self.dst_addr).zfill(self.dst_addr_S_length)
if self.prot_S == 'data':
byte_S += '1'
elif self.prot_S == 'control':
byte_S += '2'
else:
            raise Exception('%s: unknown prot_S option: %s' % (self, self.prot_S))
byte_S += self.data_S
return byte_S
## extract a packet object from a byte string
# @param byte_S: byte string representation of the packet
@classmethod
def from_byte_S(self, byte_S):
dst_addr = int(byte_S[0 : NetworkPacket.dst_addr_S_length])
prot_S = byte_S[NetworkPacket.dst_addr_S_length : NetworkPacket.dst_addr_S_length + NetworkPacket.prot_S_length]
if prot_S == '1':
prot_S = 'data'
elif prot_S == '2':
prot_S = 'control'
else:
            raise Exception('%s: unknown prot_S field: %s' % (self, prot_S))
data_S = byte_S[NetworkPacket.dst_addr_S_length + NetworkPacket.prot_S_length : ]
return self(dst_addr, prot_S, data_S)
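# Illustrative example of the wire format (added for clarity, not part of the
# original file): a data packet to host 2 carrying 'hello' serializes to
# '000021hello' -- five zero-padded address characters, one protocol character
# ('1' = data, '2' = control), then the payload.
#   NetworkPacket(2, 'data', 'hello').to_byte_S()       -> '000021hello'
#   NetworkPacket.from_byte_S('000021hello').dst_addr   -> 2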
## Implements a network host for receiving and transmitting data
class Host:
##@param addr: address of this node represented as an integer
def __init__(self, addr):
self.addr = addr
self.intf_L = [Interface()]
self.stop = False #for thread termination
## called when printing the object
def __str__(self):
return 'Host_%s' % (self.addr)
## create a packet and enqueue for transmission
# @param dst_addr: destination address for the packet
# @param data_S: data being transmitted to the network layer
# @param priority: packet priority
def udt_send(self, dst_addr, data_S, priority=0):
p = NetworkPacket(dst_addr, 'data', data_S)
print('%s: sending packet "%s"' % (self, p))
self.intf_L[0].put(p.to_byte_S(), 'out') #send packets always enqueued successfully
## receive packet from the network layer
def udt_receive(self):
pkt_S = self.intf_L[0].get('in')
if pkt_S is not None:
print('%s: received packet "%s"' % (self, pkt_S))
## thread target for the host to keep receiving data
def run(self):
print (threading.currentThread().getName() + ': Starting')
while True:
#receive data arriving to the in interface
self.udt_receive()
#terminate
if(self.stop):
print (threading.currentThread().getName() + ': Ending')
return
## Implements a multi-interface router described in class
class Router:
##@param name: friendly router name for debugging
# @param intf_cost_L: outgoing cost of interfaces (and interface number)
# @param intf_capacity_L: capacities of outgoing interfaces in bps
# @param rt_tbl_D: routing table dictionary (starting reachability), eg. {1: {1: 1}} # packet to host 1 through interface 1 for cost 1
# @param max_queue_size: max queue length (passed to Interface)
def __init__(self, name, intf_cost_L, intf_capacity_L, rt_tbl_D, max_queue_size):
self.stop = False #for thread termination
self.name = name
#create a list of interfaces
#note the number of interfaces is set up by out_intf_cost_L
assert(len(intf_cost_L) == len(intf_capacity_L))
self.intf_L = []
for i in range(len(intf_cost_L)):
self.intf_L.append(Interface(intf_cost_L[i], max_queue_size, intf_capacity_L[i]))
#set up the routing table for connected hosts
self.rt_tbl_D = rt_tbl_D
## called when printing the object
def __str__(self):
return 'Router_%s' % (self.name)
## look through the content of incoming interfaces and
# process data and control packets
def process_queues(self):
for i in range(len(self.intf_L)):
pkt_S = None
#get packet from interface i
pkt_S = self.intf_L[i].get('in')
#if packet exists make a forwarding decision
if pkt_S is not None:
p = NetworkPacket.from_byte_S(pkt_S) #parse a packet out
if p.prot_S == 'data':
self.forward_packet(p,i)
elif p.prot_S == 'control':
self.update_routes(p, i)
else:
raise Exception('%s: Unknown packet type in packet %s' % (self, p))
## forward the packet according to the routing table
# @param p Packet to forward
# @param i Incoming interface number for packet p
def forward_packet(self, p, i):
try:
# TODO: Here you will need to implement a lookup into the
# forwarding table to find the appropriate outgoing interface
# for now we assume the outgoing interface is (i+1)%2
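            # A table lookup might resemble the sketch below (added as an
            # illustration only; it assumes the rt_tbl_D format documented on
            # the class, e.g. {1: {1: 1}} = host 1 via interface 1 at cost 1):
            #   costs = self.rt_tbl_D[p.dst_addr]
            #   out_i = min(costs, key=costs.get)          # cheapest interface
            #   self.intf_L[out_i].put(p.to_byte_S(), 'out', True)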
self.intf_L[(i+1)%2].put(p.to_byte_S(), 'out', True)
print('%s: forwarding packet "%s" from interface %d to %d' % (self, p, i, (i+1)%2))
except queue.Full:
print('%s: packet "%s" lost on interface %d' % (self, p, i))
pass
    ## process an incoming routing update packet
# @param p Packet containing routing information
def update_routes(self, p, i):
#TODO: add logic to update the routing tables and
# possibly send out routing updates
print('%s: Received routing update %s from interface %d' % (self, p, i))
## send out route update
# @param i Interface number on which to send out a routing update
def send_routes(self, i):
# a sample route update packet
p = NetworkPacket(0, 'control', 'Sample routing table packet')
try:
#TODO: add logic to send out a route update
print('%s: sending routing update "%s" from interface %d' % (self, p, i))
self.intf_L[i].put(p.to_byte_S(), 'out', True)
except queue.Full:
print('%s: packet "%s" lost on interface %d' % (self, p, i))
pass
## Print routing table
def print_routes(self):
print('%s: routing table' % self)
#TODO: print the routes as a two dimensional table for easy inspection
# Currently the function just prints the route table as a dictionary
print(self.rt_tbl_D)
## thread target for the host to keep forwarding data
def run(self):
print (threading.currentThread().getName() + ': Starting')
while True:
self.process_queues()
if self.stop:
print (threading.currentThread().getName() + ': Ending')
return | gpl-3.0 |
dhruvagarwal/django | tests/template_tests/filter_tests/test_linebreaks.py | 310 | 1920 | from django.template.defaultfilters import linebreaks_filter
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LinebreaksTests(SimpleTestCase):
"""
The contents in "linebreaks" are escaped according to the current
autoescape setting.
"""
@setup({'linebreaks01': '{{ a|linebreaks }} {{ b|linebreaks }}'})
def test_linebreaks01(self):
output = self.engine.render_to_string('linebreaks01', {"a": "x&\ny", "b": mark_safe("x&\ny")})
self.assertEqual(output, "<p>x&<br />y</p> <p>x&<br />y</p>")
@setup({'linebreaks02':
'{% autoescape off %}{{ a|linebreaks }} {{ b|linebreaks }}{% endautoescape %}'})
def test_linebreaks02(self):
output = self.engine.render_to_string('linebreaks02', {"a": "x&\ny", "b": mark_safe("x&\ny")})
self.assertEqual(output, "<p>x&<br />y</p> <p>x&<br />y</p>")
class FunctionTests(SimpleTestCase):
def test_line(self):
self.assertEqual(linebreaks_filter('line 1'), '<p>line 1</p>')
def test_newline(self):
self.assertEqual(linebreaks_filter('line 1\nline 2'), '<p>line 1<br />line 2</p>')
def test_carriage(self):
self.assertEqual(linebreaks_filter('line 1\rline 2'), '<p>line 1<br />line 2</p>')
def test_carriage_newline(self):
self.assertEqual(linebreaks_filter('line 1\r\nline 2'), '<p>line 1<br />line 2</p>')
def test_non_string_input(self):
self.assertEqual(linebreaks_filter(123), '<p>123</p>')
def test_autoescape(self):
self.assertEqual(
linebreaks_filter('foo\n<a>bar</a>\nbuz'),
            '<p>foo<br />&lt;a&gt;bar&lt;/a&gt;<br />buz</p>',
)
def test_autoescape_off(self):
self.assertEqual(
linebreaks_filter('foo\n<a>bar</a>\nbuz', autoescape=False),
'<p>foo<br /><a>bar</a><br />buz</p>',
)
| bsd-3-clause |
xzh86/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
mosbasik/buzhug | javasrc/lib/Jython/Lib/test/test_pkg_jy.py | 5 | 3082 | # Test packages (dotted-name import)
# XXX: This test is borrowed from CPython 2.7 as it tickles
# http://bugs.jython.org/issue1871 so it should be removed in Jython 2.7
import sys
import os
import tempfile
import textwrap
import unittest
from test import test_support
# Helpers to create and destroy hierarchies.
def cleanout(root):
names = os.listdir(root)
for name in names:
fullname = os.path.join(root, name)
if os.path.isdir(fullname) and not os.path.islink(fullname):
cleanout(fullname)
else:
os.remove(fullname)
os.rmdir(root)
def fixdir(lst):
if "__builtins__" in lst:
lst.remove("__builtins__")
return lst
class Test(unittest.TestCase):
def setUp(self):
self.root = None
self.pkgname = None
self.syspath = list(sys.path)
def tearDown(self):
sys.path[:] = self.syspath
if self.root: # Only clean if the test was actually run
cleanout(self.root)
# delete all modules concerning the tested hierarchy
if self.pkgname:
modules = [name for name in sys.modules
if self.pkgname in name.split('.')]
for name in modules:
del sys.modules[name]
def run_code(self, code):
exec(textwrap.dedent(code), globals(), {"self": self})
def mkhier(self, descr):
root = tempfile.mkdtemp()
sys.path.insert(0, root)
if not os.path.isdir(root):
os.mkdir(root)
for name, contents in descr:
comps = name.split()
fullname = root
for c in comps:
fullname = os.path.join(fullname, c)
if contents is None:
os.mkdir(fullname)
else:
f = open(fullname, "w")
f.write(contents)
if contents and contents[-1] != '\n':
f.write('\n')
f.close()
self.root = root
# package name is the name of the first item
self.pkgname = descr[0][0]
def test_5(self):
hier = [
("t5", None),
("t5 __init__"+os.extsep+"py", "import t5.foo"),
("t5 string"+os.extsep+"py", "spam = 1"),
("t5 foo"+os.extsep+"py",
"from . import string; assert string.spam == 1"),
]
self.mkhier(hier)
import t5
s = """
from t5 import *
self.assertEqual(dir(), ['foo', 'self', 'string', 't5'])
"""
self.run_code(s)
import t5
self.assertEqual(fixdir(dir(t5)),
['__doc__', '__file__', '__name__',
'__path__', 'foo', 'string', 't5'])
self.assertEqual(fixdir(dir(t5.foo)),
['__doc__', '__file__', '__name__',
'string'])
self.assertEqual(fixdir(dir(t5.string)),
['__doc__', '__file__', '__name__',
'spam'])
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
bo01ean/Stino | stino/pyarduino/base/pyserial/serialposix.py | 18 | 24318 | #!/usr/bin/env python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# module for serial IO for POSIX compatible systems, like Linux
# see __init__.py
#
# (C) 2001-2010 Chris Liechti <[email protected]>
# this is distributed under a free software license, see license.txt
#
# parts based on code from Grant B. Edwards <[email protected]>:
# ftp://ftp.visi.com/users/grante/python/PosixSerial.py
#
# references: http://www.easysw.com/~mike/serial/serial.html
import sys, os, fcntl, termios, struct, select, errno, time
from .serialutil import *
# Do check the Python version as some constants have moved.
if (sys.hexversion < 0x020100f0):
import TERMIOS
else:
TERMIOS = termios
if (sys.hexversion < 0x020200f0):
import FCNTL
else:
FCNTL = fcntl
# try to detect the OS so that a device can be selected...
# this code block should supply a device() and set_special_baudrate() function
# for the platform
plat = sys.platform.lower()
if plat[:5] == 'linux': # Linux (confirmed)
def device(port):
return '/dev/ttyS%d' % port
ASYNC_SPD_MASK = 0x1030
ASYNC_SPD_CUST = 0x0030
def set_special_baudrate(port, baudrate):
import array
buf = array.array('i', [0] * 32)
# get serial_struct
FCNTL.ioctl(port.fd, TERMIOS.TIOCGSERIAL, buf)
# set custom divisor
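        # e.g. with a baud_base (buf[7]) of 24,000,000 and a requested rate of
        # 250,000 baud, the custom divisor becomes 96 (illustrative values only)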
        buf[6] = buf[7] // baudrate
# update flags
buf[4] &= ~ASYNC_SPD_MASK
buf[4] |= ASYNC_SPD_CUST
# set serial_struct
try:
res = FCNTL.ioctl(port.fd, TERMIOS.TIOCSSERIAL, buf)
except IOError:
raise ValueError('Failed to set custom baud rate: %r' % baudrate)
baudrate_constants = {
0: 0, # hang up
50: 1,
75: 2,
110: 3,
134: 4,
150: 5,
200: 6,
300: 7,
600: 10,
1200: 11,
1800: 12,
2400: 13,
4800: 14,
9600: 15,
19200: 16,
38400: 17,
57600: 10001,
115200: 10002,
230400: 10003,
460800: 10004,
500000: 10005,
576000: 10006,
921600: 10007,
1000000: 10010,
1152000: 10011,
1500000: 10012,
2000000: 10013,
2500000: 10014,
3000000: 10015,
3500000: 10016,
4000000: 10017
}
elif plat == 'cygwin': # cygwin/win32 (confirmed)
def device(port):
return '/dev/com%d' % (port + 1)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat == 'openbsd3': # BSD (confirmed)
def device(port):
return '/dev/ttyp%d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:3] == 'bsd' or \
plat[:7] == 'freebsd' or \
plat[:7] == 'openbsd': # BSD (confirmed for freebsd4: cuaa%d)
def device(port):
return '/dev/cuad%d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:6] == 'darwin': # OS X
version = os.uname()[2].split('.')
# Tiger or above can support arbitrary serial speeds
if int(version[0]) >= 8:
def set_special_baudrate(port, baudrate):
# use IOKit-specific call to set up high speeds
import array, fcntl
buf = array.array('i', [baudrate])
IOSSIOSPEED = 0x80045402 #_IOW('T', 2, speed_t)
fcntl.ioctl(port.fd, IOSSIOSPEED, buf, 1)
else: # version < 8
def set_special_baudrate(port, baudrate):
raise ValueError("baud rate not supported")
def device(port):
return '/dev/cuad%d' % port
baudrate_constants = {}
elif plat[:6] == 'netbsd': # NetBSD 1.6 testing by Erk
def device(port):
return '/dev/dty%02d' % port
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:4] == 'irix': # IRIX (partially tested)
def device(port):
return '/dev/ttyf%d' % (port+1) #XXX different device names depending on flow control
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:2] == 'hp': # HP-UX (not tested)
def device(port):
return '/dev/tty%dp0' % (port+1)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:5] == 'sunos': # Solaris/SunOS (confirmed)
def device(port):
return '/dev/tty%c' % (ord('a')+port)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
elif plat[:3] == 'aix': # AIX
def device(port):
return '/dev/tty%d' % (port)
def set_special_baudrate(port, baudrate):
raise ValueError("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
else:
# platform detection has failed...
sys.stderr.write("""\
don't know how to number ttys on this system.
! Use an explicit path (eg /dev/ttyS1) or send this information to
! the author of this module:
sys.platform = %r
os.name = %r
serialposix.py version = %s
also add the device name of the serial port and where the
counting starts for the first serial port.
e.g. 'first serial port: /dev/ttyS0'
and with a bit luck you can get this module running...
""" % (sys.platform, os.name, VERSION))
# no exception, just continue with a brave attempt to build a device name
# even if the device name is not correct for the platform it has chances
# to work using a string with the real device name as port parameter.
    def device(portnum):
        return '/dev/ttyS%d' % portnum
def set_special_baudrate(port, baudrate):
raise SerialException("sorry don't know how to handle non standard baud rate on this platform")
baudrate_constants = {}
#~ raise Exception, "this module does not run on this platform, sorry."
# whats up with "aix", "beos", ....
# they should work, just need to know the device names.
# load some constants for later use.
# try to use values from TERMIOS, use defaults from linux otherwise
TIOCMGET = hasattr(TERMIOS, 'TIOCMGET') and TERMIOS.TIOCMGET or 0x5415
TIOCMBIS = hasattr(TERMIOS, 'TIOCMBIS') and TERMIOS.TIOCMBIS or 0x5416
TIOCMBIC = hasattr(TERMIOS, 'TIOCMBIC') and TERMIOS.TIOCMBIC or 0x5417
TIOCMSET = hasattr(TERMIOS, 'TIOCMSET') and TERMIOS.TIOCMSET or 0x5418
#TIOCM_LE = hasattr(TERMIOS, 'TIOCM_LE') and TERMIOS.TIOCM_LE or 0x001
TIOCM_DTR = hasattr(TERMIOS, 'TIOCM_DTR') and TERMIOS.TIOCM_DTR or 0x002
TIOCM_RTS = hasattr(TERMIOS, 'TIOCM_RTS') and TERMIOS.TIOCM_RTS or 0x004
#TIOCM_ST = hasattr(TERMIOS, 'TIOCM_ST') and TERMIOS.TIOCM_ST or 0x008
#TIOCM_SR = hasattr(TERMIOS, 'TIOCM_SR') and TERMIOS.TIOCM_SR or 0x010
TIOCM_CTS = hasattr(TERMIOS, 'TIOCM_CTS') and TERMIOS.TIOCM_CTS or 0x020
TIOCM_CAR = hasattr(TERMIOS, 'TIOCM_CAR') and TERMIOS.TIOCM_CAR or 0x040
TIOCM_RNG = hasattr(TERMIOS, 'TIOCM_RNG') and TERMIOS.TIOCM_RNG or 0x080
TIOCM_DSR = hasattr(TERMIOS, 'TIOCM_DSR') and TERMIOS.TIOCM_DSR or 0x100
TIOCM_CD = hasattr(TERMIOS, 'TIOCM_CD') and TERMIOS.TIOCM_CD or TIOCM_CAR
TIOCM_RI = hasattr(TERMIOS, 'TIOCM_RI') and TERMIOS.TIOCM_RI or TIOCM_RNG
#TIOCM_OUT1 = hasattr(TERMIOS, 'TIOCM_OUT1') and TERMIOS.TIOCM_OUT1 or 0x2000
#TIOCM_OUT2 = hasattr(TERMIOS, 'TIOCM_OUT2') and TERMIOS.TIOCM_OUT2 or 0x4000
TIOCINQ = hasattr(TERMIOS, 'FIONREAD') and TERMIOS.FIONREAD or 0x541B
TIOCM_zero_str = struct.pack('I', 0)
TIOCM_RTS_str = struct.pack('I', TIOCM_RTS)
TIOCM_DTR_str = struct.pack('I', TIOCM_DTR)
TIOCSBRK = hasattr(TERMIOS, 'TIOCSBRK') and TERMIOS.TIOCSBRK or 0x5427
TIOCCBRK = hasattr(TERMIOS, 'TIOCCBRK') and TERMIOS.TIOCCBRK or 0x5428
class PosixSerial(SerialBase):
"""Serial port class POSIX implementation. Serial port configuration is
done with termios and fcntl. Runs on Linux and many other Un*x like
systems."""
def open(self):
"""Open port with current settings. This may throw a SerialException
if the port cannot be opened."""
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self._isOpen:
raise SerialException("Port is already open.")
self.fd = None
# open
try:
self.fd = os.open(self.portstr, os.O_RDWR|os.O_NOCTTY|os.O_NONBLOCK)
except Exception as msg:
self.fd = None
raise SerialException("could not open port %s: %s" % (self._port, msg))
#~ fcntl.fcntl(self.fd, FCNTL.F_SETFL, 0) # set blocking
try:
self._reconfigurePort()
except:
try:
os.close(self.fd)
except:
# ignore any exception when closing the port
# also to keep original exception that happened when setting up
pass
self.fd = None
raise
else:
self._isOpen = True
#~ self.flushInput()
def _reconfigurePort(self):
"""Set communication parameters on opened port."""
if self.fd is None:
raise SerialException("Can only operate on a valid file descriptor")
custom_baud = None
vmin = vtime = 0 # timeout is done via select
if self._interCharTimeout is not None:
vmin = 1
vtime = int(self._interCharTimeout * 10)
try:
orig_attr = termios.tcgetattr(self.fd)
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = orig_attr
except termios.error as msg: # if a port is nonexistent but has a /dev file, it'll fail here
raise SerialException("Could not configure port: %s" % msg)
# set up raw mode / no echo / binary
cflag |= (TERMIOS.CLOCAL|TERMIOS.CREAD)
lflag &= ~(TERMIOS.ICANON|TERMIOS.ECHO|TERMIOS.ECHOE|TERMIOS.ECHOK|TERMIOS.ECHONL|
TERMIOS.ISIG|TERMIOS.IEXTEN) #|TERMIOS.ECHOPRT
for flag in ('ECHOCTL', 'ECHOKE'): # netbsd workaround for Erk
if hasattr(TERMIOS, flag):
lflag &= ~getattr(TERMIOS, flag)
oflag &= ~(TERMIOS.OPOST)
iflag &= ~(TERMIOS.INLCR|TERMIOS.IGNCR|TERMIOS.ICRNL|TERMIOS.IGNBRK)
if hasattr(TERMIOS, 'IUCLC'):
iflag &= ~TERMIOS.IUCLC
if hasattr(TERMIOS, 'PARMRK'):
iflag &= ~TERMIOS.PARMRK
# setup baud rate
try:
ispeed = ospeed = getattr(TERMIOS, 'B%s' % (self._baudrate))
except AttributeError:
try:
ispeed = ospeed = baudrate_constants[self._baudrate]
except KeyError:
#~ raise ValueError('Invalid baud rate: %r' % self._baudrate)
# may need custom baud rate, it isn't in our list.
ispeed = ospeed = getattr(TERMIOS, 'B38400')
try:
custom_baud = int(self._baudrate) # store for later
except ValueError:
raise ValueError('Invalid baud rate: %r' % self._baudrate)
else:
if custom_baud < 0:
raise ValueError('Invalid baud rate: %r' % self._baudrate)
# setup char len
cflag &= ~TERMIOS.CSIZE
if self._bytesize == 8:
cflag |= TERMIOS.CS8
elif self._bytesize == 7:
cflag |= TERMIOS.CS7
elif self._bytesize == 6:
cflag |= TERMIOS.CS6
elif self._bytesize == 5:
cflag |= TERMIOS.CS5
else:
raise ValueError('Invalid char len: %r' % self._bytesize)
# setup stopbits
if self._stopbits == STOPBITS_ONE:
cflag &= ~(TERMIOS.CSTOPB)
elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
cflag |= (TERMIOS.CSTOPB) # XXX same as TWO.. there is no POSIX support for 1.5
elif self._stopbits == STOPBITS_TWO:
cflag |= (TERMIOS.CSTOPB)
else:
raise ValueError('Invalid stop bit specification: %r' % self._stopbits)
# setup parity
iflag &= ~(TERMIOS.INPCK|TERMIOS.ISTRIP)
if self._parity == PARITY_NONE:
cflag &= ~(TERMIOS.PARENB|TERMIOS.PARODD)
elif self._parity == PARITY_EVEN:
cflag &= ~(TERMIOS.PARODD)
cflag |= (TERMIOS.PARENB)
elif self._parity == PARITY_ODD:
cflag |= (TERMIOS.PARENB|TERMIOS.PARODD)
else:
raise ValueError('Invalid parity: %r' % self._parity)
# setup flow control
# xonxoff
if hasattr(TERMIOS, 'IXANY'):
if self._xonxoff:
iflag |= (TERMIOS.IXON|TERMIOS.IXOFF) #|TERMIOS.IXANY)
else:
iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF|TERMIOS.IXANY)
else:
if self._xonxoff:
iflag |= (TERMIOS.IXON|TERMIOS.IXOFF)
else:
iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF)
# rtscts
if hasattr(TERMIOS, 'CRTSCTS'):
if self._rtscts:
cflag |= (TERMIOS.CRTSCTS)
else:
cflag &= ~(TERMIOS.CRTSCTS)
elif hasattr(TERMIOS, 'CNEW_RTSCTS'): # try it with alternate constant name
if self._rtscts:
cflag |= (TERMIOS.CNEW_RTSCTS)
else:
cflag &= ~(TERMIOS.CNEW_RTSCTS)
# XXX should there be a warning if setting up rtscts (and xonxoff etc) fails??
# buffer
# vmin "minimal number of characters to be read. = for non blocking"
if vmin < 0 or vmin > 255:
raise ValueError('Invalid vmin: %r ' % vmin)
cc[TERMIOS.VMIN] = vmin
# vtime
if vtime < 0 or vtime > 255:
raise ValueError('Invalid vtime: %r' % vtime)
cc[TERMIOS.VTIME] = vtime
# activate settings
if [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] != orig_attr:
termios.tcsetattr(self.fd, TERMIOS.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
# apply custom baud rate, if any
if custom_baud is not None:
set_special_baudrate(self, custom_baud)
def close(self):
"""Close port"""
if self._isOpen:
if self.fd is not None:
os.close(self.fd)
self.fd = None
self._isOpen = False
def makeDeviceName(self, port):
return device(port)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
#~ s = fcntl.ioctl(self.fd, TERMIOS.FIONREAD, TIOCM_zero_str)
s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str)
return struct.unpack('I',s)[0]
# select based implementation, proved to work on many systems
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read."""
if not self._isOpen: raise portNotOpenError
read = bytearray()
while len(read) < size:
ready,_,_ = select.select([self.fd],[],[], self._timeout)
# If select was used with a timeout, and the timeout occurs, it
# returns with empty lists -> thus abort read operation.
# For timeout == 0 (non-blocking operation) also abort when there
# is nothing to read.
if not ready:
break # timeout
buf = os.read(self.fd, size-len(read))
# read should always return some data as select reported it was
# ready to read when we get to this point.
if not buf:
# Disconnected devices, at least on Linux, show the
# behavior that they are always ready to read immediately
# but reading returns nothing.
raise SerialException('device reports readiness to read but returned no data (device disconnected?)')
read.extend(buf)
return bytes(read)
def write(self, data):
"""Output the given string over the serial port."""
if not self._isOpen: raise portNotOpenError
t = len(data)
d = data
if self._writeTimeout is not None and self._writeTimeout > 0:
timeout = time.time() + self._writeTimeout
else:
timeout = None
while t > 0:
try:
n = os.write(self.fd, d)
if timeout:
# when timeout is set, use select to wait for being ready
# with the time left as timeout
timeleft = timeout - time.time()
if timeleft < 0:
raise writeTimeoutError
_, ready, _ = select.select([], [self.fd], [], timeleft)
if not ready:
raise writeTimeoutError
d = d[n:]
t = t - n
except OSError as v:
if v.errno != errno.EAGAIN:
raise SerialException('write failed: %s' % (v,))
return len(data)
def flush(self):
"""Flush of file like objects. In this case, wait until all data
is written."""
self.drainOutput()
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
termios.tcflush(self.fd, TERMIOS.TCIFLUSH)
def flushOutput(self):
"""Clear output buffer, aborting the current output and
discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
termios.tcflush(self.fd, TERMIOS.TCOFLUSH)
def sendBreak(self, duration=0.25):
"""Send break condition. Timed, returns to idle state after given duration."""
if not self._isOpen: raise portNotOpenError
termios.tcsendbreak(self.fd, int(duration/0.25))
def setBreak(self, level=1):
"""Set break: Controls TXD. When active, no transmitting is possible."""
if self.fd is None: raise portNotOpenError
if level:
fcntl.ioctl(self.fd, TIOCSBRK)
else:
fcntl.ioctl(self.fd, TIOCCBRK)
def setRTS(self, level=1):
"""Set terminal status line: Request To Send"""
if not self._isOpen: raise portNotOpenError
if level:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str)
def setDTR(self, level=1):
"""Set terminal status line: Data Terminal Ready"""
if not self._isOpen: raise portNotOpenError
if level:
fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str)
else:
fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str)
def getCTS(self):
"""Read terminal status line: Clear To Send"""
if not self._isOpen: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_CTS != 0
def getDSR(self):
"""Read terminal status line: Data Set Ready"""
if not self._isOpen: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_DSR != 0
def getRI(self):
"""Read terminal status line: Ring Indicator"""
if not self._isOpen: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_RI != 0
def getCD(self):
"""Read terminal status line: Carrier Detect"""
if not self._isOpen: raise portNotOpenError
s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
return struct.unpack('I',s)[0] & TIOCM_CD != 0
# - - platform specific - - - -
def drainOutput(self):
"""internal - not portable!"""
if not self._isOpen: raise portNotOpenError
termios.tcdrain(self.fd)
def nonblocking(self):
"""internal - not portable!"""
if not self._isOpen: raise portNotOpenError
fcntl.fcntl(self.fd, FCNTL.F_SETFL, os.O_NONBLOCK)
def fileno(self):
"""For easier use of the serial port instance with select.
WARNING: this function is not portable to different platforms!"""
if not self._isOpen: raise portNotOpenError
return self.fd
def flowControl(self, enable):
"""manually control flow - when hardware or software flow control is
enabled"""
if not self._isOpen: raise portNotOpenError
if enable:
termios.tcflow(self.fd, TERMIOS.TCION)
else:
termios.tcflow(self.fd, TERMIOS.TCIOFF)
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, which provide the new io
# library, derive from io.RawIOBase
try:
import io
except ImportError:
# classic version with our own file-like emulation
class Serial(PosixSerial, FileLike):
pass
else:
# io library present
class Serial(PosixSerial, io.RawIOBase):
pass
class PosixPollSerial(Serial):
"""poll based read implementation. not all systems support poll properly.
however this one has better handling of errors, such as a device
disconnecting while it's in use (e.g. USB-serial unplugged)"""
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read."""
if self.fd is None: raise portNotOpenError
read = bytearray()
poll = select.poll()
poll.register(self.fd, select.POLLIN|select.POLLERR|select.POLLHUP|select.POLLNVAL)
if size > 0:
while len(read) < size:
# print "\tread(): size",size, "have", len(read) #debug
# wait until device becomes ready to read (or something fails)
for fd, event in poll.poll(self._timeout*1000):
if event & (select.POLLERR|select.POLLHUP|select.POLLNVAL):
raise SerialException('device reports error (poll)')
# we don't care if it is select.POLLIN or timeout, that's
# handled below
buf = os.read(self.fd, size - len(read))
read.extend(buf)
if ((self._timeout is not None and self._timeout >= 0) or
(self._interCharTimeout is not None and self._interCharTimeout > 0)) and not buf:
break # early abort on timeout
return bytes(read)
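# --- Illustrative usage sketch (added for this write-up, not part of the
# original pyserial source). It shows how PosixPollSerial can be used as a
# drop-in replacement for Serial when poll() based disconnect detection is
# wanted; the port name '/dev/ttyUSB0' and the settings are assumptions for
# illustration only.
def _example_poll_serial_roundtrip():
    port = PosixPollSerial('/dev/ttyUSB0', baudrate=9600, timeout=1)
    try:
        port.write('ping')       # send four bytes
        return port.read(4)      # read back up to four bytes, '' on timeout
    finally:
        port.close()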
if __name__ == '__main__':
s = Serial(0,
baudrate=19200, # baud rate
bytesize=EIGHTBITS, # number of data bits
parity=PARITY_EVEN, # enable parity checking
stopbits=STOPBITS_ONE, # number of stop bits
timeout=3, # set a timeout value, None for waiting forever
xonxoff=0, # enable software flow control
rtscts=0, # enable RTS/CTS flow control
)
s.setRTS(1)
s.setDTR(1)
s.flushInput()
s.flushOutput()
s.write('hello')
sys.stdout.write('%r\n' % s.read(5))
sys.stdout.write('%s\n' % s.inWaiting())
del s
| mit |
gaddman/ansible | test/units/modules/network/nxos/test_nxos_vxlan_vtep.py | 63 | 2534 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_vxlan_vtep
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVxlanVtepVniModule(TestNxosModule):
module = nxos_vxlan_vtep
def setUp(self):
super(TestNxosVxlanVtepVniModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosVxlanVtepVniModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_vxlan_vtep', 'config.cfg')
self.load_config.return_value = None
def test_nxos_vxlan_vtep(self):
set_module_args(dict(interface='nve1', description='simple description'))
self.execute_module(changed=True, commands=['interface nve1', 'terminal dont-ask', 'description simple description'])
def test_nxos_vxlan_vtep_present_no_change(self):
set_module_args(dict(interface='nve1'))
self.execute_module(changed=False, commands=[])
def test_nxos_vxlan_vtep_absent(self):
set_module_args(dict(interface='nve1', state='absent'))
self.execute_module(changed=True, commands=['no interface nve1'])
def test_nxos_vxlan_vtep_absent_no_change(self):
set_module_args(dict(interface='nve2', state='absent'))
self.execute_module(changed=False, commands=[])
| gpl-3.0 |
raycarnes/account-financial-tools | account_check_deposit/__openerp__.py | 18 | 2364 | # -*- coding: utf-8 -*-
###############################################################################
#
# account_check_deposit for Odoo/OpenERP
# Copyright (C) 2012-2015 Akretion (http://www.akretion.com/)
# @author: Benoît GUILLOT <[email protected]>
# @author: Chafique DELLI <[email protected]>
# @author: Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Account Check Deposit',
'version': '0.1',
'category': 'Accounting & Finance',
'license': 'AGPL-3',
'summary': 'Manage deposit of checks to the bank',
'description': """
Account Check Deposit
=====================
This module allows you to easily manage check deposits: you can select all
the checks you received as payments and create a global deposit for the
selected checks.
A journal for received checks is automatically created.
You must configure on this journal the default debit account and the default
credit account. You must also configure on the company the account for
check deposits.
""",
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com/',
'depends': [
'account_accountant',
'report_webkit',
],
'data': [
'account_deposit_view.xml',
'account_move_line_view.xml',
'account_deposit_sequence.xml',
'company_view.xml',
'security/ir.model.access.csv',
'security/check_deposit_security.xml',
'account_data.xml',
'report.xml',
'report/report_checkdeposit.xml',
],
'installable': True,
'application': True,
}
| agpl-3.0 |
uclapi/uclapi | backend/uclapi/timetable/migrations/0003_auto_20171007_1558.py | 1 | 26087 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-07 15:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('timetable', '0002_caching'),
]
operations = [
migrations.AlterField(
model_name='lecturera',
name='cost',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='lecturera',
name='covingprior',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='lecturera',
name='covprior',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='lecturera',
name='displectid',
field=models.TextField(max_length=10),
),
migrations.AlterField(
model_name='lecturera',
name='excludecover',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='lecturera',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='lecturerb',
name='cost',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='lecturerb',
name='covingprior',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='lecturerb',
name='covprior',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='lecturerb',
name='displectid',
field=models.TextField(max_length=10),
),
migrations.AlterField(
model_name='lecturerb',
name='excludecover',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='lecturerb',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='modulea',
name='csize',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulea',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='modulea',
name='instid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulea',
name='lectgroup',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulea',
name='maxsize',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulea',
name='minsize',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='modulea',
name='moduleid',
field=models.TextField(max_length=12),
),
migrations.AlterField(
model_name='modulea',
name='prefmaxsize',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='moduleb',
name='csize',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='moduleb',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='moduleb',
name='instid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='moduleb',
name='lectgroup',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='moduleb',
name='maxsize',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='moduleb',
name='minsize',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='moduleb',
name='moduleid',
field=models.TextField(max_length=12),
),
migrations.AlterField(
model_name='moduleb',
name='prefmaxsize',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='roomsa',
name='capacity',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='roomsa',
name='classification',
field=models.TextField(max_length=10),
),
migrations.AlterField(
model_name='roomsa',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='roomsa',
name='prefmax',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='roomsa',
name='prefmin',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='roomsa',
name='roomarea',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='roomsb',
name='capacity',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='roomsb',
name='classification',
field=models.TextField(max_length=10),
),
migrations.AlterField(
model_name='roomsb',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='roomsb',
name='prefmax',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='roomsb',
name='prefmin',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='roomsb',
name='roomarea',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='sitesa',
name='address2',
field=models.TextField(max_length=80),
),
migrations.AlterField(
model_name='sitesa',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='sitesb',
name='address2',
field=models.TextField(max_length=80),
),
migrations.AlterField(
model_name='sitesb',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='timetablea',
name='associd',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='capacity',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='chainid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='compinstid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='courseyear',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='drstatus',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='duration',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='evpriority',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='exclid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='fixweight',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='gendatanum',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='groupid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='timetablea',
name='instid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='lectgrp',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='linkid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='locked',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='maxperiodgap',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='maxrooms',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='nonconid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='numperiods',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='periodid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='readlock',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='regid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='roompoolid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='siteproximity',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='sizeused',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='slotentry',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='slotid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='slottotal',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='sourcechange',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='specid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='status',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='tobescheduled',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='tweightid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='typeevent',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='weekday',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='weekid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetablea',
name='zoneproximity',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='associd',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='capacity',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='chainid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='compinstid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='courseyear',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='drstatus',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='duration',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='evpriority',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='exclid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='fixweight',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='gendatanum',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='groupid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='timetableb',
name='instid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='lectgrp',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='linkid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='locked',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='maxperiodgap',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='maxrooms',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='nonconid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='numperiods',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='periodid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='readlock',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='regid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='roompoolid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='siteproximity',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='sizeused',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='slotentry',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='slotid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='slottotal',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='sourcechange',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='specid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='status',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='tobescheduled',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='tweightid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='typeevent',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='weekday',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='weekid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='timetableb',
name='zoneproximity',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapnumerica',
name='drstatus',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapnumerica',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='weekmapnumerica',
name='setid',
field=models.TextField(max_length=10),
),
migrations.AlterField(
model_name='weekmapnumerica',
name='weekid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapnumerica',
name='weeknumber',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapnumericb',
name='drstatus',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapnumericb',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='weekmapnumericb',
name='setid',
field=models.TextField(max_length=10),
),
migrations.AlterField(
model_name='weekmapnumericb',
name='weekid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapnumericb',
name='weeknumber',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapstringa',
name='drstatus',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapstringa',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='weekmapstringa',
name='numweeks',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapstringa',
name='statweeks',
field=models.TextField(max_length=10),
),
migrations.AlterField(
model_name='weekmapstringa',
name='weekid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapstringb',
name='drstatus',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapstringb',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='weekmapstringb',
name='numweeks',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekmapstringb',
name='statweeks',
field=models.TextField(max_length=10),
),
migrations.AlterField(
model_name='weekmapstringb',
name='weekid',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekstructurea',
name='description',
field=models.TextField(max_length=80),
),
migrations.AlterField(
model_name='weekstructurea',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='weekstructurea',
name='mappedto',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekstructurea',
name='weeknumber',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekstructureb',
name='description',
field=models.TextField(max_length=80),
),
migrations.AlterField(
model_name='weekstructureb',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='weekstructureb',
name='mappedto',
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='weekstructureb',
name='weeknumber',
field=models.BigIntegerField(blank=True, null=True),
),
]
| mit |
uni-peter-zheng/autotest | client/shared/profiler_manager_unittest.py | 4 | 2880 | #!/usr/bin/python
import unittest
try:
import autotest.common as common
except ImportError:
import common
from autotest.client.shared import profiler_manager
# simple job stub for using in tests
class stub_job(object):
tmpdir = "/home/autotest/tmp"
autodir = "/home/autotest"
# simple profiler stub for using in tests
class stub_profiler(object):
started = 0
def __init__(self, name):
self.name = name
@classmethod
def start(cls, test):
cls.started += 1
@classmethod
def stop(cls, test):
cls.started -= 1
# replace profiler_manager.load_profiler with a simple stub
class stub_manager(profiler_manager.profiler_manager):
def load_profiler(self, profiler, args, dargs):
return stub_profiler(profiler)
class TestProfilerManager(unittest.TestCase):
def test_starts_with_no_profilers(self):
p = stub_manager(stub_job)
self.assertEqual(set(), p.current_profilers())
def test_single_add(self):
p = stub_manager(stub_job)
p.add("prof1")
self.assertEqual(set(["prof1"]), p.current_profilers())
def test_duplicate_adds(self):
p = stub_manager(stub_job)
p.add("prof1")
p.add("prof1")
self.assertEqual(set(["prof1"]), p.current_profilers())
def test_multiple_adds(self):
p = stub_manager(stub_job)
p.add("prof1")
p.add("prof2")
self.assertEqual(set(["prof1", "prof2"]), p.current_profilers())
def test_add_and_delete(self):
p = stub_manager(stub_job)
p.add("prof1")
p.add("prof2")
p.delete("prof1")
self.assertEqual(set(["prof2"]), p.current_profilers())
def test_present_with_no_profilers(self):
p = stub_manager(stub_job)
self.assertEqual(False, p.present())
def test_present_after_add(self):
p = stub_manager(stub_job)
p.add("prof1")
self.assertEqual(True, p.present())
def test_present_after_add_and_remove(self):
p = stub_manager(stub_job)
p.add("prof1")
p.delete("prof1")
self.assertEqual(False, p.present())
def test_started(self):
p = stub_manager(stub_job)
p.add("prof1")
p.add("prof2")
started = stub_profiler.started
self.assertEqual(False, p.active())
p.start(object())
self.assertEqual(started + 2, stub_profiler.started)
self.assertEqual(True, p.active())
def test_stop(self):
p = stub_manager(stub_job)
p.add("prof1")
p.add("prof2")
started = stub_profiler.started
self.assertEqual(False, p.active())
test = object()
p.start(test)
p.stop(test)
self.assertEqual(started, stub_profiler.started)
self.assertEqual(False, p.active())
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
preo/dnspython | dns/node.py | 49 | 6028 | # Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS nodes. A node is a set of rdatasets."""
import StringIO
import dns.rdataset
import dns.rdatatype
import dns.renderer
class Node(object):
"""A DNS node.
A node is a set of rdatasets
@ivar rdatasets: the node's rdatasets
@type rdatasets: list of dns.rdataset.Rdataset objects"""
__slots__ = ['rdatasets']
def __init__(self):
"""Initialize a DNS node.
"""
        self.rdatasets = []
def to_text(self, name, **kw):
"""Convert a node to text format.
Each rdataset at the node is printed. Any keyword arguments
to this method are passed on to the rdataset's to_text() method.
@param name: the owner name of the rdatasets
@type name: dns.name.Name object
@rtype: string
"""
s = StringIO.StringIO()
for rds in self.rdatasets:
if len(rds) > 0:
print >> s, rds.to_text(name, **kw)
return s.getvalue()[:-1]
def __repr__(self):
return '<DNS node ' + str(id(self)) + '>'
def __eq__(self, other):
"""Two nodes are equal if they have the same rdatasets.
@rtype: bool
"""
#
# This is inefficient. Good thing we don't need to do it much.
#
for rd in self.rdatasets:
if rd not in other.rdatasets:
return False
for rd in other.rdatasets:
if rd not in self.rdatasets:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.rdatasets)
def __iter__(self):
return iter(self.rdatasets)
def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Find an rdataset matching the specified properties in the
current node.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type. Usually this value is
dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
dns.rdatatype.RRSIG, then the covers value will be the rdata
type the SIG/RRSIG covers. The library treats the SIG and RRSIG
types as if they were a family of
types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
easier to work with than if RRSIGs covering different rdata
types were aggregated into a single RRSIG rdataset.
@type covers: int
@param create: If True, create the rdataset if it is not found.
@type create: bool
@raises KeyError: An rdataset of the desired type and class does
not exist and I{create} is not True.
@rtype: dns.rdataset.Rdataset object
"""
for rds in self.rdatasets:
if rds.match(rdclass, rdtype, covers):
return rds
if not create:
raise KeyError
rds = dns.rdataset.Rdataset(rdclass, rdtype)
self.rdatasets.append(rds)
return rds
def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Get an rdataset matching the specified properties in the
current node.
None is returned if an rdataset of the specified type and
class does not exist and I{create} is not True.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type.
@type covers: int
@param create: If True, create the rdataset if it is not found.
@type create: bool
@rtype: dns.rdataset.Rdataset object or None
"""
try:
rds = self.find_rdataset(rdclass, rdtype, covers, create)
except KeyError:
rds = None
return rds
def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
"""Delete the rdataset matching the specified properties in the
current node.
If a matching rdataset does not exist, it is not an error.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type.
@type covers: int
"""
rds = self.get_rdataset(rdclass, rdtype, covers)
if not rds is None:
self.rdatasets.remove(rds)
def replace_rdataset(self, replacement):
"""Replace an rdataset.
It is not an error if there is no rdataset matching I{replacement}.
Ownership of the I{replacement} object is transferred to the node;
in other words, this method does not store a copy of I{replacement}
at the node, it stores I{replacement} itself.
"""
if not isinstance(replacement, dns.rdataset.Rdataset):
raise ValueError, 'replacement is not an rdataset'
self.delete_rdataset(replacement.rdclass, replacement.rdtype,
replacement.covers)
self.rdatasets.append(replacement)
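# --- Illustrative usage sketch (added for this write-up, not part of the
# original dnspython source). It demonstrates the find_rdataset/get_rdataset
# pattern described in the docstrings above; the IN/A record value used here
# is an arbitrary example.
def _example_node_usage():
    import dns.rdata
    import dns.rdataclass
    node = Node()
    # create=True makes an empty IN/A rdataset at the node if none exists yet
    rds = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.A, create=True)
    rds.add(dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, '10.0.0.1'),
            ttl=300)
    # get_rdataset returns None instead of raising KeyError when nothing matches
    assert node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.MX) is None
    return node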
| isc |
jwvhewitt/dmeternal | old_game/monsters/misc.py | 1 | 23825 | from . import base
from .. import stats
from .. import items
from .. import dialogue
from .. import context
from .. import spells
from .. import invocations
from .. import effects
from .. import animobs
from .. import targetarea
from .. import aibrain
from . import animals
from . import treasuretype
from . import abilities
from .. import enchantments
# Contains critters that don't quite fit in anywhere else.
# *******************************
# *** ENCOUNTER LEVEL 1 ***
# *******************************
# *******************************
# *** ENCOUNTER LEVEL 2 ***
# *******************************
# *******************************
# *** ENCOUNTER LEVEL 3 ***
# *******************************
class EvilEye( base.Monster ):
name = "Evil Eye"
statline = { stats.STRENGTH: 6, stats.TOUGHNESS: 12, stats.REFLEXES: 12, \
stats.INTELLIGENCE: 10, stats.PIETY: 10, stats.CHARISMA: 2, \
stats.MAGIC_ATTACK: 20, stats.MAGIC_DEFENSE: 10 }
SPRITENAME = "monster_default.png"
FRAME = 18
TEMPLATES = ()
MOVE_POINTS = 6
VOICE = None
HABITAT = ( context.HAB_CAVE, context.HAB_TUNNELS, context.SET_EVERY,
context.DES_LUNAR, context.MTY_BOSS,
context.MTY_BEAST, context.GEN_CHAOS )
ENC_LEVEL = 3
ATTACK = items.Attack( (2,4,0), element = stats.RESIST_LUNAR,
skill_mod=stats.REFLEXES, hit_anim=animobs.PurpleExplosion, extra_effect =
effects.OpposedRoll( att_stat=stats.REFLEXES, def_stat=stats.REFLEXES, on_success = (
effects.Paralyze( max_duration = 3 )
,) )
)
TECHNIQUES = ( invocations.MPInvocation( "Evil Gaze",
effects.OpposedRoll( att_stat=stats.REFLEXES, def_stat=stats.REFLEXES, att_modifier=10, on_success = (
effects.Paralyze( max_duration = 3 )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ), com_tar=targetarea.SingleTarget(reach=4), shot_anim=animobs.PurpleVortex,
ai_tar=invocations.TargetMobileEnemy(), mp_cost=3
), )
def init_monster( self ):
self.levels.append( base.Beast( 3, self ) )
# *******************************
# *** ENCOUNTER LEVEL 4 ***
# *******************************
class Cockatrice( base.Monster ):
name = "Cockatrice"
statline = { stats.STRENGTH: 8, stats.TOUGHNESS: 8, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 1, stats.PIETY: 10, stats.CHARISMA: 4 }
SPRITENAME = "monster_default.png"
FRAME = 21
TEMPLATES = ()
MOVE_POINTS = 10
VOICE = None
HABITAT = ( context.HAB_EVERY, context.SET_EVERY, context.SET_RENFAN,
context.DES_AIR, context.DES_EARTH,
context.MTY_BEAST, context.MTY_BOSS )
ENC_LEVEL = 4
COMPANIONS = (animals.Chicken,)
ATTACK = items.Attack( (1,6,0), element = stats.RESIST_PIERCING, skill_mod=stats.REFLEXES )
TECHNIQUES = ( invocations.MPInvocation( "Death Gaze",
effects.OpposedRoll( att_stat=stats.PIETY, att_modifier=-10, on_success = (
effects.InstaKill( anim=animobs.CriticalHit )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ), com_tar=targetarea.SingleTarget(reach=4), shot_anim=animobs.PurpleVortex, ai_tar=invocations.TargetEnemy(), mp_cost=4
), )
def init_monster( self ):
self.levels.append( base.Beast( 3, self ) )
class CorpseEater( base.Monster ):
name = "Corpse Eater"
statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 14, stats.REFLEXES: 8, \
stats.INTELLIGENCE: 2, stats.PIETY: 12, stats.CHARISMA: 2 }
SPRITENAME = "monster_default.png"
FRAME = 13
TEMPLATES = (stats.BUG,)
MOVE_POINTS = 8
VOICE = None
HABITAT = ( context.HAB_EVERY, context.HAB_TUNNELS, context.SET_EVERY,
context.MAP_DUNGEON,
context.DES_LUNAR,
context.MTY_BEAST )
ENC_LEVEL = 4
ATTACK = items.Attack( (3,4,0), element = stats.RESIST_PIERCING, extra_effect =
effects.OpposedRoll( att_stat=stats.TOUGHNESS, on_success = (
effects.Paralyze( max_duration = 6 )
,) )
)
TECHNIQUES = ( invocations.MPInvocation( "Tentacle Slime",
effects.TargetIsEnemy( on_true = (
effects.OpposedRoll( anim=animobs.GreenSplat, att_stat=stats.TOUGHNESS, on_success = (
effects.Paralyze( max_duration = 3 )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ),
)
), com_tar=targetarea.SelfCentered(radius=1,exclude_middle=True), ai_tar=invocations.TargetEnemy(), mp_cost=8 ), )
def init_monster( self ):
self.levels.append( base.Beast( 4, self ) )
# *******************************
# *** ENCOUNTER LEVEL 5 ***
# *******************************
class Gargoyle( base.Monster ):
name = "Gargoyle"
statline = { stats.STRENGTH: 15, stats.TOUGHNESS: 18, stats.REFLEXES: 14, \
stats.INTELLIGENCE: 6, stats.PIETY: 11, stats.CHARISMA: 7,
stats.RESIST_CRUSHING: 50, stats.RESIST_PIERCING: 50,
stats.RESIST_SLASHING: 50, stats.PHYSICAL_ATTACK: 10, stats.NATURAL_DEFENSE: 5 }
SPRITENAME = "monster_default.png"
FRAME = 22
TEMPLATES = (stats.EARTH,stats.ROCK)
MOVE_POINTS = 16
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MTY_BOSS,
context.MAP_DUNGEON, context.DES_EARTH )
ENC_LEVEL = 5
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (2,4,0), element = stats.RESIST_SLASHING )
TECHNIQUES = ()
def init_monster( self ):
self.levels.append( base.Humanoid( 4, self ) )
# *******************************
# *** ENCOUNTER LEVEL 6 ***
# *******************************
class Basilisk( base.Monster ):
name = "Basilisk"
statline = { stats.STRENGTH: 15, stats.TOUGHNESS: 15, stats.REFLEXES: 8, \
stats.INTELLIGENCE: 2, stats.PIETY: 12, stats.CHARISMA: 11 }
SPRITENAME = "monster_default.png"
FRAME = 39
TEMPLATES = (stats.REPTILE,)
MOVE_POINTS = 8
VOICE = None
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MTY_BEAST, context.MTY_BOSS )
ENC_LEVEL = 6
ATTACK = items.Attack( (1,8,0), element = stats.RESIST_PIERCING )
TECHNIQUES = ( invocations.MPInvocation( "Death Gaze",
effects.OpposedRoll( att_stat=stats.PIETY, att_modifier=-10, on_success = (
effects.InstaKill( anim=animobs.CriticalHit )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ), com_tar=targetarea.SingleTarget(reach=4), shot_anim=animobs.PurpleVortex, ai_tar=invocations.TargetEnemy(), mp_cost=6
), )
def init_monster( self ):
self.levels.append( base.Beast( 6, self ) )
class Griffin( base.Monster ):
name = "Griffin"
statline = { stats.STRENGTH: 18, stats.TOUGHNESS: 16, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 5, stats.PIETY: 13, stats.CHARISMA: 8,
stats.PHYSICAL_ATTACK: 5, stats.NATURAL_DEFENSE: 5, stats.MAGIC_DEFENSE: 5 }
SPRITENAME = "monster_default.png"
FRAME = 35
TEMPLATES = ()
MOVE_POINTS = 12
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_WILDERNESS, context.DES_AIR,
context.MTY_BEAST, context.GEN_NATURE )
ENC_LEVEL = 6
TREASURE = None
ATTACK = items.Attack( (2,6,0), element = stats.RESIST_SLASHING )
TECHNIQUES = ()
def init_monster( self ):
self.levels.append( base.Beast( 7, self ) )
class Harpy( base.Monster ):
name = "Harpy"
statline = { stats.STRENGTH: 10, stats.TOUGHNESS: 10, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 7, stats.PIETY: 12, stats.CHARISMA: 17 }
SPRITENAME = "monster_default.png"
FRAME = 38
TEMPLATES = ()
MOVE_POINTS = 8
VOICE = dialogue.voice.GREEK
HABITAT = ( context.HAB_EVERY, context.HAB_CAVE, context.SET_EVERY,
context.DES_LUNAR,
context.MTY_HUMANOID, context.MTY_BOSS, context.GEN_CHAOS )
ENC_LEVEL = 6
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (2,4,0), element = stats.RESIST_SLASHING )
TECHNIQUES = (invocations.MPInvocation( "Sleep Song",
effects.TargetIsEnemy( anim=animobs.SonicHit, on_true = (
effects.TargetIs( pat=effects.ANIMAL, on_true = (
effects.OpposedRoll( att_modifier=0, on_success = (
effects.CauseSleep(),
)),)
,), )), com_tar=targetarea.SelfCentered(radius=6,delay_from=-1),
ai_tar=invocations.TargetMobileEnemy(), mp_cost=8 ),
)
def init_monster( self ):
self.levels.append( base.Humanoid( 7, self ) )
class Owlbear( base.Monster ):
name = "Owlbear"
statline = { stats.STRENGTH: 21, stats.TOUGHNESS: 21, stats.REFLEXES: 12, \
stats.INTELLIGENCE: 2, stats.PIETY: 12, stats.CHARISMA: 10 }
SPRITENAME = "monster_default.png"
FRAME = 27
TEMPLATES = ()
MOVE_POINTS = 10
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_WILDERNESS,
context.MTY_BEAST, context.GEN_NATURE )
ENC_LEVEL = 6
TREASURE = None
ATTACK = items.Attack( (1,8,0), element = stats.RESIST_SLASHING )
TECHNIQUES = ()
def init_monster( self ):
self.levels.append( base.Beast( 6, self ) )
# *******************************
# *** ENCOUNTER LEVEL 7 ***
# *******************************
class Lamia( base.Monster ):
name = "Lamia"
statline = { stats.STRENGTH: 18, stats.TOUGHNESS: 12, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 13, stats.PIETY: 15, stats.CHARISMA: 12 }
SPRITENAME = "monster_default.png"
FRAME = 2
TEMPLATES = ()
MOVE_POINTS = 10
VOICE = dialogue.voice.GREEK
HABITAT = ( context.HAB_EVERY, context.HAB_DESERT, context.SET_EVERY,
context.DES_LUNAR,
context.MTY_HUMANOID, context.MTY_BOSS )
ENC_LEVEL = 7
TREASURE = treasuretype.HighItems()
ATTACK = items.Attack( (1,6,0), element = stats.RESIST_SLASHING, extra_effect=
effects.StatDamage( stats.PIETY, amount=4, anim=animobs.GreenBoom )
)
TECHNIQUES = ( invocations.MPInvocation( "Spirit Drain",
effects.TargetIsEnemy( on_true = (
effects.OpposedRoll( on_success = (
effects.ManaDamage( (1,8,0), stat_bonus=stats.TOUGHNESS, anim=animobs.PurpleExplosion ),
effects.CauseSleep()
,), on_failure = (
effects.ManaDamage( (1,8,0), stat_bonus=None, anim=animobs.PurpleExplosion )
,)),), on_false= (
effects.NoEffect( anim=animobs.PurpleExplosion )
,)), com_tar=targetarea.Cone(reach=4), ai_tar=invocations.TargetEnemy(), mp_cost=12
), )
def init_monster( self ):
self.levels.append( base.Humanoid( 8, self ) )
class Manticore( base.Monster ):
name = "Manticore"
statline = { stats.STRENGTH: 20, stats.TOUGHNESS: 19, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 7, stats.PIETY: 12, stats.CHARISMA: 9 }
SPRITENAME = "monster_default.png"
FRAME = 26
TEMPLATES = ()
MOVE_POINTS = 12
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_WILDERNESS, context.MTY_BEAST, context.MTY_BOSS )
ENC_LEVEL = 7
COMBAT_AI = aibrain.ArcherAI()
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (2,4,0), element = stats.RESIST_PIERCING, extra_effect=abilities.POISON_ATTACK )
TECHNIQUES = (invocations.MPInvocation( "Tail Spikes",
effects.NoEffect( children=(
effects.PhysicalAttackRoll( att_stat=stats.REFLEXES, att_modifier=5, on_success = (
effects.HealthDamage( (1,8,0), stat_bonus=stats.STRENGTH, element=stats.RESIST_PIERCING, anim=animobs.RedBoom ),
), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom ),
)),
effects.PhysicalAttackRoll( att_stat=stats.REFLEXES, att_modifier=5, on_success = (
effects.HealthDamage( (1,8,0), stat_bonus=stats.STRENGTH, element=stats.RESIST_PIERCING, anim=animobs.RedBoom ),
), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom ),
) ),
effects.PhysicalAttackRoll( att_stat=stats.REFLEXES, att_modifier=5, on_success = (
effects.HealthDamage( (1,8,0), stat_bonus=stats.STRENGTH, element=stats.RESIST_PIERCING, anim=animobs.RedBoom ),
), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom ),
) ),
effects.PhysicalAttackRoll( att_stat=stats.REFLEXES, att_modifier=5, on_success = (
effects.HealthDamage( (1,8,0), stat_bonus=stats.STRENGTH, element=stats.RESIST_PIERCING, anim=animobs.RedBoom ),
), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom ),
) ),
),), mp_cost=10, com_tar=targetarea.SingleTarget(reach=9), shot_anim=animobs.GoldStone, ai_tar=invocations.TargetEnemy()
),
)
def init_monster( self ):
self.levels.append( base.Beast( 6, self ) )
# *******************************
# *** ENCOUNTER LEVEL 8 ***
# *******************************
# Megaraptor
class Wyvern( base.Monster ):
name = "Wyvern"
statline = { stats.STRENGTH: 19, stats.TOUGHNESS: 15, stats.REFLEXES: 12, \
stats.INTELLIGENCE: 6, stats.PIETY: 12, stats.CHARISMA: 9 }
SPRITENAME = "monster_default.png"
FRAME = 44
TEMPLATES = (stats.DRAGON,)
MOVE_POINTS = 10
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_WILDERNESS,
context.MTY_BEAST, context.GEN_DRAGON )
ENC_LEVEL = 8
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (2,6,0), element = stats.RESIST_PIERCING,
extra_effect=abilities.POISON_ATTACK_1d8 )
TECHNIQUES = ()
def init_monster( self ):
self.levels.append( base.Terror( 8, self ) )
# *******************************
# *** ENCOUNTER LEVEL 9 ***
# *******************************
class Chimera( base.Monster ):
name = "Chimera"
# This is based on the version from the Pathfinder SRD rather than the
# regular SRD; the only difference is the beefed-up breath weapon.
statline = { stats.STRENGTH: 19, stats.TOUGHNESS: 17, stats.REFLEXES: 13, \
stats.INTELLIGENCE: 4, stats.PIETY: 13, stats.CHARISMA: 10,
stats.AWARENESS: 50 }
SPRITENAME = "monster_by_Joe.png"
FRAME = 0
TEMPLATES = ()
MOVE_POINTS = 12
VOICE = dialogue.voice.GREEK
HABITAT = ( context.HAB_EVERY, context.SET_EVERY, context.DES_FIRE,
context.MTY_BEAST, context.GEN_CHAOS, context.MTY_BOSS )
ENC_LEVEL = 9
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (2,8,0), element = stats.RESIST_PIERCING )
TECHNIQUES = ( invocations.MPInvocation( "Fire Breath",
effects.OpposedRoll( att_stat=stats.REFLEXES, def_stat=stats.REFLEXES, on_success = (
effects.HealthDamage( (6,8,0), stat_bonus=stats.TOUGHNESS, element=stats.RESIST_FIRE, anim=animobs.RedCloud )
,), on_failure = (
effects.HealthDamage( (3,8,0), stat_bonus=None, element=stats.RESIST_FIRE, anim=animobs.RedCloud )
,) ), com_tar=targetarea.Cone(reach=4), ai_tar=invocations.TargetEnemy(), mp_cost=16
), )
def init_monster( self ):
self.levels.append( base.Terror( 9, self ) )
class Medusa( base.Monster ):
name = "Medusa"
statline = { stats.STRENGTH: 10, stats.TOUGHNESS: 12, stats.REFLEXES: 15, \
stats.INTELLIGENCE: 12, stats.PIETY: 13, stats.CHARISMA: 15 }
SPRITENAME = "monster_default.png"
FRAME = 30
TEMPLATES = ()
MOVE_POINTS = 10
VOICE = dialogue.voice.GREEK
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MTY_HUMANOID )
ENC_LEVEL = 9
COMBAT_AI = aibrain.ArcherAI(approach_allies=0,technique_chance=75)
ATTACK = items.Attack( (1,6,0), element = stats.RESIST_PIERCING, extra_effect=abilities.POISON_ATTACK_2d6 )
TECHNIQUES = ( invocations.MPInvocation( "Death Gaze",
effects.OpposedRoll( att_stat=stats.PIETY, att_modifier=-10, on_success = (
effects.InstaKill( anim=animobs.CriticalHit )
,), on_failure = (
effects.NoEffect( anim=animobs.SmallBoom )
,) ), com_tar=targetarea.SingleTarget(reach=6), shot_anim=animobs.PurpleVortex, ai_tar=invocations.TargetEnemy(), mp_cost=9
), abilities.LONGBOW )
def init_monster( self ):
self.levels.append( base.Humanoid( 6, self ) )
class Umbull( base.Monster ):
name = "Umbull"
statline = { stats.STRENGTH: 23, stats.TOUGHNESS: 19, stats.REFLEXES: 13, \
stats.INTELLIGENCE: 9, stats.PIETY: 11, stats.CHARISMA: 13 }
SPRITENAME = "monster_default.png"
FRAME = 4
TEMPLATES = ()
MOVE_POINTS = 8
HABITAT = ( context.HAB_CAVE, context.SET_EVERY,
context.MAP_DUNGEON, context.DES_EARTH )
ENC_LEVEL = 9
COMBAT_AI = aibrain.BruiserAI()
TREASURE = treasuretype.Standard()
ATTACK = items.Attack( (3,6,0), element = stats.RESIST_SLASHING )
TECHNIQUES = (invocations.Invocation( "Freezing Gaze",
effects.OpposedRoll( att_modifier=20, on_success = (
effects.Paralyze( max_duration = 6 )
,), on_failure =(
effects.NoEffect( anim=animobs.SmallBoom )
,) ), com_tar=targetarea.SingleTarget(), shot_anim=animobs.PurpleVortex,
ai_tar=invocations.TargetMobileEnemy() ),
)
def init_monster( self ):
self.levels.append( base.Defender( 9, self ) )
# ********************************
# *** ENCOUNTER LEVEL 10 ***
# ********************************
class Sphinx( base.Monster ):
name = "Sphinx"
statline = { stats.STRENGTH: 19, stats.TOUGHNESS: 13, stats.REFLEXES: 12, \
stats.INTELLIGENCE: 18, stats.PIETY: 19, stats.CHARISMA: 19, \
stats.NATURAL_DEFENSE: 15 }
SPRITENAME = "monster_default.png"
FRAME = 37
TEMPLATES = ()
MOVE_POINTS = 12
VOICE = dialogue.voice.GREEK
HABITAT = ( context.HAB_DESERT, context.SET_EVERY,
context.MAP_WILDERNESS,
context.DES_SOLAR,
context.MTY_HUMANOID, context.MTY_LEADER, context.MTY_BOSS )
ENC_LEVEL = 10
TREASURE = treasuretype.High()
ATTACK = items.Attack( (2,6,4), element = stats.RESIST_SLASHING )
TECHNIQUES = ( spells.lunarspells.DEATH_RAY, spells.airspells.DISPEL_MAGIC,
spells.priestspells.SANCTUARY, spells.solarspells.REMOVE_CURSE,
spells.solarspells.MASS_CURE )
def init_monster( self ):
self.levels.append( base.Terror( 8, self ) )
class Behir( base.Monster ):
name = "Behir"
statline = { stats.STRENGTH: 26, stats.TOUGHNESS: 21, stats.REFLEXES: 13, \
stats.INTELLIGENCE: 7, stats.PIETY: 14, stats.CHARISMA: 12, \
stats.RESIST_LIGHTNING: 150, stats.AWARENESS: 50, stats.CRITICAL_HIT: 10 }
SPRITENAME = "monster_by_Joe.png"
FRAME = 5
TEMPLATES = ()
MOVE_POINTS = 12
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_DUNGEON,
context.DES_AIR,
context.MTY_BOSS )
ENC_LEVEL = 10
TREASURE = treasuretype.Swallowed(scale=1,swag_chance=20)
ATTACK = items.Attack( (2,4,0), element = stats.RESIST_PIERCING )
TECHNIQUES = ( invocations.MPInvocation( "Lightning Breath",
effects.OpposedRoll( att_stat=stats.REFLEXES, def_stat=stats.REFLEXES, on_success = (
effects.HealthDamage( (7,6,0), stat_bonus=None, element=stats.RESIST_LIGHTNING, anim=animobs.Spark )
,), on_failure = (
effects.HealthDamage( (3,7,0), stat_bonus=None, element=stats.RESIST_LIGHTNING, anim=animobs.Spark )
,) ), com_tar=targetarea.Line(reach=5), ai_tar=invocations.TargetEnemy(), mp_cost=30
), )
def init_monster( self ):
self.levels.append( base.Terror( 9, self ) )
# ********************************
# *** ENCOUNTER LEVEL 11 ***
# ********************************
class Hydra( base.Monster ):
name = "Hydra"
statline = { stats.STRENGTH: 21, stats.TOUGHNESS: 20, stats.REFLEXES: 12, \
stats.INTELLIGENCE: 3, stats.PIETY: 10, stats.CHARISMA: 9,
stats.PHYSICAL_ATTACK: 20 }
SPRITENAME = "monster_default.png"
FRAME = 3
TEMPLATES = (stats.REPTILE,stats.EARTH,)
MOVE_POINTS = 8
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.DES_EARTH, context.MTY_BOSS )
ENC_LEVEL = 11
VOICE = dialogue.voice.GREEK
COMBAT_AI = aibrain.BruiserAI()
TREASURE = treasuretype.Low()
ATTACK = items.Attack( (2,10,0), element = stats.RESIST_PIERCING )
TECHNIQUES = ( invocations.MPInvocation( "Poison Breath",
effects.OpposedRoll( def_stat=stats.TOUGHNESS, on_success = (
effects.HealthDamage( (3,6,0), stat_bonus=stats.TOUGHNESS, element=stats.RESIST_POISON, anim=animobs.PoisonCloud ),
effects.TargetIs( effects.ALIVE, on_true=( effects.OpposedRoll( att_stat=None, def_stat=stats.TOUGHNESS, on_success = (
effects.Enchant( enchantments.PoisonClassic )
,) ), ))
), on_failure = (
effects.HealthDamage( (2,6,0), stat_bonus=None, element=stats.RESIST_POISON, anim=animobs.PoisonCloud )
,) ), com_tar=targetarea.Blast(radius=2), ai_tar=invocations.TargetEnemy(min_distance=3), mp_cost=20, shot_anim=animobs.GreenComet
), )
def init_monster( self ):
self.levels.append( base.Terror( 10, self ) )
self.condition.append( enchantments.PermaMegaRegeneration() )
# ********************************
# *** ENCOUNTER LEVEL 12 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 13 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 14 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 15 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 16 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 17 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 18 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 19 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 20 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 21 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 22 ***
# ********************************
class Kaiju( base.Monster ):
name = "Kaiju"
statline = { stats.STRENGTH: 45, stats.TOUGHNESS: 35, stats.REFLEXES: 16, \
stats.INTELLIGENCE: 3, stats.PIETY: 14, stats.CHARISMA: 14, stats.RESIST_ATOMIC: 50,
stats.RESIST_FIRE: 200, stats.RESIST_POISON: 200, stats.RESIST_LUNAR: 200 }
SPRITENAME = "monster_default.png"
FRAME = 14
TEMPLATES = ()
MOVE_POINTS = 8
HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
context.MAP_WILDERNESS,
context.MTY_BEAST )
ENC_LEVEL = 22
VOICE = dialogue.voice.DRACONIAN
COMBAT_AI = aibrain.BruiserAI()
TREASURE = None
ATTACK = items.Attack( (4,8,0), element = stats.RESIST_CRUSHING )
TECHNIQUES = ( invocations.MPInvocation( "Atomic Breath",
effects.OpposedRoll( att_stat=stats.REFLEXES, on_success = (
effects.HealthDamage( (10,6,0), stat_bonus=stats.TOUGHNESS, element=stats.RESIST_ATOMIC, anim=animobs.Nuclear )
,), on_failure = (
effects.HealthDamage( (3,10,0), stat_bonus=None, element=stats.RESIST_ATOMIC, anim=animobs.Nuclear )
,) ), com_tar=targetarea.Cone(reach=8), ai_tar=invocations.TargetEnemy(), mp_cost=60
), )
def init_monster( self ):
self.levels.append( base.Beast( 48, self ) )
self.condition.append( enchantments.PermaMegaRegeneration() )
| gpl-2.0 |