#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
from __future__ import print_function
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel ([email protected])"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows versions prior to 6.0 (Vista) raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
bltn_open = builtins.open # Since 'open' is TarFile.open
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as regular files.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
# Fields in a pax header that are numbers; all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
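# A round-trip sketch (illustrative doctest-style values, not module code):
#
#   >>> stn("foo", 8, "utf-8", "strict")
#   b'foo\x00\x00\x00\x00\x00'
#   >>> nts(b'foo\x00\x00\x00\x00\x00', "utf-8", "strict")
#   'foo'
#
# Note that stn() silently truncates strings longer than `length'.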
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
    # Compare a one-byte slice so the test works with both Python 2 str
    # and Python 3 bytes (on Python 3, s[0] is an int, not a str).
    if s[0:1] != b"\200":
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0
for i in range(len(s) - 1):
n <<= 8
            n += ord(s[i + 1:i + 2])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
    # POSIX 1003.1-1988 requires numbers to be encoded as a string of
    # octal digits followed by a null-byte; this allows values up to
    # (8**(digits-1))-1. GNU tar allows storing numbers greater than
    # that if necessary. A leading 0o200 byte indicates this particular
    # encoding; the following digits-1 bytes are a big-endian
    # representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = bytearray()
for i in range(digits - 1):
s.insert(0, n & 0o377)
n >>= 8
s.insert(0, 0o200)
return s
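# Illustrative examples of the two encodings (a doctest-style sketch; the
# return type differs between the octal and the base-256 branches):
#
#   >>> itn(0o755)
#   b'0000755\x00'
#   >>> itn(8 ** 7, digits=8, format=GNU_FORMAT)
#   bytearray(b'\x80\x00\x00\x00\x00 \x00\x00')
#   >>> nti(b'0000755\x00')
#   493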
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
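# Sanity check (illustrative): an all-zero block yields 256 for both sums,
# because the eight chksum bytes are counted as spaces (8 * 0x20 == 256):
#
#   >>> calc_chksums(b"\0" * BLOCKSIZE)
#   (256, 256)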
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
while True:
buf = src.read(16*1024)
if not buf:
break
dst.write(buf)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
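# Typical use, as a minimal sketch with in-memory file objects:
#
#   >>> import io
#   >>> src, dst = io.BytesIO(b"x" * 1000), io.BytesIO()
#   >>> copyfileobj(src, dst, 1000)
#   >>> len(dst.getvalue())
#   1000
#
# Asking for more bytes than src can deliver raises IOError.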
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadble tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile(object):
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream(object):
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
            # Consume the extra field with __read so that self.pos and the
            # decompressor are not fed gzip header bytes.
            self.__read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\037\213\010"):
return "gz"
if self.buf.startswith(b"BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = b""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
self.buf += data
x += len(data)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
def seekable(self):
if not hasattr(self.fileobj, "seekable"):
# XXX gzip.GzipFile and bz2.BZ2File
return True
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
tarinfo.sparse)
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = b""
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = b""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = b""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
# XXX TextIOWrapper uses the read1() method.
read1 = read
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
while True:
buf = self.fileobj.read(self.blocksize)
self.buffer += buf
if not buf or b"\n" in buf:
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = b""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
        # Test number fields for values that exceed the field limit or
        # that need to be stored as floats.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
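        # The chksum field occupies bytes 148-155 of the block, hence the
        # -364 (== 148 - 512) and -357 (== 155 - 512) offsets below: six
        # octal digits plus a NUL, keeping the trailing space packed above.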
buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf8")
if binary:
# Try to restore the original byte representation of `value'.
# Needless to say, that the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
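    # Each record is "%d %s=%s\n", where the leading length counts the whole
    # record including the length field itself, so the loop above searches
    # for a fixed point. A worked example (hypothetical): for "path=foo",
    # l == 10 and p converges to 12, because "12 path=foo\n" is twelve bytes.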
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
        # Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
            # Compare a one-byte slice so this works on Python 2 and 3.
            isextended = buf[482:483] != NUL
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
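    # tobuf() and frombuf() are inverses for simple members; a minimal
    # sketch, assuming the module-level ENCODING:
    #
    #   >>> buf = TarInfo("foo").tobuf()
    #   >>> len(buf)
    #   512
    #   >>> TarInfo.frombuf(buf, ENCODING, "surrogateescape").name
    #   'foo'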
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
            isextended = buf[504:505] != NUL
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
        # Check if the pax header contains a hdrcharset field. This tells us
        # the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
        # implementations to store them as raw binary strings if the
        # translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf8"
        # Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf8", "utf8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf8", "utf8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
        `fileobj' is not closed when the TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
            # (as tar's -b20 option does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
        # Backward slashes are converted to forward slashes;
        # absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                # The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print(filemode(tarinfo.mode), end=' ')
print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid), end=' ')
if tarinfo.ischr() or tarinfo.isblk():
print("%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)), end=' ')
else:
print("%10d" % tarinfo.size, end=' ')
print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6], end=' ')
print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
if verbose:
if tarinfo.issym():
print("->", tarinfo.linkname, end=' ')
if tarinfo.islnk():
print("link to", tarinfo.linkname, end=' ')
print()
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
        that expects a TarInfo object argument and returns the changed
        TarInfo object; if it returns None, the TarInfo object is
        excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
           On Windows platforms, `fileobj' should always be opened with mode
           'rb' to avoid newline translation distorting the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, "wb")
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
        except symlink_exception:
            # If the platform could not create the link (e.g. missing
            # privilege or an unsupported filesystem), fall back to
            # extracting a copy of the link's target from the archive,
            # as the docstring above describes.
            try:
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
            except KeyError:
                raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter(object):
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
next = __next__ # for Python 2.x
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
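# Illustrative usage (hypothetical path): check before opening so that
# non-tar files can be skipped without handling TarError:
#
#     if is_tarfile("backup.tar.gz"):
#         archive = TarFile.open("backup.tar.gz")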
bltn_open = open
open = TarFile.open
|
artistic-2.0
| 4,546,384,329,438,695,400 | -4,711,394,288,179,216,000 | 34.530111 | 103 | 0.539054 | false |
rdipietro/tensorflow
|
tensorflow/contrib/tensor_forest/python/kernel_tests/grow_tree_op_test.py
|
12
|
4164
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.grow_tree_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class GrowTreeTest(test_util.TensorFlowTestCase):
def setUp(self):
self.tree = tf.Variable([[1, 0], [-1, 0], [-1, 0],
[-2, 0], [-2, 0], [-2, 0], [-2, 0]])
self.tree_thresholds = tf.Variable([0., 0., 0., 0., 0., 0., 0.])
self.eot = tf.Variable([3])
self.node_map = [-1, 0, 1, -1, -1, -1, -1]
self.finished = [1, 2]
self.best_splits = [2, 3]
self.split_features = [[1, 2, 3, 4], [5, 6, 7, 8]]
self.split_thresholds = [[10., 20., 30., 40.], [50., 60., 70., 80.]]
self.ops = training_ops.Load()
def testSimple(self):
with self.test_session():
tf.global_variables_initializer().run()
update_list, tree_updates, threshold_updates, new_eot = (
self.ops.grow_tree(self.eot, self.node_map, self.finished,
self.best_splits, self.split_features,
self.split_thresholds))
self.assertAllEqual([1, 3, 4, 2, 5, 6], update_list.eval())
self.assertAllEqual(
[[3, 3], [-1, -1], [-1, -1], [5, 8], [-1, -1], [-1, -1]],
tree_updates.eval())
self.assertAllEqual([30.0, 0.0, 0.0, 80.0, 0.0, 0.0],
threshold_updates.eval())
self.assertAllEqual([7], new_eot.eval())
def testNoRoomToGrow(self):
with self.test_session():
tf.global_variables_initializer().run()
# Even though there's one free node, there needs to be 2 to grow.
tf.assign(self.eot, [6]).eval()
update_list, tree_updates, threshold_updates, new_eot = (
self.ops.grow_tree(self.eot, self.node_map, self.finished,
self.best_splits, self.split_features,
self.split_thresholds))
self.assertAllEqual([], update_list.eval())
self.assertEquals((0, 2), tree_updates.eval().shape)
self.assertAllEqual([], threshold_updates.eval())
self.assertAllEqual([6], new_eot.eval())
def testNoFinished(self):
with self.test_session():
tf.global_variables_initializer().run()
update_list, tree_updates, threshold_updates, new_eot = (
self.ops.grow_tree(self.eot, self.node_map, [], [],
self.split_features, self.split_thresholds))
self.assertAllEqual([], update_list.eval())
self.assertAllEqual((0, 2), tree_updates.eval().shape)
self.assertAllEqual([], threshold_updates.eval())
self.assertAllEqual([3], new_eot.eval())
def testBadInput(self):
with self.test_session():
tf.global_variables_initializer().run()
with self.assertRaisesOpError(
'Number of finished nodes should be the same in finished and '
'best_splits.'):
update_list, _, _, _ = (self.ops.grow_tree(self.eot, self.node_map, [],
self.best_splits,
self.split_features,
self.split_thresholds))
self.assertAllEqual([], update_list.eval())
if __name__ == '__main__':
googletest.main()
|
apache-2.0
| 2,173,953,872,102,453,500 | 7,268,695,090,183,860,000 | 40.227723 | 80 | 0.585495 | false |
TheIoTLearningInitiative/CodeLabs
|
Hochob/Audio/stream2chromecast/stream2chromecast.py
|
2
|
24427
|
#!/usr/bin/env python
"""
stream2chromecast.py: Chromecast media streamer for Linux
author: Pat Carter - https://github.com/Pat-Carter/stream2chromecast
version: 0.6.3
"""
# Copyright (C) 2014-2016 Pat Carter
#
# This file is part of Stream2chromecast.
#
# Stream2chromecast is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Stream2chromecast is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Stream2chromecast. If not, see <http://www.gnu.org/licenses/>.
VERSION = "0.6.3"
import sys, os, errno
import signal
from cc_media_controller import CCMediaController
import cc_device_finder
import time
import BaseHTTPServer
import urllib
import mimetypes
from threading import Thread
import subprocess
import httplib
import urlparse
import socket
import tempfile
script_name = (sys.argv[0].split(os.sep))[-1]
USAGETEXT = """
Usage
Play a file:-
%s <file>
Pause the current file:-
%s -pause
Continue (un-pause) the current file:-
%s -continue
Stop the current file playing:-
%s -stop
Set the volume to a value between 0 & 1.0 (e.g. 0.5 = half volume):-
%s -setvol <volume>
Adjust the volume up or down by 0.1:-
%s -volup
%s -voldown
Mute the volume:-
%s -mute
Play an unsupported media type (e.g. an mpg file) using ffmpeg or avconv as a realtime transcoder (requires ffmpeg or avconv to be installed):-
%s -transcode <file>
Play remote file using a URL (e.g. a web video):
%s -playurl <URL>
Display Chromecast status:-
%s -status
Search for all Chromecast devices on the network:-
%s -devicelist
Additional option to specify a Chromecast device by name (or IP address) explicitly:
e.g. to play a file on a specific device
%s -devicename <chromecast device name> <file>
Additional option to specify the preferred transcoder tool when both ffmpeg & avconv are available
e.g. to play and transcode a file using avconv
%s -transcoder avconv -transcode <file>
Additional option to specify the port from which the media is streamed. This can be useful in a firewalled environment.
e.g. to serve the media on port 8765
%s -port 8765 <file>
Additional option to specify subtitles. Only WebVTT format is supported.
e.g. to cast the subtitles on /path/to/subtitles.vtt
%s -subtitles /path/to/subtitles.vtt <file>
Additional option to specify the port from which the subtitles are streamed. This can be useful in a firewalled environment.
e.g. to serve the subtitles on port 8765
%s -subtitles_port 8765 <file>
Additional option to specify the subtitles language. The language format is defined by RFC 5646.
e.g. to serve French subtitles
%s -subtitles_language fr <file>
Additional option to supply custom parameters to the transcoder (ffmpeg or avconv) output
e.g. to transcode the media with an output video bitrate of 1000k
%s -transcode -transcodeopts '-b:v 1000k' <file>
Additional option to supply custom parameters to the transcoder input
e.g. to transcode the media and seek to a position 15 minutes from the start of playback
%s -transcode -transcodeinputopts '-ss 00:15:00' <file>
Additional option to specify the buffer size of the data returned from the transcoder. Increasing this can help when on a slow network.
e.g. to specify a buffer size of 5 megabytes
%s -transcode -transcodebufsize 5242880 <file>
""" % ((script_name,) * 21)
PIDFILE = os.path.join(tempfile.gettempdir(), "stream2chromecast_%s.pid")
FFMPEG = 'ffmpeg %s -i "%s" -preset ultrafast -f mp4 -frag_duration 3000 -b:v 2000k -loglevel error %s -'
AVCONV = 'avconv %s -i "%s" -preset ultrafast -f mp4 -frag_duration 3000 -b:v 2000k -loglevel error %s -'
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
content_type = "video/mp4"
""" Handle HTTP requests for files which do not need transcoding """
def do_GET(self):
query = self.path.split("?",1)[-1]
filepath = urllib.unquote_plus(query)
self.suppress_socket_error_report = None
self.send_headers(filepath)
print "sending data"
try:
self.write_response(filepath)
except socket.error, e:
if isinstance(e.args, tuple):
if e[0] in (errno.EPIPE, errno.ECONNRESET):
print "disconnected"
self.suppress_socket_error_report = True
return
raise
def handle_one_request(self):
try:
return BaseHTTPServer.BaseHTTPRequestHandler.handle_one_request(self)
except socket.error:
if not self.suppress_socket_error_report:
raise
def finish(self):
try:
return BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except socket.error:
if not self.suppress_socket_error_report:
raise
def send_headers(self, filepath):
self.protocol_version = "HTTP/1.1"
self.send_response(200)
self.send_header("Content-type", self.content_type)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header("Transfer-Encoding", "chunked")
self.end_headers()
def write_response(self, filepath):
with open(filepath, "rb") as f:
while True:
line = f.read(1024)
if len(line) == 0:
break
chunk_size = "%0.2X" % len(line)
self.wfile.write(chunk_size)
self.wfile.write("\r\n")
self.wfile.write(line)
self.wfile.write("\r\n")
self.wfile.write("0")
self.wfile.write("\r\n\r\n")
class TranscodingRequestHandler(RequestHandler):
""" Handle HTTP requests for files which require realtime transcoding with ffmpeg """
transcoder_command = FFMPEG
transcode_options = ""
transcode_input_options = ""
bufsize = 0
def write_response(self, filepath):
if self.bufsize != 0:
print "transcode buffer size:", self.bufsize
ffmpeg_command = self.transcoder_command % (self.transcode_input_options, filepath, self.transcode_options)
ffmpeg_process = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, shell=True, bufsize=self.bufsize)
for line in ffmpeg_process.stdout:
chunk_size = "%0.2X" % len(line)
self.wfile.write(chunk_size)
self.wfile.write("\r\n")
self.wfile.write(line)
self.wfile.write("\r\n")
self.wfile.write("0")
self.wfile.write("\r\n\r\n")
class SubRequestHandler(RequestHandler):
""" Handle HTTP requests for subtitles files """
content_type = "text/vtt;charset=utf-8"
def get_transcoder_cmds(preferred_transcoder=None):
""" establish which transcoder utility to use depending on what is installed """
probe_cmd = None
transcoder_cmd = None
ffmpeg_installed = is_transcoder_installed("ffmpeg")
avconv_installed = is_transcoder_installed("avconv")
# if anything other than avconv is preferred, try to use ffmpeg otherwise use avconv
if preferred_transcoder != "avconv":
if ffmpeg_installed:
transcoder_cmd = "ffmpeg"
probe_cmd = "ffprobe"
elif avconv_installed:
print "unable to find ffmpeg - using avconv"
transcoder_cmd = "avconv"
probe_cmd = "avprobe"
# otherwise, avconv is preferred, so try to use avconv, followed by ffmpeg
else:
if avconv_installed:
transcoder_cmd = "avconv"
probe_cmd = "avprobe"
elif ffmpeg_installed:
print "unable to find avconv - using ffmpeg"
transcoder_cmd = "ffmpeg"
probe_cmd = "ffprobe"
return transcoder_cmd, probe_cmd
def is_transcoder_installed(transcoder_application):
""" check for an installation of either ffmpeg or avconv """
try:
subprocess.check_output([transcoder_application, "-version"])
return True
except OSError:
return False
def kill_old_pid(device_ip):
""" attempts to kill a previously running instance of this application casting to the specified device. """
pid_file = PIDFILE % device_ip
try:
with open(pid_file, "r") as pidfile:
pid = int(pidfile.read())
os.killpg(pid, signal.SIGTERM)
except:
pass
def save_pid(device_ip):
""" saves the process id of this application casting to the specified device in a pid file. """
pid_file = PIDFILE % device_ip
with open(pid_file, "w") as pidfile:
pidfile.write("%d" % os.getpid())
def get_mimetype(filename, ffprobe_cmd=None):
""" find the container format of the file """
# default value
mimetype = "video/mp4"
# guess based on filename extension
guess = mimetypes.guess_type(filename)[0]
if guess is not None:
if guess.lower().startswith("video/") or guess.lower().startswith("audio/"):
mimetype = guess
# use the OS file command...
try:
file_cmd = 'file --mime-type -b "%s"' % filename
file_mimetype = subprocess.check_output(file_cmd, shell=True).strip().lower()
if file_mimetype.startswith("video/") or file_mimetype.startswith("audio/"):
mimetype = file_mimetype
print "OS identifies the mimetype as :", mimetype
return mimetype
except:
pass
# use ffmpeg/avconv if installed
if ffprobe_cmd is None:
return mimetype
# ffmpeg/avconv is installed
has_video = False
has_audio = False
format_name = None
ffprobe_cmd = '%s -show_streams -show_format "%s"' % (ffprobe_cmd, filename)
ffmpeg_process = subprocess.Popen(ffprobe_cmd, stdout=subprocess.PIPE, shell=True)
for line in ffmpeg_process.stdout:
if line.startswith("codec_type=audio"):
has_audio = True
elif line.startswith("codec_type=video"):
has_video = True
elif line.startswith("format_name="):
name, value = line.split("=")
format_name = value.strip().lower().split(",")
# use the default if it isn't possible to identify the format type
if format_name is None:
return mimetype
if has_video:
mimetype = "video/"
else:
mimetype = "audio/"
if "mp4" in format_name:
mimetype += "mp4"
elif "webm" in format_name:
mimetype += "webm"
elif "ogg" in format_name:
mimetype += "ogg"
elif "mp3" in format_name:
mimetype = "audio/mpeg"
elif "wav" in format_name:
mimetype = "audio/wav"
else:
mimetype += "mp4"
return mimetype
def play(filename, transcode=False, transcoder=None, transcode_options=None, transcode_input_options=None,
transcode_bufsize=0, device_name=None, server_port=None,
subtitles=None, subtitles_port=None, subtitles_language=None):
""" play a local file or transcode from a file or URL and stream to the chromecast """
print_ident()
cast = CCMediaController(device_name=device_name)
kill_old_pid(cast.host)
save_pid(cast.host)
if os.path.isfile(filename):
filename = os.path.abspath(filename)
print "source is file: %s" % filename
else:
if transcode and (filename.lower().startswith("http://") or filename.lower().startswith("https://") or filename.lower().startswith("rtsp://")):
print "source is URL: %s" % filename
else:
sys.exit("media file %s not found" % filename)
transcoder_cmd, probe_cmd = get_transcoder_cmds(preferred_transcoder=transcoder)
status = cast.get_status()
webserver_ip = status['client'][0]
print "local ip address:", webserver_ip
req_handler = RequestHandler
if transcode:
if transcoder_cmd in ("ffmpeg", "avconv"):
req_handler = TranscodingRequestHandler
if transcoder_cmd == "ffmpeg":
req_handler.transcoder_command = FFMPEG
else:
req_handler.transcoder_command = AVCONV
if transcode_options is not None:
req_handler.transcode_options = transcode_options
if transcode_input_options is not None:
req_handler.transcode_input_options = transcode_input_options
req_handler.bufsize = transcode_bufsize
else:
print "No transcoder is installed. Attempting standard playback"
if req_handler == RequestHandler:
req_handler.content_type = get_mimetype(filename, probe_cmd)
# create a webserver to handle a single request for the media file on either a free port or on a specific port if passed in the port parameter
port = 0
if server_port is not None:
port = int(server_port)
server = BaseHTTPServer.HTTPServer((webserver_ip, port), req_handler)
thread = Thread(target=server.handle_request)
thread.start()
url = "http://%s:%s?%s" % (webserver_ip, str(server.server_port), urllib.quote_plus(filename, "/"))
print "URL & content-type: ", url, req_handler.content_type
# create another webserver to handle a request for the subtitles file, if specified in the subtitles parameter
sub = None
if subtitles:
if os.path.isfile(subtitles):
sub_port = 0
if subtitles_port is not None:
sub_port = int(subtitles_port)
sub_server = BaseHTTPServer.HTTPServer((webserver_ip, sub_port), SubRequestHandler)
thread2 = Thread(target=sub_server.handle_request)
thread2.start()
sub = "http://%s:%s?%s" % (webserver_ip, str(sub_server.server_port), urllib.quote_plus(subtitles, "/"))
print "sub URL: ", sub
else:
print "Subtitles file %s not found" % subtitles
load(cast, url, req_handler.content_type, sub, subtitles_language)
def load(cast, url, mimetype, sub=None, sub_language=None):
""" load a chromecast instance with a url and wait for idle state """
try:
print "loading media..."
cast.load(url, mimetype, sub, sub_language)
# wait for playback to complete before exiting
print "waiting for player to finish - press ctrl-c to stop..."
idle = False
while not idle:
time.sleep(1)
idle = cast.is_idle()
except KeyboardInterrupt:
print
print "stopping..."
cast.stop()
finally:
print "done"
def playurl(url, device_name=None):
""" play a remote HTTP resource on the chromecast """
print_ident()
def get_resp(url):
url_parsed = urlparse.urlparse(url)
scheme = url_parsed.scheme
host = url_parsed.netloc
path = url.split(host, 1)[-1]
conn = None
if scheme == "https":
conn = httplib.HTTPSConnection(host)
else:
conn = httplib.HTTPConnection(host)
conn.request("HEAD", path)
resp = conn.getresponse()
return resp
def get_full_url(url, location):
url_parsed = urlparse.urlparse(url)
scheme = url_parsed.scheme
host = url_parsed.netloc
if location.startswith("/") is False:
path = url.split(host, 1)[-1]
if path.endswith("/"):
path = path.rsplit("/", 2)[0]
else:
path = path.rsplit("/", 1)[0] + "/"
location = path + location
full_url = scheme + "://" + host + location
return full_url
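    # Illustrative resolution (hypothetical URLs, not part of the original
    # script): get_full_url("http://host/a/b.mp4", "c.mp4") yields
    # "http://host/a/c.mp4", while an absolute location like "/x/y.mp4"
    # yields "http://host/x/y.mp4".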
resp = get_resp(url)
if resp.status != 200:
redirect_codes = [ 301, 302, 303, 307, 308 ]
if resp.status in redirect_codes:
redirects = 0
while resp.status in redirect_codes:
redirects += 1
if redirects > 9:
sys.exit("HTTP Error: Too many redirects")
headers = resp.getheaders()
for header in headers:
if len(header) > 1:
if header[0].lower() == "location":
redirect_location = header[1]
if redirect_location.startswith("http") is False:
redirect_location = get_full_url(url, redirect_location)
print "Redirecting to " + redirect_location
resp = get_resp(redirect_location)
if resp.status != 200:
sys.exit("HTTP error:" + str(resp.status) + " - " + resp.reason)
else:
sys.exit("HTTP error:" + str(resp.status) + " - " + resp.reason)
print "Found HTTP resource"
headers = resp.getheaders()
mimetype = None
for header in headers:
if len(header) > 1:
if header[0].lower() == "content-type":
mimetype = header[1]
    if mimetype is not None:
print "content-type:", mimetype
else:
mimetype = "video/mp4"
print "resource does not specify mimetype - using default:", mimetype
cast = CCMediaController(device_name=device_name)
load(cast, url, mimetype)
def pause(device_name=None):
""" pause playback """
CCMediaController(device_name=device_name).pause()
def unpause(device_name=None):
""" continue playback """
CCMediaController(device_name=device_name).play()
def stop(device_name=None):
""" stop playback and quit the media player app on the chromecast """
CCMediaController(device_name=device_name).stop()
def get_status(device_name=None):
""" print the status of the chromecast device """
print CCMediaController(device_name=device_name).get_status()
def volume_up(device_name=None):
""" raise the volume by 0.1 """
CCMediaController(device_name=device_name).set_volume_up()
def volume_down(device_name=None):
""" lower the volume by 0.1 """
CCMediaController(device_name=device_name).set_volume_down()
def set_volume(v, device_name=None):
""" set the volume to level between 0 and 1 """
CCMediaController(device_name=device_name).set_volume(v)
def list_devices():
print "Searching for devices, please wait..."
device_ips = cc_device_finder.search_network(device_limit=None, time_limit=10)
print "%d devices found" % len(device_ips)
for device_ip in device_ips:
print device_ip, ":", cc_device_finder.get_device_name(device_ip)
def print_ident():
""" display initial messages """
print
print "-----------------------------------------"
print
print "Stream2Chromecast version:%s" % VERSION
print
print "Copyright (C) 2014-2016 Pat Carter"
print "GNU General Public License v3.0"
print "https://www.gnu.org/licenses/gpl-3.0.html"
print
print "-----------------------------------------"
print
def validate_args(args):
""" validate that there are the correct number of arguments """
if len(args) < 1:
sys.exit(USAGETEXT)
if args[0] == "-setvol" and len(args) < 2:
sys.exit(USAGETEXT)
def get_named_arg_value(arg_name, args, integer=False):
""" get a argument value by name """
arg_val = None
if arg_name in args:
arg_pos = args.index(arg_name)
arg_name = args.pop(arg_pos)
if len(args) > (arg_pos + 1):
arg_val = args.pop(arg_pos)
if integer:
int_arg_val = 0
if arg_val is not None:
try:
int_arg_val = int(arg_val)
except ValueError:
print "Invalid integer parameter, defaulting to zero. Parameter name:", arg_name
arg_val = int_arg_val
return arg_val
def run():
""" main execution """
args = sys.argv[1:]
# optional device name parm. if not specified, device_name = None (the first device found will be used).
device_name = get_named_arg_value("-devicename", args)
# optional transcoder parm. if not specified, ffmpeg will be used, if installed, otherwise avconv.
transcoder = get_named_arg_value("-transcoder", args)
# optional server port parm. if not specified, a random available port will be used
server_port = get_named_arg_value("-port", args)
# optional transcode options parm. if specified, these options will be passed to the transcoder to be applied to the output
transcode_options = get_named_arg_value("-transcodeopts", args)
# optional transcode options parm. if specified, these options will be passed to the transcoder to be applied to the input data
transcode_input_options = get_named_arg_value("-transcodeinputopts", args)
# optional transcode bufsize parm. if specified, the transcoder will buffer approximately this many bytes of output
transcode_bufsize = get_named_arg_value("-transcodebufsize", args, integer=True)
# optional subtitle parm. if specified, the specified subtitles will be played.
subtitles = get_named_arg_value("-subtitles", args)
# optional subtitle_port parm. if not specified, a random available port will be used.
subtitles_port = get_named_arg_value("-subtitles_port", args)
# optional subtitle_language parm. if not specified en-US will be used.
subtitles_language = get_named_arg_value("-subtitles_language", args)
validate_args(args)
if args[0] == "-stop":
stop(device_name=device_name)
elif args[0] == "-pause":
pause(device_name=device_name)
elif args[0] == "-continue":
unpause(device_name=device_name)
elif args[0] == "-status":
get_status(device_name=device_name)
elif args[0] == "-setvol":
set_volume(float(args[1]), device_name=device_name)
elif args[0] == "-volup":
volume_up(device_name=device_name)
elif args[0] == "-voldown":
volume_down(device_name=device_name)
elif args[0] == "-mute":
set_volume(0, device_name=device_name)
elif args[0] == "-transcode":
arg2 = args[1]
play(arg2, transcode=True, transcoder=transcoder, transcode_options=transcode_options, transcode_input_options=transcode_input_options, transcode_bufsize=transcode_bufsize,
device_name=device_name, server_port=server_port, subtitles=subtitles, subtitles_port=subtitles_port,
subtitles_language=subtitles_language)
elif args[0] == "-playurl":
arg2 = args[1]
playurl(arg2, device_name=device_name)
elif args[0] == "-devicelist":
list_devices()
else:
play(args[0], device_name=device_name, server_port=server_port, subtitles=subtitles,
subtitles_port=subtitles_port, subtitles_language=subtitles_language)
if __name__ == "__main__":
run()
|
apache-2.0
| 8,401,023,719,317,577,000 | -3,400,453,965,084,740,000 | 29.610276 | 180 | 0.59946 | false |
craigcitro/gsutil
|
gslib/tests/test_cat.py
|
16
|
4976
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cat command."""
from __future__ import absolute_import
import gslib.tests.testcase as testcase
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import RUN_S3_TESTS
class TestCat(testcase.GsUtilIntegrationTestCase):
"""Integration tests for cat command."""
def test_cat_range(self):
"""Tests cat command with various range arguments."""
key_uri = self.CreateObject(contents='0123456789')
# Test various invalid ranges.
stderr = self.RunGsUtil(['cat', '-r -', suri(key_uri)],
return_stderr=True, expected_status=1)
self.assertIn('Invalid range', stderr)
stderr = self.RunGsUtil(['cat', '-r a-b', suri(key_uri)],
return_stderr=True, expected_status=1)
self.assertIn('Invalid range', stderr)
stderr = self.RunGsUtil(['cat', '-r 1-2-3', suri(key_uri)],
return_stderr=True, expected_status=1)
self.assertIn('Invalid range', stderr)
stderr = self.RunGsUtil(['cat', '-r 1.7-3', suri(key_uri)],
return_stderr=True, expected_status=1)
self.assertIn('Invalid range', stderr)
# Test various valid ranges.
stdout = self.RunGsUtil(['cat', '-r 1-3', suri(key_uri)],
return_stdout=True)
self.assertEqual('123', stdout)
stdout = self.RunGsUtil(['cat', '-r 8-', suri(key_uri)],
return_stdout=True)
self.assertEqual('89', stdout)
stdout = self.RunGsUtil(['cat', '-r -3', suri(key_uri)],
return_stdout=True)
self.assertEqual('789', stdout)
def test_cat_version(self):
"""Tests cat command on versioned objects."""
bucket_uri = self.CreateVersionedBucket()
# Create 2 versions of an object.
uri1 = self.CreateObject(bucket_uri=bucket_uri, contents='data1')
uri2 = self.CreateObject(bucket_uri=bucket_uri,
object_name=uri1.object_name, contents='data2')
stdout = self.RunGsUtil(['cat', suri(uri1)], return_stdout=True)
# Last version written should be live.
self.assertEqual('data2', stdout)
# Using either version-specific URI should work.
stdout = self.RunGsUtil(['cat', uri1.version_specific_uri],
return_stdout=True)
self.assertEqual('data1', stdout)
stdout = self.RunGsUtil(['cat', uri2.version_specific_uri],
return_stdout=True)
self.assertEqual('data2', stdout)
if RUN_S3_TESTS:
# S3 GETs of invalid versions return 400s.
# Also, appending between 1 and 3 characters to the version_id can
# result in a success (200) response from the server.
stderr = self.RunGsUtil(['cat', uri2.version_specific_uri + '23456'],
return_stderr=True, expected_status=1)
self.assertIn('BadRequestException: 400', stderr)
else:
# Attempting to cat invalid version should result in an error.
stderr = self.RunGsUtil(['cat', uri2.version_specific_uri + '23'],
return_stderr=True, expected_status=1)
self.assertIn('No URLs matched', stderr)
def test_cat_multi_arg(self):
"""Tests cat command with multiple arguments."""
bucket_uri = self.CreateBucket()
data1 = '0123456789'
data2 = 'abcdefghij'
obj_uri1 = self.CreateObject(bucket_uri=bucket_uri, contents=data1)
obj_uri2 = self.CreateObject(bucket_uri=bucket_uri, contents=data2)
stdout, stderr = self.RunGsUtil(
['cat', suri(obj_uri1), suri(bucket_uri) + 'nonexistent'],
return_stdout=True, return_stderr=True, expected_status=1)
# First object should print, second should produce an exception.
self.assertIn(data1, stdout)
self.assertIn('NotFoundException', stderr)
stdout, stderr = self.RunGsUtil(
['cat', suri(bucket_uri) + 'nonexistent', suri(obj_uri1)],
return_stdout=True, return_stderr=True, expected_status=1)
# If first object is invalid, exception should halt output immediately.
self.assertNotIn(data1, stdout)
self.assertIn('NotFoundException', stderr)
# Two valid objects should both print successfully.
stdout = self.RunGsUtil(['cat', suri(obj_uri1), suri(obj_uri2)],
return_stdout=True)
self.assertIn(data1 + data2, stdout)
|
apache-2.0
| 8,115,844,234,273,341,000 | -7,964,450,801,963,474,000 | 44.236364 | 76 | 0.646704 | false |
stephane-martin/salt-debian-packaging
|
salt-2016.3.3/salt/modules/chef.py
|
2
|
5099
|
# -*- coding: utf-8 -*-
'''
Execute chef in server or solo mode
'''
# Import Python libs
from __future__ import absolute_import
import logging
import os
import tempfile
# Import Salt libs
import salt.utils
import salt.utils.decorators as decorators
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if chef is installed
'''
if not salt.utils.which('chef-client'):
return (False, 'Cannot load chef module: chef-client not found')
return True
def _default_logfile(exe_name):
'''
Retrieve the logfile name
'''
if salt.utils.is_windows():
tmp_dir = os.path.join(__opts__['cachedir'], 'tmp')
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
logfile_tmp = tempfile.NamedTemporaryFile(dir=tmp_dir,
prefix=exe_name,
suffix='.log',
delete=False)
logfile = logfile_tmp.name
logfile_tmp.close()
else:
logfile = salt.utils.path_join(
'/var/log',
'{0}.log'.format(exe_name)
)
return logfile
@decorators.which('chef-client')
def client(whyrun=False,
localmode=False,
logfile=None,
**kwargs):
'''
Execute a chef client run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.client server=https://localhost
server
The chef server URL
client_key
Set the client key file location
config
The configuration file to use
config-file-jail
Directory under which config files are allowed to be loaded
(no client.rb or knife.rb outside this path will be loaded).
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
localmode
Point chef-client at local repository if True
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
pid
Set the PID file location, defaults to /tmp/chef-client.pid
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
runlist
Permanently replace current run list with specified items
user
User to set privilege to
validation_key
Set the validation key file location, used for registering new clients
whyrun
Enable whyrun mode when set to True
'''
if logfile is None:
logfile = _default_logfile('chef-client')
args = ['chef-client',
'--no-color',
'--once',
'--logfile "{0}"'.format(logfile),
'--format doc']
if whyrun:
args.append('--why-run')
if localmode:
args.append('--local-mode')
return _exec_cmd(*args, **kwargs)
@decorators.which('chef-solo')
def solo(whyrun=False,
logfile=None,
**kwargs):
'''
Execute a chef solo run and return a dict with the stderr, stdout,
return code, and pid.
CLI Example:
.. code-block:: bash
salt '*' chef.solo override-runlist=test
config
The configuration file to use
environment
Set the Chef Environment on the node
group
Group to set privilege to
json-attributes
Load attributes from a JSON file or URL
log_level
Set the log level (debug, info, warn, error, fatal)
logfile
Set the log file location
node-name
The node name for this client
override-runlist
Replace current run list with specified items for a single run
recipe-url
Pull down a remote gzipped tarball of recipes and untar it to
the cookbook cache
run-lock-timeout
Set maximum duration to wait for another client run to finish,
default is indefinitely.
user
User to set privilege to
whyrun
Enable whyrun mode when set to True
'''
if logfile is None:
logfile = _default_logfile('chef-solo')
args = ['chef-solo',
'--no-color',
'--logfile "{0}"'.format(logfile),
'--format doc']
if whyrun:
args.append('--why-run')
return _exec_cmd(*args, **kwargs)
def _exec_cmd(*args, **kwargs):
# Compile the command arguments
cmd_args = ' '.join(args)
cmd_kwargs = ''.join([
' --{0} {1}'.format(k, v)
for k, v in six.iteritems(kwargs) if not k.startswith('__')
])
cmd_exec = '{0}{1}'.format(cmd_args, cmd_kwargs)
log.debug('Chef command: {0}'.format(cmd_exec))
return __salt__['cmd.run_all'](cmd_exec, python_shell=False)
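# Illustrative expansion (hypothetical values, not part of the original
# module): _exec_cmd('chef-client', '--no-color', environment='production')
# builds the command string "chef-client --no-color --environment production"
# before handing it to cmd.run_all.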
|
apache-2.0
| -1,005,952,154,116,617,500 | 5,378,077,495,326,983,000 | 22.283105 | 78 | 0.589135 | false |
funkring/fdoo
|
addons-funkring/util_test/models/util_test.py
|
1
|
2062
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import base64
import logging
from openerp import models
from openerp import tools
_logger = logging.getLogger(__name__)
class UtilTest(models.AbstractModel):
_name = "util.test"
def _testDownloadAttachments(self, obj=None, prefix=None):
if not obj:
obj = self
test_download = tools.config.get("test_download")
res = []
if test_download:
att_obj = obj.env["ir.attachment"]
for att in att_obj.search(
[("res_model", "=", obj._model._name), ("res_id", "=", obj.id)]
):
file_name = att.datas_fname
if prefix:
file_name = "%s%s" % (prefix, file_name)
                download_path = os.path.join(test_download, file_name)
with open(download_path, "wb") as f:
if att.datas:
f.write(base64.decodestring(att.datas))
res.append(download_path)
_logger.info("Download %s" % download_path)
return res
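    # Illustrative usage (hypothetical record and prefix, not part of the
    # original module): calling self._testDownloadAttachments(obj=invoice,
    # prefix="inv_") inside a test writes each attachment of `invoice` to
    # the directory named by the "test_download" config option and returns
    # the written paths.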
|
agpl-3.0
| -3,986,360,817,988,399,600 | -4,053,035,199,388,927,000 | 35.821429 | 79 | 0.544132 | false |
ak2703/edx-platform
|
lms/djangoapps/student_profile/views.py
|
11
|
4214
|
""" Views for a student's profile information. """
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django_countries import countries
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.views.decorators.http import require_http_methods
from edxmako.shortcuts import render_to_response
from openedx.core.djangoapps.user_api.accounts.api import get_account_settings
from openedx.core.djangoapps.user_api.accounts.serializers import PROFILE_IMAGE_KEY_PREFIX
from openedx.core.djangoapps.user_api.errors import UserNotFound, UserNotAuthorized
from openedx.core.djangoapps.user_api.preferences.api import get_user_preferences
from student.models import User
from microsite_configuration import microsite
@login_required
@require_http_methods(['GET'])
def learner_profile(request, username):
"""Render the profile page for the specified username.
Args:
request (HttpRequest)
username (str): username of user whose profile is requested.
Returns:
HttpResponse: 200 if the page was sent successfully
HttpResponse: 302 if not logged in (redirect to login page)
HttpResponse: 405 if using an unsupported HTTP method
Raises:
Http404: 404 if the specified user is not authorized or does not exist
Example usage:
GET /account/profile
"""
try:
return render_to_response(
'student_profile/learner_profile.html',
learner_profile_context(request.user, username, request.user.is_staff, request.build_absolute_uri)
)
except (UserNotAuthorized, UserNotFound, ObjectDoesNotExist):
raise Http404
def learner_profile_context(logged_in_user, profile_username, user_is_staff, build_absolute_uri_func):
"""Context for the learner profile page.
Args:
logged_in_user (object): Logged In user.
profile_username (str): username of user whose profile is requested.
user_is_staff (bool): Logged In user has staff access.
        build_absolute_uri_func (callable): used to convert relative profile image URLs into absolute URIs.
Returns:
dict
Raises:
ObjectDoesNotExist: the specified profile_username does not exist.
"""
profile_user = User.objects.get(username=profile_username)
own_profile = (logged_in_user.username == profile_username)
account_settings_data = get_account_settings(logged_in_user, profile_username)
# Account for possibly relative URLs.
for key, value in account_settings_data['profile_image'].items():
if key.startswith(PROFILE_IMAGE_KEY_PREFIX):
account_settings_data['profile_image'][key] = build_absolute_uri_func(value)
preferences_data = get_user_preferences(profile_user, profile_username)
context = {
'data': {
'profile_user_id': profile_user.id,
'default_public_account_fields': settings.ACCOUNT_VISIBILITY_CONFIGURATION['public_fields'],
'default_visibility': settings.ACCOUNT_VISIBILITY_CONFIGURATION['default_visibility'],
'accounts_api_url': reverse("accounts_api", kwargs={'username': profile_username}),
'preferences_api_url': reverse('preferences_api', kwargs={'username': profile_username}),
'preferences_data': preferences_data,
'account_settings_data': account_settings_data,
'profile_image_upload_url': reverse('profile_image_upload', kwargs={'username': profile_username}),
'profile_image_remove_url': reverse('profile_image_remove', kwargs={'username': profile_username}),
'profile_image_max_bytes': settings.PROFILE_IMAGE_MAX_BYTES,
'profile_image_min_bytes': settings.PROFILE_IMAGE_MIN_BYTES,
'account_settings_page_url': reverse('account_settings'),
'has_preferences_access': (logged_in_user.username == profile_username or user_is_staff),
'own_profile': own_profile,
'country_options': list(countries),
'language_options': settings.ALL_LANGUAGES,
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
}
}
return context
|
agpl-3.0
| 1,221,803,300,884,668,400 | 7,108,304,843,633,567,000 | 42.443299 | 111 | 0.700047 | false |
ssuarez6/lyra
|
single_variable_eq/python/multiple_root.py
|
1
|
1385
|
from astropy.table import Table
import parser, decimal, plot
c = raw_input("Absolute(a) or Relative(r) error?\n>")
abs_error = False
if c == 'a':
abs_error = True
fn = raw_input("\nType the function:\n>f(x) = ")
fn_p = raw_input("\nType the differential of the function:\n>f'(x) = ")
fn_p_p = raw_input("\nType the second differential of the function:\n>f''(x) = ")
x0 = float(raw_input("\nType the initial value\n>"))
tol = float(raw_input("\nType the tolerance\n>"))
iterations = int(raw_input("\nType the maximum iterations\n>"))
y = parser.eval(fn, x0)
error = tol + 1
cont = 0
rows = []
s = "|\titers\t|\t\tXn\t|\t\tf(Xn)\t\t|\t\tError\t\t|\n"
while y != 0 and error > tol and cont < iterations:
y = parser.eval(fn, x0)
y_prime = parser.eval(fn_p, x0)
y_p_p = parser.eval(fn_p_p, x0)
xn = x0 - (y*y_prime)/((y_prime*y_prime) - y*y_p_p)
if abs_error:
error = abs(xn-x0)
else:
error = abs((xn-x0)/xn)
x0 = xn
#s = s + "|\t"+str(cont)+"\t|\t"+str(xn)+"\t|\t"+str(y)+"\t\t|\t"+str(error)+"\t|\n"
v = (cont,xn,y,error)
rows.append(v)
cont = cont+1
t = Table(rows=rows, names=('Iterations', 'xn', 'F(xn)', 'Error'))
print(t)
print("")
if y == 0:
print x0,"is a root"
elif error <= tol:
print x0,"is a root with error=",error
else:
print "couldn't find any root after",cont,"iterations"
#print s
plot.graficar(fn)
|
gpl-2.0
| -8,192,726,917,480,040,000 | -1,687,242,338,562,401,000 | 30.477273 | 88 | 0.597112 | false |
openelisglobal/openelisglobal-sandbox
|
installerTemplate/linux/fixes/collapseTests/testReconfiguration_Linux.py
|
7
|
19653
|
#! /usr/bin/python
from optparse import OptionParser
from pyPgSQL import PgSQL
from decimal import *
__author__ = "paulsc"
__date__ = "$Apr 20, 2011 1:29:05 PM$"
conn = ""
cursor = ""
test_fields = "id, method_id, uom_id, description, loinc, reporting_description, sticker_req_flag, is_active, active_begin, active_end, is_reportable, time_holding, time_wait, time_ta_average, time_ta_warning, time_ta_max, label_qty, lastupdated, label_id, test_trailer_id, test_section_id, scriptlet_id, test_format_id, local_abbrev, sort_order, name, display_key"
test_sample_type_map = dict()
type_id_to_name_map = dict()
prefix_to_id_map = dict()
def connect( password, database):
global conn
global cursor
conn = PgSQL.connect(host="localhost", user="clinlims", database=database, password=password)
cursor = conn.cursor()
def update_database( password, database):
connect(password, database)
get_tests_with_common_prefixs()
#list_prefixs()
collapse_associations()
conn.commit()
cursor.close()
conn.close()
def get_tests_with_common_prefixs():
global prefix_to_id_map
cursor.execute("select id, description from clinlims.test")
rows = cursor.fetchall()
tmpMap = dict()
for row in rows:
id, name = row
prefix = get_name_prefix( name )
if prefix in tmpMap:
collapsing_ids = tmpMap[prefix]
else:
collapsing_ids = []
tmpMap[prefix] = collapsing_ids
collapsing_ids.append( id )
for key in tmpMap.keys():
if len(tmpMap[key]) > 1:
prefix_to_id_map[key] = tmpMap[key]
def get_name_prefix( name ):
#special case Cell Epitheliales
if name.find("Cell Epith") != -1:
return "Cell Epith"
return name.split("(", 1)[0].rstrip()
def list_prefixs():
for key in prefix_to_id_map:
print key
def collapse_associations():
for key in prefix_to_id_map:
associated_test_ids = prefix_to_id_map[key]
focal_test_id = ""
        consolidate = True
if special_case(key):
print "Special casing " + key
focal_test_id = get_special_case_focal_id_for( key, associated_test_ids )
associated_test_ids.remove( focal_test_id )
else:
focal_test_id = associated_test_ids.pop()
if test_results_differ(key, focal_test_id, associated_test_ids ):
print "\tReference test id: " + str(focal_test_id) + " comparing test ids " + list_to_comma_separated_string( associated_test_ids )
consolidate = False
if result_limits_differ(key, focal_test_id, associated_test_ids ):
print "\tReference test id: " + str(focal_test_id) + " comparing test ids " + list_to_comma_separated_string( associated_test_ids )
consolidate = False
if test_reflexes_differ(key, focal_test_id, associated_test_ids ):
print "\tReference test id: " + str(focal_test_id) + " comparing test ids " + list_to_comma_separated_string( associated_test_ids )
consolidate = False
if test_analyte_differ(key, focal_test_id, associated_test_ids):
print "\tReference test id: " + str(focal_test_id) + " comparing test ids " + list_to_comma_separated_string( associated_test_ids )
consolidate = False
if not consolidate:
continue
print "Consolidating " + key
idString = list_to_comma_separated_string( associated_test_ids)
focal_test_id = str( focal_test_id )
cursor.execute("delete from clinlims.result_limits where test_id in ( " + idString + ")")
cursor.execute("delete from clinlims.test_reflex where test_id in ( " + idString + " )")
cursor.execute("delete from clinlims.test_analyte where test_id in ( " + idString + " )")
update_test_result_id( idString, focal_test_id, key)
cursor.execute("delete from clinlims.test_result where test_id in ( " + idString + " )")
cursor.execute("update clinlims.sampletype_test set test_id = %s where test_id in ( " + idString + " )", (focal_test_id ))
cursor.execute("update clinlims.analysis set test_id = %s where test_id in ( " + idString + " )", (focal_test_id))
cursor.execute("update clinlims.referral_result set test_id = %s where test_id in ( " + idString + " )", (focal_test_id))
cursor.execute("delete from clinlims.test where id in ( " + idString + " )")
rename = rename_test(focal_test_id )
if rename.find("Culture") != -1:
update_results_table( focal_test_id )
def special_case( prefix ):
return prefix.find("Bact") != -1 or \
prefix.find("Chlore") != -1 or \
prefix.find("Vitesse") != -1 or \
prefix.find("VIH Elisa") != -1 or \
prefix.find("Malaria") != -1 or \
prefix.find("Mantoux") != -1 or \
prefix.find("VIH Western Blot") != -1 or \
prefix.find("Cell Epith") != -1 or \
prefix.find("Culture") != -1 or \
prefix.find("VIH Test Rapide") != -1
def get_special_case_focal_id_for( prefix, test_id_list ):
if prefix.find("Bact") != -1:
return find_id_for_type( "Urine", test_id_list )
elif prefix.find("Chlore") != -1:
return find_id_for_type( "Serum", test_id_list )
elif prefix.find("Vitesse") != -1:
return find_id_for_type( "Sang", test_id_list )
elif prefix.find("VIH Elisa") != -1:
return test_id_list[0]
elif prefix.find("Cell Epith") != -1:
return find_correct_cell_epith( test_id_list)
elif prefix.find("Malaria") != -1:
return find_correct_malaria( test_id_list)
elif prefix.find("Mantoux") != -1:
return find_correct_matoux( test_id_list)
elif prefix.find("VIH Western Blot") != -1:
return find_id_for_type( "DBS", test_id_list )
elif prefix.find("VIH Test Rapide") != -1:
return find_id_for_type( "DBS", test_id_list )
elif prefix.find("Culture") != -1:
return find_correct_culture( test_id_list)
return ""
def find_id_for_type( name, test_id_list ):
for id in test_id_list:
cursor.execute("select description from clinlims.test where id = %s", ( str(id)))
found_name = cursor.fetchone()[0]
if found_name.find( name ) != -1:
print "\tCollapsing to " + found_name
return id
return ""
def update_test_result_id( idString, focal_test_id, key):
cursor.execute("select id, result_type, value from clinlims.result " +
" where test_result_id is not NULL and " +
" analysis_id in ( select id from clinlims.analysis where test_id in (" + idString +"))")
focal_result_type_A_or_N = False
resultList = cursor.fetchall()
for result in resultList:
        id, type, value = result
if type == 'A' or type == 'N':
if not focal_result_type_A_or_N:
cursor.execute("select id from clinlims.test_result where test_id = %s", (str(focal_test_id)))
focal_result_type_A_or_N = cursor.fetchone()[0]
cursor.execute( "update clinlims.result set test_result_id = %s where id = %s", (str(focal_result_type_A_or_N), str(id)))
elif key == "Culture":
cursor.execute("select id from clinlims.test_result where test_id = %s", (str(focal_test_id)) )
test_result_id = cursor.fetchone()[0]
            #note this will result in the result type differing from the test_result result_type but that will be corrected later
cursor.execute( "update clinlims.result set test_result_id = %s, value = (select value from clinlims.test_result tr where tr.id = %s) " +
"where id = %s", (str(test_result_id), str(test_result_id), str(id)))
elif type == 'D' or type == 'M':
cursor.execute( "select dict_entry from dictionary where id = %s", (str(value)))
result_dictionary_value = cursor.fetchone()[0]
cursor.execute("select tr.id from clinlims.test_result tr " +
" join clinlims.dictionary d on d.id = CAST( tr.value as numeric) " +
" where test_id = %s and d.dict_entry = %s", (str(focal_test_id), result_dictionary_value) )
#print "focal_id = " + str(focal_test_id) + " moving id = " + str(id) + " value = " + str(value) + " dictionary = " + result_dictionary_value
test_result_id = cursor.fetchone()
if not test_result_id and key == "VIH Test Rapide":
cursor.execute("select tr.id from clinlims.test_result tr " +
" join clinlims.dictionary d on d.id = CAST( tr.value as numeric) " +
" where test_id = " + str(focal_test_id) + " and d.dict_entry like 'Ind%'" )
test_result_id = cursor.fetchone()
cursor.execute( "update clinlims.result set test_result_id = %s, value = (select value from clinlims.test_result tr where tr.id = %s) " +
" where id = %s", (str(test_result_id[0]), str(test_result_id[0]), str(id)))
def rename_test( test_id):
cursor.execute("select description from clinlims.test where id = %s", (test_id))
full_name = cursor.fetchone()[0]
name = full_name.split("(")[0].rstrip()
print "\tNaming " + name
cursor.execute("update clinlims.test set description = %s, name= %s where id = %s", (name, name, str(test_id)))
return name
def find_correct_cell_epith( test_list_id ):
cursor.execute("select id from clinlims.test where description like '%(%' and id in ( " + list_to_comma_separated_string(test_list_id) + ")")
return cursor.fetchone()[0]
def find_correct_culture( test_list_id ):
cursor.execute("select test_id from clinlims.test_result where tst_rslt_type = 'A' and test_id in ( " + list_to_comma_separated_string(test_list_id) + ")")
return cursor.fetchone()[0]
def find_correct_malaria( test_id_list ):
for id in test_id_list:
cursor.execute("select description from clinlims.test where id = %s", ( str(id)))
found_name = str(cursor.fetchone()[0]).strip()
if found_name == "Malaria":
print "\tCollapsing to " + found_name
return id
return ""
def find_correct_matoux( test_id_list ):
for id in test_id_list:
cursor.execute("select description from clinlims.test where id = %s", ( str(id)))
found_name = cursor.fetchone()[0]
if found_name.find( "Invalid" ) == -1:
print "\tCollapsing to " + found_name
return id
return ""
def update_results_table( focal_test_id ):
cursor.execute("update clinlims.result " +
"set value = (select dict_entry from clinlims.dictionary d where d.id = CAST(value as numeric)), result_type = 'A' " +
"where analysis_id in (select id from clinlims.analysis a where a.test_id = %s ) and result_type = 'D' " , focal_test_id)
def test_results_differ(prefix, focal_test_id, associated_test_ids ):
cursor.execute("select tst_rslt_type, value from clinlims.test_result where test_id = %s", (str(focal_test_id)))
focal_results = cursor.fetchall()
if len( focal_results ):
focal_list = []
for result in focal_results:
type, value = result
if not value:
value = "none"
focal_list.append(type + value)
number_results = len( focal_results)
for id in associated_test_ids:
cursor.execute("select tst_rslt_type, value from clinlims.test_result where test_id = %s", (str(id)))
id_results = cursor.fetchall()
if len( id_results ) != number_results:
print "Not consolidating " + prefix + " because because some tests have " + str(number_results) + " test results and others have " + str(len(id_results))
return 1
for result in id_results:
type, value = result
if not value:
value = "none"
target = type + value
if not target in focal_list:
print "Not consolidating " + prefix + " because because some tests do not have the same result_type as others"
return 1
else:
for id in associated_test_ids:
cursor.execute("select tst_rslt_type, value from clinlims.test_result where test_id = %s", (str(id)))
id_results = cursor.fetchall()
if len( id_results ) != 0:
print "Not consolidating " + prefix + " because because some tests have no test results and others have " + str(len(id_results))
return 1
return False
def result_limits_differ(prefix, focal_test_id, associated_test_ids ):
cursor.execute("select CAST(test_result_type_id as text), CAST(min_age as text), CAST(max_age as text), gender, CAST(low_normal as text),CAST(high_normal as text), CAST(low_valid as text), CAST(high_valid as text) from clinlims.result_limits where test_id = %s", (str(focal_test_id)))
focal_limits = cursor.fetchall()
if len( focal_limits ):
focal_list = []
for limit in focal_limits:
focal_list.append("".join(limit))
number_limits = len( focal_limits)
for id in associated_test_ids:
cursor.execute("select CAST(test_result_type_id as text), CAST(min_age as text), CAST(max_age as text), gender, CAST(low_normal as text),CAST(high_normal as text), CAST(low_valid as text), CAST(high_valid as text) from clinlims.result_limits where test_id = %s", (str(id)))
id_limits = cursor.fetchall()
if len( id_limits ) != number_limits:
print "Not consolidating " + prefix + " because because some tests have " + str(number_limits) + " test limits and others have " + str(len(id_limits))
return 1
for limit in id_limits:
if not "".join(limit) in focal_list:
print "Not consolidating " + prefix + " because because some tests do not have the same result limits as others"
return 1
else:
for id in associated_test_ids:
cursor.execute("select test_result_type_id, min_age, max_age, gender, low_normal,high_normal, low_valid, high_valid from clinlims.result_limits where test_id = %s", (str(id)))
id_limits = cursor.fetchall()
if len( id_limits ) != 0:
print "Not consolidating " + prefix + " because because some tests have no result limits and others have " + str(len(id_limits))
return 1
return False
def test_reflexes_differ(prefix, focal_test_id, associated_test_ids ):
cursor.execute("select CAST(tst_rslt_id as text), CAST(test_analyte_id as text), CAST(coalesce(add_test_id, 1 ) as text),CAST(coalesce(sibling_reflex, 1 ) as text),CAST(coalesce(scriptlet_id, 1 ) as text) from clinlims.test_reflex where test_id = %s", (str(focal_test_id)))
focal_reflexes = cursor.fetchall()
if len( focal_reflexes ):
focal_list = []
for reflex in focal_reflexes:
print reflex
focal_list.append("".join(reflex))
number_reflexes = len( focal_reflexes)
for id in associated_test_ids:
cursor.execute("select CAST(tst_rslt_id as text), CAST(test_analyte_id as text), CAST(coalesce(add_test_id, 1 ) as text),CAST(coalesce(sibling_reflex, 1 ) as text),CAST(coalesce(scriptlet_id, 1 ) as text) from clinlims.test_reflex where test_id = %s", (str(id)))
id_reflexes = cursor.fetchall()
if len( id_reflexes ) != number_reflexes:
print "Not consolidating " + prefix + " because because some tests have " + str(number_reflexes) + " test reflexes and others have " + str(len(id_reflexes))
return 1
for reflex in id_reflexes:
if not "".join(reflex) in focal_list:
print "Not consolidating " + prefix + " because because some tests do not have the same test_reflexes as others"
return 1
else:
for id in associated_test_ids:
cursor.execute("select CAST(tst_rslt_id as text), CAST(test_analyte_id as text), CAST(coalesce(add_test_id, 1 ) as text),CAST(coalesce(sibling_reflex, 1 ) as text),CAST(coalesce(scriptlet_id, 1 ) as text) from clinlims.test_reflex where test_id = %s", (str(id)))
id_reflexes = cursor.fetchall()
if len( id_reflexes ) != 0:
print "Not consolidating " + prefix + " because because some tests have no test reflexes and others have " + str(len(id_reflexes))
return 1
return False
def test_analyte_differ(prefix, focal_test_id, associated_test_ids ):
cursor.execute("select CAST(analyte_id as text), CAST(result_group as text), CAST(result_group as text), COALESCE(testalyt_type, ' ') from clinlims.test_analyte where test_id = %s", (str(focal_test_id)))
focal_analytes = cursor.fetchall()
if len( focal_analytes ):
focal_list = []
for test_analyte in focal_analytes:
focal_list.append("".join(test_analyte))
number_analytes = len( focal_analytes)
for id in associated_test_ids:
cursor.execute("select CAST(analyte_id as text), CAST(result_group as text), CAST(result_group as text), COALESCE(testalyt_type, ' ' ) from clinlims.test_analyte where test_id = %s", (str(id)))
id_analytes = cursor.fetchall()
if len( id_analytes ) != number_analytes:
print "Not consolidating " + prefix + " because because some tests have " + str(number_analytes) + " test analytes and others have " + str(len(id_analytes))
return 1
for test_analyte in id_analytes:
if not "".join(test_analyte) in focal_list:
print "Not consolidating " + prefix + " because because some tests do not have the same test_analytes as others"
else:
for id in associated_test_ids:
cursor.execute("select CAST(analyte_id as text), CAST(result_group as text), CAST(result_group as text), COALESCE(testalyt_type, ' ' ) from clinlims.test_analyte where test_id = %s", (str(id)))
id_analytes = cursor.fetchall()
if len( id_analytes ) != 0:
print "Not consolidating " + prefix + " because because some tests have no test analytes and others have " + str(len(id_analytes))
return 1
return False
def list_to_comma_separated_string( list ):
new_string = ""
use_separator = False
for id in list:
if not id:
continue
if use_separator:
new_string += ", "
new_string += str(id)
use_separator = 1
return new_string
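#For reference (added comment): list_to_comma_separated_string([5, 7, 9]) -> "5, 7, 9"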
if __name__ == "__main__":
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("-d", "--database", dest="database", type="string",
help="database to which the change should be applied")
parser.add_option("-p", "--password", dest="password", type="string",
help="The password for the database, assumes user clinlims")
(options, args) = parser.parse_args()
if not options.database or not options.password:
parser.error("Both password and database are required Use -h for help")
update_database( options.password, options.database )
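#Illustrative invocation (added comment; database name and password are placeholders, not from the original script):
#  python testReconfiguration_Linux.py -d somedb -p somepassword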
|
mpl-2.0
| 401,926,605,306,103,000 | -5,283,089,694,036,018,000 | 45.242353 | 365 | 0.603928 | false |
fedora-infra/python-fedora
|
fedora/urlutils.py
|
1
|
3429
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Red Hat, Inc.
# This file is part of python-fedora
#
# python-fedora is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# python-fedora is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
'''
Functions to manipulate urls.
.. versionadded:: 0.3.17
.. moduleauthor:: John (J5) Palmieri <[email protected]>
.. moduleauthor:: Toshio Kuratomi <[email protected]>
'''
from kitchen.iterutils import isiterable
from six.moves.urllib.parse import parse_qs, urlencode, urlparse, urlunparse
def update_qs(uri, new_params, overwrite=True):
'''Helper function for updating query string values.
Similar to calling update on a dictionary except we modify the query
string of the uri instead of another dictionary.
:arg uri: URI to modify
:arg new_params: Dict of new query parameters to add.
:kwarg overwrite: If True (default), any old query parameter with the same
name as a new query parameter will be overwritten. If False, the new
query parameters will be appended to a list with the old parameters at
the start of the list.
:returns: URI with the new parameters added.
'''
loc = list(urlparse(uri))
query_dict = parse_qs(loc[4])
if overwrite:
# Overwrite any existing values with the new values
query_dict.update(new_params)
else:
# Add new values in addition to the existing parameters
# For every new entry
for key in new_params:
# If the entry already is present
if key in query_dict:
if isiterable(new_params[key]):
# If the new entry is a non-string iterable
try:
# Try to add the new values to the existing entry
query_dict[key].extend(new_params[key])
except AttributeError:
# Existing value wasn't a list, make it one
query_dict[key] = [query_dict[key], new_params[key]]
else:
# The new entry is a scalar, so try to append it
try:
query_dict[key].append(new_params[key])
except AttributeError:
# Existing value wasn't a list, make it one
query_dict[key] = [query_dict[key], new_params[key]]
else:
# No previous entry, just set to the new entry
query_dict[key] = new_params[key]
# seems that we have to sanitize a bit here
query_list = []
for key, value in query_dict.items():
if isiterable(value):
for item in value:
query_list.append((key, item))
continue
query_list.append((key, value))
loc[4] = urlencode(query_list)
return urlunparse(loc)
__all__ = ['update_qs']
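# Illustrative sketch of update_qs (added example, not part of the original
# module; parameter order in the output can vary across Python versions):
#
# update_qs('http://example.com/app?page=1', {'sort': 'name'})
# -> 'http://example.com/app?page=1&sort=name'
# update_qs('http://example.com/app?page=1', {'page': '2'})
# -> 'http://example.com/app?page=2'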
|
lgpl-2.1
| 8,520,250,441,508,673,000 | 463,485,536,644,754,560 | 37.1 | 78 | 0.621172 | false |
mgaitan/scipy
|
scipy/io/harwell_boeing/_fortran_format_parser.py
|
127
|
9092
|
"""
Preliminary module to handle fortran formats for IO. Do not use this outside
scipy.sparse io for now, until the API is deemed reasonable.
The *Format classes handle conversion between fortran and python format, and
FortranFormatParser can create *Format instances from raw fortran format
strings (e.g. '(3I4)', '(10I3)', etc...)
"""
from __future__ import division, print_function, absolute_import
import re
import warnings
import numpy as np
__all__ = ["BadFortranFormat", "FortranFormatParser", "IntFormat", "ExpFormat"]
TOKENS = {
"LPAR": r"\(",
"RPAR": r"\)",
"INT_ID": r"I",
"EXP_ID": r"E",
"INT": r"\d+",
"DOT": r"\.",
}
class BadFortranFormat(SyntaxError):
pass
def number_digits(n):
return int(np.floor(np.log10(np.abs(n))) + 1)
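# For reference (added comment): number_digits(5) -> 1, number_digits(999) -> 3,
# number_digits(-1000) -> 4; the sign is accounted for separately by callers.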
class IntFormat(object):
@classmethod
def from_number(cls, n, min=None):
"""Given an integer, returns a "reasonable" IntFormat instance to represent
any number between 0 and n if n > 0, -n and n if n < 0
Parameters
----------
n : int
max number one wants to be able to represent
min : int
minimum number of characters to use for the format
Returns
-------
res : IntFormat
IntFormat instance with reasonable (see Notes) computed width
Notes
-----
Reasonable should be understood as the minimal string length necessary
without losing precision. For example, IntFormat.from_number(1) will
return an IntFormat instance of width 2, so that any 0 and 1 may be
represented as 1-character strings without loss of information.
"""
width = number_digits(n) + 1
if n < 0:
width += 1
repeat = 80 // width
return cls(width, min, repeat=repeat)
def __init__(self, width, min=None, repeat=None):
self.width = width
self.repeat = repeat
self.min = min
def __repr__(self):
r = "IntFormat("
if self.repeat:
r += "%d" % self.repeat
r += "I%d" % self.width
if self.min:
r += ".%d" % self.min
return r + ")"
@property
def fortran_format(self):
r = "("
if self.repeat:
r += "%d" % self.repeat
r += "I%d" % self.width
if self.min:
r += ".%d" % self.min
return r + ")"
@property
def python_format(self):
return "%" + str(self.width) + "d"
class ExpFormat(object):
@classmethod
def from_number(cls, n, min=None):
"""Given a float number, returns a "reasonable" ExpFormat instance to
represent any number between -n and n.
Parameters
----------
n : float
max number one wants to be able to represent
min : int
minimum number of characters to use for the format
Returns
-------
res : ExpFormat
ExpFormat instance with reasonable (see Notes) computed width
Notes
-----
Reasonable should be understood as the minimal string length necessary
to avoid losing precision.
"""
# len of one number in exp format: sign + 1|0 + "." +
# number of digit for fractional part + 'E' + sign of exponent +
# len of exponent
finfo = np.finfo(n.dtype)
# Number of digits for fractional part
n_prec = finfo.precision + 1
# Number of digits for exponential part
n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp])))
width = 1 + 1 + n_prec + 1 + n_exp + 1
if n < 0:
width += 1
repeat = int(np.floor(80 / width))
return cls(width, n_prec, min, repeat=repeat)
def __init__(self, width, significand, min=None, repeat=None):
"""\
Parameters
----------
width : int
number of characters taken by the string (includes space).
"""
self.width = width
self.significand = significand
self.repeat = repeat
self.min = min
def __repr__(self):
r = "ExpFormat("
if self.repeat:
r += "%d" % self.repeat
r += "E%d.%d" % (self.width, self.significand)
if self.min:
r += "E%d" % self.min
return r + ")"
@property
def fortran_format(self):
r = "("
if self.repeat:
r += "%d" % self.repeat
r += "E%d.%d" % (self.width, self.significand)
if self.min:
r += "E%d" % self.min
return r + ")"
@property
def python_format(self):
return "%" + str(self.width-1) + "." + str(self.significand) + "E"
class Token(object):
def __init__(self, type, value, pos):
self.type = type
self.value = value
self.pos = pos
def __str__(self):
return """Token('%s', "%s")""" % (self.type, self.value)
def __repr__(self):
return self.__str__()
class Tokenizer(object):
def __init__(self):
self.tokens = list(TOKENS.keys())
self.res = [re.compile(TOKENS[i]) for i in self.tokens]
def input(self, s):
self.data = s
self.curpos = 0
self.len = len(s)
def next_token(self):
curpos = self.curpos
tokens = self.tokens
while curpos < self.len:
for i, r in enumerate(self.res):
m = r.match(self.data, curpos)
if m is None:
continue
else:
self.curpos = m.end()
return Token(self.tokens[i], m.group(), self.curpos)
else:
raise SyntaxError("Unknown character at position %d (%s)"
% (self.curpos, self.data[curpos]))
# Grammar for fortran format:
# format : LPAR format_string RPAR
# format_string : repeated | simple
# repeated : repeat simple
# simple : int_fmt | exp_fmt
# int_fmt : INT_ID width
# exp_fmt : simple_exp_fmt
# simple_exp_fmt : EXP_ID width DOT significand
# extended_exp_fmt : EXP_ID width DOT significand EXP_ID ndigits
# repeat : INT
# width : INT
# significand : INT
# ndigits : INT
# Naive fortran formatter - parser is hand-made
class FortranFormatParser(object):
"""Parser for fortran format strings. The parse method returns a *Format
instance.
Notes
-----
Only ExpFormat (exponential format for floating values) and IntFormat
(integer format) for now.
"""
def __init__(self):
self.tokenizer = Tokenizer()
def parse(self, s):
self.tokenizer.input(s)
tokens = []
try:
while True:
t = self.tokenizer.next_token()
if t is None:
break
else:
tokens.append(t)
return self._parse_format(tokens)
except SyntaxError as e:
raise BadFortranFormat(str(e))
def _get_min(self, tokens):
next = tokens.pop(0)
if not next.type == "DOT":
raise SyntaxError()
next = tokens.pop(0)
return next.value
def _expect(self, token, tp):
if not token.type == tp:
raise SyntaxError()
def _parse_format(self, tokens):
if not tokens[0].type == "LPAR":
raise SyntaxError("Expected left parenthesis at position "
"%d (got '%s')" % (0, tokens[0].value))
elif not tokens[-1].type == "RPAR":
raise SyntaxError("Expected right parenthesis at position "
"%d (got '%s')" % (len(tokens), tokens[-1].value))
tokens = tokens[1:-1]
types = [t.type for t in tokens]
if types[0] == "INT":
repeat = int(tokens.pop(0).value)
else:
repeat = None
next = tokens.pop(0)
if next.type == "INT_ID":
next = self._next(tokens, "INT")
width = int(next.value)
if tokens:
min = int(self._get_min(tokens))
else:
min = None
return IntFormat(width, min, repeat)
elif next.type == "EXP_ID":
next = self._next(tokens, "INT")
width = int(next.value)
next = self._next(tokens, "DOT")
next = self._next(tokens, "INT")
significand = int(next.value)
if tokens:
next = self._next(tokens, "EXP_ID")
next = self._next(tokens, "INT")
min = int(next.value)
else:
min = None
return ExpFormat(width, significand, min, repeat)
else:
raise SyntaxError("Invalid formater type %s" % next.value)
def _next(self, tokens, tp):
if not len(tokens) > 0:
raise SyntaxError()
next = tokens.pop(0)
self._expect(next, tp)
return next
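# Illustrative usage of the parser (added sketch, not part of the original
# module):
#
# p = FortranFormatParser()
# p.parse("(3I4)") # -> IntFormat(3I4); its .python_format is '%4d'
# p.parse("(10E23.15)") # -> ExpFormat(10E23.15); its .fortran_format is '(10E23.15)'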
|
bsd-3-clause
| -1,188,399,007,445,378,800 | -3,438,502,284,253,084,700 | 27.955414 | 83 | 0.526067 | false |
saguziel/incubator-airflow
|
airflow/contrib/example_dags/example_qubole_operator.py
|
6
|
5686
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import airflow
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from airflow.contrib.operators.qubole_operator import QuboleOperator
import filecmp
import random
default_args = {
'owner': 'airflow',
'depends_on_past': False,
    'start_date': airflow.utils.dates.days_ago(2),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False
}
# NOTE:: This is only an example DAG to highlight usage of QuboleOperator in various scenarios,
# some of the tasks may or may not work based on your QDS account setup
dag = DAG('example_qubole_operator', default_args=default_args, schedule_interval='@daily')
def compare_result(ds, **kwargs):
ti = kwargs['ti']
r1 = t1.get_results(ti)
r2 = t2.get_results(ti)
return filecmp.cmp(r1, r2)
t1 = QuboleOperator(
task_id='hive_show_table',
command_type='hivecmd',
query='show tables',
cluster_label='default',
fetch_logs=True, # If true, will fetch qubole command logs and concatenate them into corresponding airflow task logs
    tags='airflow_example_run', # To attach tags to qubole command, auto attach 3 tags - dag_id, task_id, run_id
qubole_conn_id='qubole_default', # Connection id to submit commands inside QDS, if not set "qubole_default" is used
dag=dag)
t2 = QuboleOperator(
task_id='hive_s3_location',
command_type="hivecmd",
script_location="s3n://public-qubole/qbol-library/scripts/show_table.hql",
    notify=True,
tags=['tag1', 'tag2'],
# If the script at s3 location has any qubole specific macros to be replaced
# macros='[{"date": "{{ ds }}"}, {"name" : "abc"}]',
trigger_rule="all_done",
dag=dag)
t3 = PythonOperator(
task_id='compare_result',
provide_context=True,
python_callable=compare_result,
trigger_rule="all_done",
dag=dag)
t3.set_upstream(t1)
t3.set_upstream(t2)
options = ['hadoop_jar_cmd', 'presto_cmd', 'db_query', 'spark_cmd']
branching = BranchPythonOperator(
task_id='branching',
python_callable=lambda: random.choice(options),
dag=dag)
branching.set_upstream(t3)
join = DummyOperator(
task_id='join',
trigger_rule='one_success',
dag=dag
)
t4 = QuboleOperator(
task_id='hadoop_jar_cmd',
command_type='hadoopcmd',
sub_command='jar s3://paid-qubole/HadoopAPIExamples/jars/hadoop-0.20.1-dev-streaming.jar -mapper wc -numReduceTasks 0 -input s3://paid-qubole/HadoopAPITests/data/3.tsv -output s3://paid-qubole/HadoopAPITests/data/3_wc',
cluster_label='default',
fetch_logs=True,
dag=dag)
t5 = QuboleOperator(
task_id='pig_cmd',
command_type="pigcmd",
script_location="s3://public-qubole/qbol-library/scripts/script1-hadoop-s3-small.pig",
parameters="key1=value1 key2=value2",
trigger_rule="all_done",
dag=dag)
t4.set_upstream(branching)
t5.set_upstream(t4)
t5.set_downstream(join)
t6 = QuboleOperator(
task_id='presto_cmd',
command_type='prestocmd',
query='show tables',
dag=dag)
t7 = QuboleOperator(
task_id='shell_cmd',
command_type="shellcmd",
script_location="s3://public-qubole/qbol-library/scripts/shellx.sh",
parameters="param1 param2",
trigger_rule="all_done",
dag=dag)
t6.set_upstream(branching)
t7.set_upstream(t6)
t7.set_downstream(join)
t8 = QuboleOperator(
task_id='db_query',
command_type='dbtapquerycmd',
query='show tables',
db_tap_id=2064,
dag=dag)
t9 = QuboleOperator(
task_id='db_export',
command_type='dbexportcmd',
mode=1,
hive_table='default_qubole_airline_origin_destination',
db_table='exported_airline_origin_destination',
partition_spec='dt=20110104-02',
dbtap_id=2064,
trigger_rule="all_done",
dag=dag)
t8.set_upstream(branching)
t9.set_upstream(t8)
t9.set_downstream(join)
t10 = QuboleOperator(
task_id='db_import',
command_type='dbimportcmd',
mode=1,
hive_table='default_qubole_airline_origin_destination',
db_table='exported_airline_origin_destination',
where_clause='id < 10',
db_parallelism=2,
dbtap_id=2064,
trigger_rule="all_done",
dag=dag)
prog = '''
import scala.math.random
import org.apache.spark._
/** Computes an approximation to pi */
object SparkPi {
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("Spark Pi")
val spark = new SparkContext(conf)
val slices = if (args.length > 0) args(0).toInt else 2
val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
val count = spark.parallelize(1 until n, slices).map { i =>
val x = random * 2 - 1
val y = random * 2 - 1
if (x*x + y*y < 1) 1 else 0
}.reduce(_ + _)
println("Pi is roughly " + 4.0 * count / n)
spark.stop()
}
}
'''
t11 = QuboleOperator(
task_id='spark_cmd',
command_type="sparkcmd",
program=prog,
language='scala',
arguments='--class SparkPi',
    tags='airflow_example_run',
dag=dag)
t11.set_upstream(branching)
t11.set_downstream(t10)
t10.set_downstream(join)
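# One illustrative way to exercise this DAG (added comment; assumes a
# configured Airflow environment with a valid Qubole connection, Airflow 1.x CLI):
# airflow trigger_dag example_qubole_operator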
|
apache-2.0
| -6,026,101,903,571,439,000 | 8,553,358,725,127,512,000 | 28.309278 | 223 | 0.684312 | false |
40223119/w16b_test
|
static/Brython3.1.3-20150514-095342/Lib/optparse.py
|
728
|
60616
|
"""A powerful, extensible, and easy-to-use option parser.
By Greg Ward <[email protected]>
Originally distributed as Optik.
For support, use the [email protected] mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext, ngettext
except ImportError:
def gettext(message):
return message
def ngettext(singular, plural, n):
if n == 1:
return singular
return plural
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError("subclasses must implement")
def format_heading(self, heading):
raise NotImplementedError("subclasses must implement")
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = self.width - self.current_indent
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = self.width - self.help_position
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
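# For reference (added comment):
# _parse_num("0x1f", int) -> 31; _parse_num("0b101", int) -> 5;
# _parse_num("017", int) -> 15; _parse_num("42", int) -> 42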
def _parse_int(val):
return _parse_num(val, int)
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_int, _("integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = [opt for opt in opts if opt]
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = sorted(attrs.keys())
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names. (The
# complicated check of builtins is only necessary for
# Python 2.1 and earlier, and is short-circuited by the
# first check on modern Pythons.)
import builtins
if ( isinstance(self.type, type) or
(hasattr(self.type, "__name__") and
getattr(builtins, self.type.__name__, None) is self.type) ):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif not isinstance(self.choices, (tuple, list)):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
not isinstance(self.callback_args, tuple)):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
not isinstance(self.callback_kwargs, dict)):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
# class Option
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __eq__(self, other):
if isinstance(other, Values):
return self.__dict__ == other.__dict__
elif isinstance(other, dict):
return self.__dict__ == other
else:
return NotImplemented
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError("invalid update mode: %r" % mode)
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
exec(open(filename).read(), vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appears in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError("invalid conflict_resolution value %r" % handler)
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if isinstance(args[0], str):
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError("not an Option instance: %r" % option)
else:
raise TypeError("invalid arguments")
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
            if option.help is not SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, str):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if isinstance(args[0], str):
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError("not an OptionGroup instance: %r" % group)
if group.parser is not self:
raise ValueError("invalid OptionGroup (wrong parser)")
else:
raise TypeError("invalid arguments")
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
        (values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
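        # Example (illustrative): if '-x' is a flag and '-f' takes one value,
        # the cluster '-xfout.txt' first processes '-x', then '-f', and the
        # leftover characters 'out.txt' become the value of '-f'.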
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print(self.get_usage(), file=file)
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print(self.get_version(), file=file)
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
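# Example (illustrative): if wordmap contains '--file' and '--filter', then
# _match_abbrev('--fil', wordmap) raises AmbiguousOptionError, while
# _match_abbrev('--filt', wordmap) returns '--filter'.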
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
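if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: build a
    # parser with the API defined above and parse a synthetic argument list.
    _parser = OptionParser(usage="%prog [options] arg", version="%prog 1.0")
    _parser.add_option("-f", "--file", dest="filename", metavar="FILE",
                       help="write output to FILE")
    _parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
                       default=True, help="don't print status messages")
    _opts, _args = _parser.parse_args(["-q", "input.txt"])
    assert _opts.verbose is False and _args == ["input.txt"]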
|
gpl-3.0
| 5,816,621,407,524,512,000 | -4,136,441,668,935,338,500 | 34.973887 | 79 | 0.566369 | false |
redapple/js2xml
|
tests/test_syntax.py
|
1
|
6887
|
import js2xml
from nose.tools import *
def test_syntax():
jscode_snippets = [
# strings
r"""
"test";
""",
r"""
"test\
multiline";
""",
# numbers
"3.14;",
"-12;",
"3.45e2;",
"0377;",
"0xFF;"
# arrays
"[]",
"[1,2]",
"[1,,2]",
"[1,,2,,3,]",
"['a', 'b','c']",
"[a, 'b', c]",
# objects
"o = {};",
"o = {a: 1};",
"o = {a: 1, b: 2};",
"o = {'c': 1, 'd': 2};",
'o = {"c": 1, "d": 2};',
'o = {"c": 1, d: "e"};',
"e = {foo: 5, bar: 6, baz: ['Baz', 'Content']};",
"e = {1: a, 2: b};",
# other primitive data types
"null;",
"undefined;",
"true;",
"false;",
# variables
r"""
var i;
""",
r"""
var i,j,k;
""",
r"""
var i = 0;
""",
r"""
var i = "test";
""",
r"""var z = 'foxes', r = 'birds';""",
r"""
var i, j, k = 0;
""",
r"""
var i=1, j, k = 2;
""",
r"""
var i = obj.prop;
""",
r"""var testObj = {};""",
r"""var testObj = [];""",
# assignements
r"""
i = b;
""",
r"""
i.a = "b";
""",
r"""
i["a"] = "b";
""",
r"""
i[a] = "b";
""",
# control structures
r"""
if (condition) {
result = expression;
};""",
r"""
if (condition) {
result = expression;
} else {
result = alternative;
};""",
r"""
if (exprA == exprB) {
result = expression;
} else if (expr2) {
result = alternative1;
} else {
result = alternative2;
};""",
"result = condition ? expression : alternative;",
# switch
r"""
switch (expr) {
case SOMEVALUE:
//statements;
break;
case ANOTHERVALUE:
//statements;
break;
default:
//statements;
break;
}
"""
# for loop
r"""
for (var i = 0; i < 5; i++) {
a = i;
}
""",
r"""
for (var i = 0; i < 5; i++) {
a = i
}
""",
r"""
for (var key in array) {
continue;
}
""",
r"""
for (;;) {
break;
}
""",
r"""
for (; i < len; i++) {
text += cars[i] + "<br>";
}
""",
r"""
for (var i = 0, len = cars.length, text = ""; i < len; i++) {
text += cars[i] + "<br>";
}
""",
"""
for (; i < len; ) {
text += cars[i] + "<br>";
i++;
}
""",
# while loop
"""
while (a<b) {
a+=1;
}
""",
"""
do {
a+=1;
} while (a<b);
""",
# with
"""
with (document) {
var a = getElementById('a');
var b = getElementById('b');
var c = getElementById('c');
};
""",
# label
r"""
loop1: for (var a = 0; a < 10; a++) {
if (a == 4) {
break loop1; // Stops after the 4th attempt
}
alert('a = ' + a);
loop2: for (var b = 0; b < 10; ++b) {
if (b == 3) {
continue loop2; // Number 3 is skipped
}
if (b == 6) {
continue loop1; // Continues the first loop, 'finished' is not shown
}
alert('b = ' + b);
}
alert('finished')
}
block1: {
alert('hello'); // Displays 'hello'
break block1;
alert('world'); // Will never get here
}
""",
# functions
"""
function foo(p) {
p = "bar";
}
""",
"""
function hello() {
alert('world');
}
""",
"""
var x = function(y) {
return y * y;
};
""",
"""
var math = {
'factorial': function factorial(n) {
if (n <= 1)
return 1;
return n * factorial(n - 1);
}
};
""",
"""
var anon = function() {
alert('I am anonymous');
};
""",
"""
anon();
""",
"""
setTimeout(function() {
alert('hello');
}, 1000)
""",
"""
(function() {
alert('foo');
}());
""",
# get/set
"""
var obj = {
get latest () {
return "latest";
}
}
""",
"""
delete obj.latest;
""",
"""
var o = {
set current (str) {
return this.log[this.log.length] = str;
},
log: []
}
""",
# new
"""var mycar = new car("Eagle", "Talon TSi", 1993);""",
# try / catch
"""
try {
throw "myException"; // generates an exception
}
catch (e) {
// statements to handle any exceptions
logMyErrors(e); // pass exception object to error handler
}
""",
"""
try {
addalert("bad call");
}
catch(e) {
document.write ("Error Message: " + e.message);
document.write ("<br />");
document.write ("Error Code: ");
document.write (e.number & 0xFFFF);
document.write ("<br />");
document.write ("Error Name: " + e.name);
}
""",
"""
try {
document.write("Outer try running...<br/>");
try {
document.write("Nested try running...<br/>");
throw new Error(301, "an error");
}
catch (e) {
document.write ("Nested catch caught " + e.message + "<br/>");
throw e;
}
finally {
document.write ("Nested finally is running...<br/>");
}
}
catch (e) {
document.write ("Outer catch caught " + e.message + "<br/>");
}
finally {
document.write ("Outer finally running");
}
""",
]
for snippet in jscode_snippets:
assert_is_not_none(js2xml.parse(snippet))
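if __name__ == "__main__":
    # Illustrative sketch, not part of the original tests: js2xml.parse()
    # returns an lxml tree; pretty_print() is assumed available as in the
    # js2xml README.
    tree = js2xml.parse("var answer = 42;")
    print(js2xml.pretty_print(tree))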
|
mit
| -5,352,209,759,684,390,000 | 846,673,280,325,868,500 | 20.061162 | 85 | 0.311747 | false |
caleblogemann/MyProjects
|
TeX/texmf/tex/latex/sagetex/makestatic.py
|
1
|
2565
|
#!/usr/bin/env python
##
## This is file `makestatic.py',
## generated with the docstrip utility.
##
## The original source files were:
##
## scripts.dtx (with options: `staticscript')
##
## This is a generated file. It is part of the SageTeX package.
##
## Copyright (C) 2008--2015 by Dan Drake <[email protected]>
##
## This program is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by the
## Free Software Foundation, either version 2 of the License, or (at your
## option) any later version.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
## Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program. If not, see <http://www.gnu.org/licenses/>.
##
import sys
import time
import getopt
import os.path
from sagetexparse import DeSageTex
def usage():
print("""Usage: %s [-h|--help] [-o|--overwrite] inputfile [outputfile]
Removes SageTeX macros from `inputfile' and replaces them with the
Sage-computed results to make a "static" file. You'll need to have run
Sage on `inputfile' already.
`inputfile' can include the .tex extension or not. If you provide
`outputfile', the results will be written to a file of that name.
Specify `-o' or `--overwrite' to overwrite the file if it exists.
See the SageTeX documentation for more details.""" % sys.argv[0])
try:
opts, args = getopt.getopt(sys.argv[1:], 'ho', ['help', 'overwrite'])
except getopt.GetoptError as err:
    print(str(err))
usage()
sys.exit(2)
overwrite = False
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit()
elif o in ('-o', '--overwrite'):
overwrite = True
if len(args) == 0 or len(args) > 2:
print('Error: wrong number of arguments. Make sure to specify options first.\n')
usage()
sys.exit(2)
if len(args) == 2 and (os.path.exists(args[1]) and not overwrite):
print('Error: %s exists and overwrite option not specified.' % args[1])
sys.exit(1)
src, ext = os.path.splitext(args[0])
desagetexed = DeSageTex(src)
header = "%% SageTeX commands have been automatically removed from this file and\n%% replaced with plain LaTeX. Processed %s.\n" % time.strftime('%a %d %b %Y %H:%M:%S', time.localtime())
if len(args) == 2:
dest = open(args[1], 'w')
else:
dest = sys.stdout
dest.write(header)
dest.write(desagetexed.result)
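# Example invocation (illustrative, assuming Sage has already been run on
# mydoc.tex so the computed results are available):
#
#   python makestatic.py -o mydoc.tex mydoc-static.tex
#
# writes a copy of mydoc.tex with all SageTeX macros replaced by their
# computed results to mydoc-static.tex, overwriting it if it exists.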
|
mit
| -2,851,482,041,699,921,400 | 1,548,101,683,293,021,400 | 31.0625 | 186 | 0.699025 | false |
yamila-moreno/django
|
tests/auth_tests/test_management.py
|
7
|
22156
|
from __future__ import unicode_literals
import locale
import sys
from datetime import date
from django.apps import apps
from django.contrib.auth import management, models
from django.contrib.auth.checks import check_user_model
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import (
changepassword, createsuperuser,
)
from django.contrib.auth.models import Group, User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, exceptions
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import (
SimpleTestCase, TestCase, override_settings, override_system_checks,
)
from django.utils import six
from django.utils.encoding import force_str
from django.utils.translation import ugettext_lazy as _
from .models import (
CustomUserBadRequiredFields, CustomUserNonListRequiredFields,
CustomUserNonUniqueUsername, CustomUserWithFK, Email,
)
def mock_inputs(inputs):
"""
Decorator to temporarily replace input/getpass to allow interactive
createsuperuser.
"""
def inner(test_func):
def wrapped(*args):
class mock_getpass:
@staticmethod
def getpass(prompt=b'Password: ', stream=None):
if six.PY2:
# getpass on Windows only supports prompt as bytestring (#19807)
assert isinstance(prompt, six.binary_type)
return inputs['password']
def mock_input(prompt):
# prompt should be encoded in Python 2. This line will raise an
# Exception if prompt contains unencoded non-ASCII on Python 2.
prompt = str(prompt)
assert str('__proxy__') not in prompt
response = ''
for key, val in inputs.items():
if force_str(key) in prompt.lower():
response = val
break
return response
old_getpass = createsuperuser.getpass
old_input = createsuperuser.input
createsuperuser.getpass = mock_getpass
createsuperuser.input = mock_input
try:
test_func(*args)
finally:
createsuperuser.getpass = old_getpass
createsuperuser.input = old_input
return wrapped
return inner
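# Illustrative usage (not in the original module): the keys are matched as
# substrings of the lower-cased prompts, so a hypothetical test could read:
#
#   @mock_inputs({'username': 'admin', 'password': 's3cret', 'email': ''})
#   def test_interactive_creation(self):
#       call_command('createsuperuser', interactive=True, stdin=MockTTY())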
class MockTTY(object):
"""
A fake stdin object that pretends to be a TTY to be used in conjunction
with mock_inputs.
"""
def isatty(self):
return True
class GetDefaultUsernameTestCase(TestCase):
def setUp(self):
self.old_get_system_username = management.get_system_username
def tearDown(self):
management.get_system_username = self.old_get_system_username
def test_actual_implementation(self):
self.assertIsInstance(management.get_system_username(), six.text_type)
def test_simple(self):
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), 'joe')
def test_existing(self):
models.User.objects.create(username='joe')
management.get_system_username = lambda: 'joe'
self.assertEqual(management.get_default_username(), '')
self.assertEqual(
management.get_default_username(check_db=False), 'joe')
def test_i18n(self):
# 'Julia' with accented 'u':
management.get_system_username = lambda: 'J\xfalia'
self.assertEqual(management.get_default_username(), 'julia')
class ChangepasswordManagementCommandTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create_user(username='joe', password='qwerty')
self.stdout = six.StringIO()
self.stderr = six.StringIO()
def tearDown(self):
self.stdout.close()
self.stderr.close()
def test_that_changepassword_command_changes_joes_password(self):
"Executing the changepassword management command should change joe's password"
self.assertTrue(self.user.check_password('qwerty'))
command = changepassword.Command()
command._get_pass = lambda *args: 'not qwerty'
command.execute(username="joe", stdout=self.stdout)
command_output = self.stdout.getvalue().strip()
self.assertEqual(
command_output,
"Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
)
self.assertTrue(models.User.objects.get(username="joe").check_password("not qwerty"))
def test_that_max_tries_exits_1(self):
"""
        A CommandError should be thrown by handle() if the user enters
mismatched passwords three times.
"""
command = changepassword.Command()
command._get_pass = lambda *args: args or 'foo'
with self.assertRaises(CommandError):
command.execute(username="joe", stdout=self.stdout, stderr=self.stderr)
def test_that_changepassword_command_works_with_nonascii_output(self):
"""
#21627 -- Executing the changepassword management command should allow
non-ASCII characters from the User object representation.
"""
# 'Julia' with accented 'u':
models.User.objects.create_user(username='J\xfalia', password='qwerty')
command = changepassword.Command()
command._get_pass = lambda *args: 'not qwerty'
command.execute(username="J\xfalia", stdout=self.stdout)
@override_settings(SILENCED_SYSTEM_CHECKS=['fields.W342']) # ForeignKey(unique=True)
class CreatesuperuserManagementCommandTestCase(TestCase):
def test_basic_usage(self):
"Check the operation of the createsuperuser management command"
# We can use the management command to create a superuser
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe",
email="[email protected]",
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = User.objects.get(username="joe")
self.assertEqual(u.email, '[email protected]')
# created password should be unusable
self.assertFalse(u.has_usable_password())
@mock_inputs({'password': "nopasswd"})
def test_nolocale(self):
"""
Check that createsuperuser does not break when no locale is set. See
ticket #16017.
"""
old_getdefaultlocale = locale.getdefaultlocale
try:
# Temporarily remove locale information
locale.getdefaultlocale = lambda: (None, None)
# Call the command in this new environment
call_command(
"createsuperuser",
interactive=True,
username="[email protected]",
email="[email protected]",
verbosity=0,
stdin=MockTTY(),
)
except TypeError:
self.fail("createsuperuser fails if the OS provides no information about the current locale")
finally:
# Re-apply locale information
locale.getdefaultlocale = old_getdefaultlocale
# If we were successful, a user should have been created
u = User.objects.get(username="[email protected]")
self.assertEqual(u.email, '[email protected]')
@mock_inputs({
'password': "nopasswd",
'u\u017eivatel': 'foo', # username (cz)
'email': '[email protected]'})
def test_non_ascii_verbose_name(self):
username_field = User._meta.get_field('username')
old_verbose_name = username_field.verbose_name
username_field.verbose_name = _('u\u017eivatel')
new_io = six.StringIO()
try:
call_command(
"createsuperuser",
interactive=True,
stdout=new_io,
stdin=MockTTY(),
)
finally:
username_field.verbose_name = old_verbose_name
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
def test_verbosity_zero(self):
# We can suppress output on the management command
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe2",
email="[email protected]",
verbosity=0,
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '')
u = User.objects.get(username="joe2")
self.assertEqual(u.email, '[email protected]')
self.assertFalse(u.has_usable_password())
def test_email_in_username(self):
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
username="[email protected]",
email="[email protected]",
stdout=new_io
)
u = User._default_manager.get(username="[email protected]")
self.assertEqual(u.email, '[email protected]')
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user(self):
"A superuser can be created when a custom User model is in use"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = six.StringIO()
call_command(
"createsuperuser",
interactive=False,
email="[email protected]",
date_of_birth="1976-04-01",
stdout=new_io,
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUser._default_manager.get(email="[email protected]")
self.assertEqual(u.date_of_birth, date(1976, 4, 1))
# created password should be unusable
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user_missing_required_field(self):
"A Custom superuser won't be created when a required field isn't provided"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = six.StringIO()
with self.assertRaises(CommandError):
call_command(
"createsuperuser",
interactive=False,
username="[email protected]",
stdout=new_io,
stderr=new_io,
)
self.assertEqual(CustomUser._default_manager.count(), 0)
def test_skip_if_not_in_TTY(self):
"""
If the command is not called from a TTY, it should be skipped and a
message should be displayed (#7423).
"""
class FakeStdin(object):
"""A fake stdin object that has isatty() return False."""
def isatty(self):
return False
out = six.StringIO()
call_command(
"createsuperuser",
stdin=FakeStdin(),
stdout=out,
interactive=True,
)
self.assertEqual(User._default_manager.count(), 0)
self.assertIn("Superuser creation skipped", out.getvalue())
def test_passing_stdin(self):
"""
You can pass a stdin object as an option and it should be
available on self.stdin.
If no such option is passed, it defaults to sys.stdin.
"""
sentinel = object()
command = createsuperuser.Command()
command.check = lambda: []
command.execute(
stdin=sentinel,
stdout=six.StringIO(),
stderr=six.StringIO(),
interactive=False,
verbosity=0,
username='janet',
email='[email protected]',
)
self.assertIs(command.stdin, sentinel)
command = createsuperuser.Command()
command.check = lambda: []
command.execute(
stdout=six.StringIO(),
stderr=six.StringIO(),
interactive=False,
verbosity=0,
username='joe',
email='[email protected]',
)
self.assertIs(command.stdin, sys.stdin)
@override_settings(AUTH_USER_MODEL='auth.CustomUserWithFK')
def test_fields_with_fk(self):
new_io = six.StringIO()
group = Group.objects.create(name='mygroup')
email = Email.objects.create(email='[email protected]')
call_command(
'createsuperuser',
interactive=False,
username=email.pk,
email=email.email,
group=group.pk,
stdout=new_io,
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUserWithFK._default_manager.get(email=email)
self.assertEqual(u.username, email)
self.assertEqual(u.group, group)
non_existent_email = '[email protected]'
with self.assertRaisesMessage(CommandError,
'email instance with email %r does not exist.' % non_existent_email):
call_command(
'createsuperuser',
interactive=False,
username=email.pk,
email=non_existent_email,
stdout=new_io,
)
@override_settings(AUTH_USER_MODEL='auth.CustomUserWithFK')
def test_fields_with_fk_interactive(self):
new_io = six.StringIO()
group = Group.objects.create(name='mygroup')
email = Email.objects.create(email='[email protected]')
@mock_inputs({
'password': 'nopasswd',
'username (email.id)': email.pk,
'email (email.email)': email.email,
'group (group.id)': group.pk,
})
def test(self):
call_command(
'createsuperuser',
interactive=True,
stdout=new_io,
stdin=MockTTY(),
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUserWithFK._default_manager.get(email=email)
self.assertEqual(u.username, email)
self.assertEqual(u.group, group)
test(self)
class CustomUserModelValidationTestCase(SimpleTestCase):
@override_settings(AUTH_USER_MODEL='auth.CustomUserNonListRequiredFields')
@override_system_checks([check_user_model])
def test_required_fields_is_list(self):
"REQUIRED_FIELDS should be a list."
errors = checks.run_checks()
expected = [
checks.Error(
"'REQUIRED_FIELDS' must be a list or tuple.",
hint=None,
obj=CustomUserNonListRequiredFields,
id='auth.E001',
),
]
self.assertEqual(errors, expected)
@override_settings(AUTH_USER_MODEL='auth.CustomUserBadRequiredFields')
@override_system_checks([check_user_model])
def test_username_not_in_required_fields(self):
"USERNAME_FIELD should not appear in REQUIRED_FIELDS."
errors = checks.run_checks()
expected = [
checks.Error(
("The field named as the 'USERNAME_FIELD' for a custom user model "
"must not be included in 'REQUIRED_FIELDS'."),
hint=None,
obj=CustomUserBadRequiredFields,
id='auth.E002',
),
]
self.assertEqual(errors, expected)
@override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername')
@override_system_checks([check_user_model])
def test_username_non_unique(self):
"A non-unique USERNAME_FIELD should raise a model validation error."
errors = checks.run_checks()
expected = [
checks.Error(
("'CustomUserNonUniqueUsername.username' must be "
"unique because it is named as the 'USERNAME_FIELD'."),
hint=None,
obj=CustomUserNonUniqueUsername,
id='auth.E003',
),
]
self.assertEqual(errors, expected)
@override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername',
AUTHENTICATION_BACKENDS=[
'my.custom.backend',
])
@override_system_checks([check_user_model])
def test_username_non_unique_with_custom_backend(self):
""" A non-unique USERNAME_FIELD should raise an error only if we use the
        default authentication backend. Otherwise, a warning should be raised.
"""
errors = checks.run_checks()
expected = [
checks.Warning(
("'CustomUserNonUniqueUsername.username' is named as "
"the 'USERNAME_FIELD', but it is not unique."),
hint=('Ensure that your authentication backend(s) can handle '
'non-unique usernames.'),
obj=CustomUserNonUniqueUsername,
id='auth.W004',
)
]
self.assertEqual(errors, expected)
class PermissionTestCase(TestCase):
def setUp(self):
self._original_permissions = models.Permission._meta.permissions[:]
self._original_default_permissions = models.Permission._meta.default_permissions
self._original_verbose_name = models.Permission._meta.verbose_name
def tearDown(self):
models.Permission._meta.permissions = self._original_permissions
models.Permission._meta.default_permissions = self._original_default_permissions
models.Permission._meta.verbose_name = self._original_verbose_name
ContentType.objects.clear_cache()
def test_duplicated_permissions(self):
"""
        Test that a proper error message is shown when trying to create
duplicate permissions.
"""
auth_app_config = apps.get_app_config('auth')
# check duplicated default permission
models.Permission._meta.permissions = [
('change_permission', 'Can edit permission (duplicate)')]
six.assertRaisesRegex(self, CommandError,
"The permission codename 'change_permission' clashes with a "
"builtin permission for model 'auth.Permission'.",
create_permissions, auth_app_config, verbosity=0)
# check duplicated custom permissions
models.Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
('other_one', 'Some other permission'),
('my_custom_permission', 'Some permission with duplicate permission code'),
]
six.assertRaisesRegex(self, CommandError,
"The permission codename 'my_custom_permission' is duplicated for model "
"'auth.Permission'.",
create_permissions, auth_app_config, verbosity=0)
# should not raise anything
models.Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
('other_one', 'Some other permission'),
]
create_permissions(auth_app_config, verbosity=0)
def test_default_permissions(self):
auth_app_config = apps.get_app_config('auth')
permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
models.Permission._meta.permissions = [
('my_custom_permission', 'Some permission'),
]
create_permissions(auth_app_config, verbosity=0)
# add/change/delete permission by default + custom permission
self.assertEqual(models.Permission.objects.filter(
content_type=permission_content_type,
).count(), 4)
models.Permission.objects.filter(content_type=permission_content_type).delete()
models.Permission._meta.default_permissions = []
create_permissions(auth_app_config, verbosity=0)
# custom permission only since default permissions is empty
self.assertEqual(models.Permission.objects.filter(
content_type=permission_content_type,
).count(), 1)
def test_verbose_name_length(self):
auth_app_config = apps.get_app_config('auth')
permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
models.Permission.objects.filter(content_type=permission_content_type).delete()
models.Permission._meta.verbose_name = "some ridiculously long verbose name that is out of control" * 5
six.assertRaisesRegex(self, exceptions.ValidationError,
"The verbose_name of auth.permission is longer than 244 characters",
create_permissions, auth_app_config, verbosity=0)
def test_custom_permission_name_length(self):
auth_app_config = apps.get_app_config('auth')
ContentType.objects.get_by_natural_key('auth', 'permission')
custom_perm_name = 'a' * 256
models.Permission._meta.permissions = [
('my_custom_permission', custom_perm_name),
]
try:
msg = (
"The permission name %s of auth.permission is longer than "
"255 characters" % custom_perm_name
)
with self.assertRaisesMessage(exceptions.ValidationError, msg):
create_permissions(auth_app_config, verbosity=0)
finally:
models.Permission._meta.permissions = []
|
bsd-3-clause
| -546,779,301,949,976,000 | -8,249,788,323,374,232,000 | 36.744463 | 111 | 0.611708 | false |
shujaatak/UAV_MissionPlanner
|
Lib/site-packages/numpy/distutils/tests/test_misc_util.py
|
51
|
2430
|
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
from numpy.testing import *
from numpy.distutils.misc_util import appendpath, minrelpath, gpaths, rel_path
from os.path import join, sep, dirname
ajoin = lambda *paths: join(*((sep,)+paths))
class TestAppendpath(TestCase):
def test_1(self):
assert_equal(appendpath('prefix','name'),join('prefix','name'))
assert_equal(appendpath('/prefix','name'),ajoin('prefix','name'))
assert_equal(appendpath('/prefix','/name'),ajoin('prefix','name'))
assert_equal(appendpath('prefix','/name'),join('prefix','name'))
def test_2(self):
assert_equal(appendpath('prefix/sub','name'),
join('prefix','sub','name'))
assert_equal(appendpath('prefix/sub','sup/name'),
join('prefix','sub','sup','name'))
assert_equal(appendpath('/prefix/sub','/prefix/name'),
ajoin('prefix','sub','name'))
def test_3(self):
assert_equal(appendpath('/prefix/sub','/prefix/sup/name'),
ajoin('prefix','sub','sup','name'))
assert_equal(appendpath('/prefix/sub/sub2','/prefix/sup/sup2/name'),
ajoin('prefix','sub','sub2','sup','sup2','name'))
assert_equal(appendpath('/prefix/sub/sub2','/prefix/sub/sup/name'),
ajoin('prefix','sub','sub2','sup','name'))
class TestMinrelpath(TestCase):
def test_1(self):
n = lambda path: path.replace('/',sep)
assert_equal(minrelpath(n('aa/bb')),n('aa/bb'))
assert_equal(minrelpath('..'),'..')
assert_equal(minrelpath(n('aa/..')),'')
assert_equal(minrelpath(n('aa/../bb')),'bb')
assert_equal(minrelpath(n('aa/bb/..')),'aa')
assert_equal(minrelpath(n('aa/bb/../..')),'')
assert_equal(minrelpath(n('aa/bb/../cc/../dd')),n('aa/dd'))
assert_equal(minrelpath(n('.././..')),n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')),n('dd'))
class TestGpaths(TestCase):
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__),'..'))
ls = gpaths('command/*.py', local_path)
assert join(local_path,'command','build_src.py') in ls,`ls`
f = gpaths('system_info.py', local_path)
assert join(local_path,'system_info.py')==f[0],`f`
if __name__ == "__main__":
run_module_suite()
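# Quick reference (illustrative, mirroring the assertions above):
#   appendpath('/prefix/sub', '/prefix/sup/name') -> '/prefix/sub/sup/name'
#   minrelpath('aa/bb/../cc/../dd') -> 'aa/dd'
#   gpaths('command/*.py', local_path) -> paths matching the glob pattern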
|
gpl-2.0
| -2,073,784,252,760,989,400 | -8,248,005,706,733,242,000 | 40.896552 | 100 | 0.574486 | false |
Sylrob434/CouchPotatoServer
|
libs/enzyme/core.py
|
180
|
15208
|
# -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <[email protected]>
# Copyright 2003-2006 Thomas Schueppel <[email protected]>
# Copyright 2003-2006 Dirk Meyer <[email protected]>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
import re
import logging
import fourcc
import language
from strutils import str_to_unicode, unicode_to_str
UNPRINTABLE_KEYS = ['thumbnail', 'url', 'codec_private']
MEDIACORE = ['title', 'caption', 'comment', 'size', 'type', 'subtype', 'timestamp',
'keywords', 'country', 'language', 'langcode', 'url', 'artist',
'mime', 'datetime', 'tags', 'hash']
AUDIOCORE = ['channels', 'samplerate', 'length', 'encoder', 'codec', 'format',
'samplebits', 'bitrate', 'fourcc', 'trackno', 'id', 'userdate',
'enabled', 'default', 'codec_private']
MUSICCORE = ['trackof', 'album', 'genre', 'discs', 'thumbnail']
VIDEOCORE = ['length', 'encoder', 'bitrate', 'samplerate', 'codec', 'format',
'samplebits', 'width', 'height', 'fps', 'aspect', 'trackno',
'fourcc', 'id', 'enabled', 'default', 'codec_private']
AVCORE = ['length', 'encoder', 'trackno', 'trackof', 'copyright', 'product',
'genre', 'writer', 'producer', 'studio', 'rating', 'actors', 'thumbnail',
'delay', 'image', 'video', 'audio', 'subtitles', 'chapters', 'software',
'summary', 'synopsis', 'season', 'episode', 'series']
# get logging object
log = logging.getLogger(__name__)
class Media(object):
"""
Media is the base class to all Media Metadata Containers. It defines
    the basic structures that handle metadata. Media and its derivatives
    contain a common set of metadata attributes that is listed in keys.
    Specific derivatives contain additional keys beyond the Dublin Core set
    that is defined in Media.
"""
media = None
_keys = MEDIACORE
table_mapping = {}
def __init__(self, hash=None):
if hash is not None:
# create Media based on dict
for key, value in hash.items():
if isinstance(value, list) and value and isinstance(value[0], dict):
value = [Media(x) for x in value]
self._set(key, value)
return
self._keys = self._keys[:]
self.tables = {}
# Tags, unlike tables, are more well-defined dicts whose values are
# either Tag objects, other dicts (for nested tags), or lists of either
# (for multiple instances of the tag, e.g. actor). Where possible,
# parsers should transform tag names to conform to the Official
# Matroska tags defined at http://www.matroska.org/technical/specs/tagging/index.html
# All tag names will be lower-cased.
self.tags = Tags()
for key in set(self._keys) - set(['media', 'tags']):
setattr(self, key, None)
#
# unicode and string convertion for debugging
#
#TODO: Fix that mess
def __unicode__(self):
result = u''
# print normal attributes
lists = []
for key in self._keys:
value = getattr(self, key, None)
            if value is None or key == 'url':
continue
if isinstance(value, list):
if not value:
continue
elif isinstance(value[0], basestring):
# Just a list of strings (keywords?), so don't treat it specially.
value = u', '.join(value)
else:
lists.append((key, value))
continue
elif isinstance(value, dict):
# Tables or tags treated separately.
continue
if key in UNPRINTABLE_KEYS:
value = '<unprintable data, size=%d>' % len(value)
result += u'| %10s: %s\n' % (unicode(key), unicode(value))
# print tags (recursively, to support nested tags).
def print_tags(tags, suffix, show_label):
result = ''
for n, (name, tag) in enumerate(tags.items()):
result += u'| %12s%s%s = ' % (u'tags: ' if n == 0 and show_label else '', suffix, name)
if isinstance(tag, list):
# TODO: doesn't support lists/dicts within lists.
result += u'%s\n' % ', '.join(subtag.value for subtag in tag)
else:
result += u'%s\n' % (tag.value or '')
if isinstance(tag, dict):
result += print_tags(tag, ' ', False)
return result
result += print_tags(self.tags, '', True)
# print lists
for key, l in lists:
for n, item in enumerate(l):
label = '+-- ' + key.rstrip('s').capitalize()
if key not in ['tracks', 'subtitles', 'chapters']:
label += ' Track'
result += u'%s #%d\n' % (label, n + 1)
result += '| ' + re.sub(r'\n(.)', r'\n| \1', unicode(item))
# print tables
#FIXME: WTH?
# if log.level >= 10:
# for name, table in self.tables.items():
# result += '+-- Table %s\n' % str(name)
# for key, value in table.items():
# try:
# value = unicode(value)
# if len(value) > 50:
# value = u'<unprintable data, size=%d>' % len(value)
# except (UnicodeDecodeError, TypeError):
# try:
# value = u'<unprintable data, size=%d>' % len(value)
# except AttributeError:
# value = u'<unprintable data>'
# result += u'| | %s: %s\n' % (unicode(key), value)
return result
def __str__(self):
return unicode(self).encode()
def __repr__(self):
if hasattr(self, 'url'):
return '<%s %s>' % (str(self.__class__)[8:-2], self.url)
else:
return '<%s>' % (str(self.__class__)[8:-2])
#
# internal functions
#
def _appendtable(self, name, hashmap):
"""
        Appends a table of additional metadata to the object.
        If such a table already exists, the given table's items are
added to the existing one.
"""
if name not in self.tables:
self.tables[name] = hashmap
else:
# Append to the already existing table
for k in hashmap.keys():
self.tables[name][k] = hashmap[k]
def _set(self, key, value):
"""
Set key to value and add the key to the internal keys list if
missing.
"""
if value is None and getattr(self, key, None) is None:
return
if isinstance(value, str):
value = str_to_unicode(value)
setattr(self, key, value)
        if key not in self._keys:
self._keys.append(key)
def _set_url(self, url):
"""
Set the URL of the source
"""
self.url = url
def _finalize(self):
"""
        Correct some data based on specific rules
"""
# make sure all strings are unicode
for key in self._keys:
if key in UNPRINTABLE_KEYS:
continue
value = getattr(self, key)
if value is None:
continue
if key == 'image':
if isinstance(value, unicode):
setattr(self, key, unicode_to_str(value))
continue
if isinstance(value, str):
setattr(self, key, str_to_unicode(value))
if isinstance(value, unicode):
setattr(self, key, value.strip().rstrip().replace(u'\0', u''))
if isinstance(value, list) and value and isinstance(value[0], Media):
for submenu in value:
submenu._finalize()
# copy needed tags from tables
for name, table in self.tables.items():
mapping = self.table_mapping.get(name, {})
for tag, attr in mapping.items():
if self.get(attr):
continue
value = table.get(tag, None)
if value is not None:
if not isinstance(value, (str, unicode)):
value = str_to_unicode(str(value))
elif isinstance(value, str):
value = str_to_unicode(value)
value = value.strip().rstrip().replace(u'\0', u'')
setattr(self, attr, value)
if 'fourcc' in self._keys and 'codec' in self._keys and self.codec is not None:
# Codec may be a fourcc, in which case we resolve it to its actual
# name and set the fourcc attribute.
self.fourcc, self.codec = fourcc.resolve(self.codec)
if 'language' in self._keys:
self.langcode, self.language = language.resolve(self.language)
#
# data access
#
def __contains__(self, key):
"""
Test if key exists in the dict
"""
return hasattr(self, key)
def get(self, attr, default=None):
"""
Returns the given attribute. If the attribute is not set by
the parser return 'default'.
"""
return getattr(self, attr, default)
def __getitem__(self, attr):
"""
Get the value of the given attribute
"""
return getattr(self, attr, None)
def __setitem__(self, key, value):
"""
Set the value of 'key' to 'value'
"""
setattr(self, key, value)
def has_key(self, key):
"""
Check if the object has an attribute 'key'
"""
return hasattr(self, key)
def convert(self):
"""
Convert Media to dict.
"""
result = {}
for k in self._keys:
value = getattr(self, k, None)
if isinstance(value, list) and value and isinstance(value[0], Media):
value = [x.convert() for x in value]
result[k] = value
return result
def keys(self):
"""
Return all keys for the attributes set by the parser.
"""
return self._keys
class Collection(Media):
"""
    Collection of Digital Media like CD, DVD, Directory, Playlist
"""
_keys = Media._keys + ['id', 'tracks']
def __init__(self):
Media.__init__(self)
self.tracks = []
class Tag(object):
"""
An individual tag, which will be a value stored in a Tags object.
Tag values are strings (for binary data), unicode objects, or datetime
objects for tags that represent dates or times.
"""
def __init__(self, value=None, langcode='und', binary=False):
super(Tag, self).__init__()
self.value = value
self.langcode = langcode
self.binary = binary
def __unicode__(self):
return unicode(self.value)
def __str__(self):
return str(self.value)
def __repr__(self):
if not self.binary:
return '<Tag object: %s>' % repr(self.value)
else:
return '<Binary Tag object: size=%d>' % len(self.value)
@property
def langcode(self):
return self._langcode
@langcode.setter
def langcode(self, code):
self._langcode, self.language = language.resolve(code)
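# Illustrative (not in the original): assigning a language code resolves both
# fields at once, so Tag('PG', langcode='eng') is expected to end up with
# langcode u'eng' and the matching human-readable name from language.resolve().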
class Tags(dict, Tag):
"""
A dictionary containing Tag objects. Values can be other Tags objects
(for nested tags), lists, or Tag objects.
A Tags object is more or less a dictionary but it also contains a value.
This is necessary in order to represent this kind of tag specification
(e.g. for Matroska)::
<Simple>
<Name>LAW_RATING</Name>
<String>PG</String>
<Simple>
<Name>COUNTRY</Name>
<String>US</String>
</Simple>
</Simple>
    The LAW_RATING tag has a value (PG), but it also has a child tag
    COUNTRY that specifies the country code the rating belongs to.
"""
def __init__(self, value=None, langcode='und', binary=False):
super(Tags, self).__init__()
self.value = value
self.langcode = langcode
self.binary = False
class AudioStream(Media):
"""
Audio Tracks in a Multiplexed Container.
"""
_keys = Media._keys + AUDIOCORE
class Music(AudioStream):
"""
Digital Music.
"""
_keys = AudioStream._keys + MUSICCORE
def _finalize(self):
"""
        Correct some data based on specific rules
"""
AudioStream._finalize(self)
if self.trackof:
try:
# XXX Why is this needed anyway?
if int(self.trackno) < 10:
self.trackno = u'0%s' % int(self.trackno)
except (AttributeError, ValueError):
pass
class VideoStream(Media):
"""
Video Tracks in a Multiplexed Container.
"""
_keys = Media._keys + VIDEOCORE
class Chapter(Media):
"""
Chapter in a Multiplexed Container.
"""
_keys = ['enabled', 'name', 'pos', 'id']
def __init__(self, name=None, pos=0):
Media.__init__(self)
self.name = name
self.pos = pos
self.enabled = True
class Subtitle(Media):
"""
Subtitle Tracks in a Multiplexed Container.
"""
_keys = ['enabled', 'default', 'langcode', 'language', 'trackno', 'title',
'id', 'codec']
def __init__(self, language=None):
Media.__init__(self)
self.language = language
class AVContainer(Media):
"""
    Container for Audio and Video streams. This is the container type for
    all media that contain more than one stream.
"""
_keys = Media._keys + AVCORE
def __init__(self):
Media.__init__(self)
self.audio = []
self.video = []
self.subtitles = []
self.chapters = []
def _finalize(self):
"""
        Correct some data based on specific rules
"""
Media._finalize(self)
if not self.length and len(self.video) and self.video[0].length:
self.length = 0
# Length not specified for container, so use the largest length
# of its tracks as container length.
for track in self.video + self.audio:
if track.length:
self.length = max(self.length, track.length)
|
gpl-3.0
| 3,213,977,168,907,093,500 | 3,020,059,638,574,379,000 | 32.795556 | 103 | 0.541294 | false |
Tampy/CS585
|
src/ext/gtest-1.7.0/test/gtest_test_utils.py
|
1100
|
10812
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
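def _ExampleFlagPrecedence():
  """Illustrative sketch only: documents the flag resolution order.

  Each flag defaults to the directory of sys.argv[0]; the SOURCE_DIR and
  BUILD_DIR environment variables override the defaults, and the
  --source_dir=/--build_dir= command line flags take highest precedence.
  """
  return GetSourceDir(), GetBuildDir()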
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
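def _ExampleGetExitStatus():
  # Illustrative sketch only: os.system() encodes the child's status in a
  # platform-specific way; GetExitStatus() recovers the exit() argument.
  if os.name == 'nt':
    command = 'cmd /c exit 7'
  else:
    command = 'sh -c "exit 7"'
  return GetExitStatus(os.system(command))  # 7 on both platforms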
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
        signal                  Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate() returns a tuple; its first element is the child's
      # output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
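def _ExampleSubprocessUsage():
  # Illustrative sketch only: run a child process and inspect the outcome
  # attributes documented in Subprocess.__init__ above.
  p = Subprocess([sys.executable, '-c', 'print("hello")'])
  assert p.exited and p.exit_code == 0
  return p.output  # stdout (and stderr, since capture_stderr defaults on)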
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
|
gpl-2.0
| 8,149,438,757,631,523,000 | 1,589,441,087,069,172,500 | 32.7875 | 79 | 0.674251 | false |
suutari-ai/shoop
|
shuup_tests/core/test_shops.py
|
3
|
2463
|
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import parler.appsettings
import pytest
from filer.models import Folder, Image
from shuup.core.models import Shop, ShopStatus
from shuup.testing.factories import DEFAULT_IDENTIFIER, DEFAULT_NAME
caching_was_enabled = None
def setup_module(module):
    # override_settings does not work with parler, since it does not read
# django.conf.settings but parler.appsettings
global caching_was_enabled
caching_was_enabled = parler.appsettings.PARLER_ENABLE_CACHING
parler.appsettings.PARLER_ENABLE_CACHING = False
def teardown_module(module):
parler.appsettings.PARLER_ENABLE_CACHING = caching_was_enabled
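# Note: parler reads its configuration from parler.appsettings, which is
# populated from Django settings at import time, so a per-test
# @override_settings(PARLER_ENABLE_CACHING=False) would take effect too
# late. Toggling parler.appsettings directly in setup_module and
# restoring it in teardown_module is the workable alternative.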
@pytest.mark.django_db
def test_shop_wont_be_deleted():
shop = Shop.objects.create(
name=DEFAULT_NAME,
identifier=DEFAULT_IDENTIFIER,
status=ShopStatus.ENABLED,
public_name=DEFAULT_NAME
)
folder = Folder.objects.create(name="Root")
img = Image.objects.create(name="imagefile", folder=folder)
shop.logo = img
shop.save()
img.delete()
Shop.objects.get(pk=shop.pk)
@pytest.mark.django_db
def test_shop_translations_get_saved():
obj = Shop.objects.language('en').create(name="Store")
obj.set_current_language('fi')
obj.name = "Liike"
assert set(obj.get_available_languages(include_unsaved=True)) == set(['en', 'fi'])
assert set(obj.get_available_languages()) == set(['en'])
obj.save()
assert set(obj.get_available_languages()) == set(['en', 'fi'])
assert Shop.objects.language('en').get(pk=obj.pk).name == "Store"
assert Shop.objects.language('fi').get(pk=obj.pk).name == "Liike"
@pytest.mark.django_db
def test_shop_translations_manager():
shop = Shop.objects.language('en').create(name="Store")
shop.set_current_language('fi')
shop.name = "Liike"
shop.save()
found = Shop.objects.language('fi').get(pk=shop.pk)
assert found == shop
assert found.name == "Liike"
found = Shop.objects.language('en').get(pk=shop.pk)
assert found == shop
assert found.name == "Store"
found = Shop.objects.translated('fi', name="Liike").get(pk=shop.pk)
assert found == shop
found = Shop.objects.translated('en', name="Store").get(pk=shop.pk)
assert found == shop
|
agpl-3.0
| 3,335,272,118,657,597,000 | -3,025,146,525,827,399,000 | 29.407407 | 86 | 0.690621 | false |
myarjunar/inasafe
|
safe/utilities/keyword_io.py
|
1
|
22390
|
# coding=utf-8
"""Keyword IO implementation."""
import logging
from ast import literal_eval
from datetime import datetime
from PyQt4.QtCore import QObject
from PyQt4.QtCore import QUrl, QDateTime
from qgis.core import QgsMapLayer
from safe.definitions.utilities import definition
from safe import messaging as m
from safe.messaging import styles
from safe.utilities.i18n import tr
from safe.utilities.metadata import (
write_iso19115_metadata, read_iso19115_metadata)
from safe.utilities.unicode import get_string
__copyright__ = "Copyright 2011, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "[email protected]"
__revision__ = '$Format:%H$'
LOGGER = logging.getLogger('InaSAFE')
# Notes(IS): This class can be replaced by safe.utilities.metadata
# Some methods for viewing the keywords should be put in the other class
class KeywordIO(QObject):
"""Class for doing keyword read/write operations.
It abstracts away differences between using SAFE to get keywords from a
.keywords file and this plugins implementation of keyword caching in a
local sqlite db used for supporting keywords for remote datasources.
"""
def __init__(self, layer=None):
"""Constructor for the KeywordIO object.
.. versionchanged:: 3.3 added optional layer parameter.
"""
QObject.__init__(self)
self.layer = layer
@staticmethod
def read_keywords(layer, keyword=None):
"""Read keywords for a datasource and return them as a dictionary.
This is a wrapper method that will 'do the right thing' to fetch
keywords for the given datasource. In particular, if the datasource
is remote (e.g. a database connection) it will fetch the keywords from
the keywords store.
:param layer: A QGIS QgsMapLayer instance that you want to obtain
the keywords for.
:type layer: QgsMapLayer, QgsRasterLayer, QgsVectorLayer,
QgsPluginLayer
:param keyword: If set, will extract only the specified keyword
from the keywords dict.
:type keyword: str
:returns: A dict if keyword is omitted, otherwise the value for the
given key if it is present.
:rtype: dict, str
TODO: Don't raise generic exceptions.
:raises: HashNotFoundError, Exception, OperationalError,
NoKeywordsFoundError, KeywordNotFoundError, InvalidParameterError,
UnsupportedProviderError
"""
source = layer.source()
# Try to read from ISO metadata first.
return read_iso19115_metadata(source, keyword)
@staticmethod
def write_keywords(layer, keywords):
"""Write keywords for a datasource.
This is a wrapper method that will 'do the right thing' to store
keywords for the given datasource. In particular, if the datasource
is remote (e.g. a database connection) it will write the keywords from
the keywords store.
:param layer: A QGIS QgsMapLayer instance.
:type layer: qgis.core.QgsMapLayer
:param keywords: A dict containing all the keywords to be written
for the layer.
:type keywords: dict
:raises: UnsupportedProviderError
"""
if not isinstance(layer, QgsMapLayer):
raise Exception(
tr('The layer is not a QgsMapLayer : {type}').format(
type=type(layer)))
source = layer.source()
write_iso19115_metadata(source, keywords)
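    # A minimal usage sketch (illustrative only; assumes `layer` is a
    # loaded QgsVectorLayer whose ISO 19115 metadata sits next to its
    # source):
    #
    #   keywords = KeywordIO.read_keywords(layer)        # whole dict
    #   title = KeywordIO.read_keywords(layer, 'title')  # single value
    #   keywords['title'] = 'Flood footprint'
    #   KeywordIO.write_keywords(layer, keywords)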
# methods below here should be considered private
def to_message(self, keywords=None, show_header=True):
"""Format keywords as a message object.
.. versionadded:: 3.2
.. versionchanged:: 3.3 - default keywords to None
The message object can then be rendered to html, plain text etc.
:param keywords: Keywords to be converted to a message. Optional. If
not passed then we will attempt to get keywords from self.layer
if it is not None.
:type keywords: dict
:param show_header: Flag indicating if InaSAFE logo etc. should be
added above the keywords table. Default is True.
:type show_header: bool
:returns: A safe message object containing a table.
:rtype: safe.messaging.Message
"""
if keywords is None and self.layer is not None:
keywords = self.read_keywords(self.layer)
# This order was determined in issue #2313
preferred_order = [
'title',
'layer_purpose',
'exposure',
'hazard',
'hazard_category',
'layer_geometry',
'layer_mode',
'classification',
'exposure_unit',
'continuous_hazard_unit',
'value_map', # attribute values
'thresholds', # attribute values
'value_maps', # attribute values
'inasafe_fields',
'inasafe_default_values',
'resample',
'source',
'url',
'scale',
'license',
'date',
'keyword_version'
] # everything else in arbitrary order
report = m.Message()
if show_header:
logo_element = m.Brand()
report.add(logo_element)
report.add(m.Heading(tr(
'Layer keywords:'), **styles.BLUE_LEVEL_4_STYLE))
report.add(m.Text(tr(
'The following keywords are defined for the active layer:')))
table = m.Table(style_class='table table-condensed table-striped')
# First render out the preferred order keywords
for keyword in preferred_order:
if keyword in keywords:
value = keywords[keyword]
row = self._keyword_to_row(keyword, value)
keywords.pop(keyword)
table.add(row)
# now render out any remaining keywords in arbitrary order
for keyword in keywords:
value = keywords[keyword]
row = self._keyword_to_row(keyword, value)
table.add(row)
# If the keywords class was instantiated with a layer object
# we can add some context info not stored in the keywords themselves
# but that is still useful to see...
if self.layer:
# First the CRS
keyword = tr('Reference system')
value = self.layer.crs().authid()
row = self._keyword_to_row(keyword, value)
table.add(row)
# Next the data source
keyword = tr('Layer source')
value = self.layer.source()
row = self._keyword_to_row(keyword, value, wrap_slash=True)
table.add(row)
# Finalise the report
report.add(table)
return report
def _keyword_to_row(self, keyword, value, wrap_slash=False):
"""Helper to make a message row from a keyword.
.. versionadded:: 3.2
Use this when constructing a table from keywords to display as
part of a message object.
:param keyword: The keyword to be rendered.
:type keyword: str
:param value: Value of the keyword to be rendered.
:type value: basestring
:param wrap_slash: Whether to replace slashes with the slash plus the
html <wbr> tag which will help to e.g. wrap html in small cells if
it contains a long filename. Disabled by default as it may cause
side effects if the text contains html markup.
:type wrap_slash: bool
:returns: A row to be added to a messaging table.
:rtype: safe.messaging.items.row.Row
"""
row = m.Row()
# Translate titles explicitly if possible
if keyword == 'title':
value = tr(value)
        # See #2569
if keyword == 'url':
if isinstance(value, QUrl):
value = value.toString()
if keyword == 'date':
if isinstance(value, QDateTime):
value = value.toString('d MMM yyyy')
elif isinstance(value, datetime):
value = value.strftime('%d %b %Y')
# we want to show the user the concept name rather than its key
# if possible. TS
keyword_definition = definition(keyword)
if keyword_definition is None:
keyword_definition = tr(keyword.capitalize().replace(
'_', ' '))
else:
try:
keyword_definition = keyword_definition['name']
except KeyError:
                # Handle the case where 'name' does not exist.
keyword_definition = keyword_definition['key'].capitalize()
keyword_definition = keyword_definition.replace('_', ' ')
# We deal with some special cases first:
# In this case the value contains a DICT that we want to present nicely
if keyword in [
'value_map',
'inasafe_fields',
'inasafe_default_values',
'extra_keywords']:
value = self._dict_to_row(value)
elif keyword == 'value_maps':
value = self._value_maps_row(value)
elif keyword == 'thresholds':
value = self._threshold_to_row(value)
# In these KEYWORD cases we show the DESCRIPTION for
# the VALUE keyword_definition
elif keyword in ['classification']:
# get the keyword_definition for this class from definitions
value = definition(value)
value = value['description']
# In these VALUE cases we show the DESCRIPTION for
# the VALUE keyword_definition
elif value in []:
# get the keyword_definition for this class from definitions
value = definition(value)
value = value['description']
# In these VALUE cases we show the NAME for the VALUE
# keyword_definition
elif value in [
'multiple_event',
'single_event',
'point',
'line',
                'polygon',
'field']:
# get the name for this class from definitions
value = definition(value)
value = value['name']
# otherwise just treat the keyword as literal text
else:
# Otherwise just directly read the value
value = get_string(value)
key = m.ImportantText(keyword_definition)
row.add(m.Cell(key))
row.add(m.Cell(value, wrap_slash=wrap_slash))
return row
@staticmethod
def _threshold_to_row(thresholds_keyword):
"""Helper to make a message row from a threshold
We are expecting something like this:
{
'thresholds': {
'structure': {
'ina_structure_flood_hazard_classification': {
'classes': {
'low': [1, 2],
'medium': [3, 4],
'high': [5, 6]
},
'active': True
},
'ina_structure_flood_hazard_4_class_classification':
{
'classes': {
'low': [1, 2],
'medium': [3, 4],
'high': [5, 6],
'very_high': [7, 8]
},
'active': False
}
},
'population': {
'ina_population_flood_hazard_classification': {
'classes': {
'low': [1, 2.5],
'medium': [2.5, 4.5],
'high': [4.5, 6]
},
'active': False
},
'ina_population_flood_hazard_4_class_classification':
{
'classes': {
'low': [1, 2.5],
'medium': [2.5, 4],
'high': [4, 6],
'very_high': [6, 8]
},
'active': True
}
},
},
        Each value is a list with exactly two elements [a, b], where a <= b.
:param thresholds_keyword: Value of the keyword to be rendered. This
must be a string representation of a dict, or a dict.
:type thresholds_keyword: basestring, dict
:returns: A table to be added into a cell in the keywords table.
:rtype: safe.messaging.items.table
"""
if isinstance(thresholds_keyword, basestring):
thresholds_keyword = literal_eval(thresholds_keyword)
for k, v in thresholds_keyword.items():
# If the v is not dictionary, it should be the old value maps.
# To handle thresholds in the Impact Function.
if not isinstance(v, dict):
table = m.Table(style_class='table table-condensed')
for key, value in thresholds_keyword.items():
row = m.Row()
name = definition(key)['name'] if definition(key) else key
row.add(m.Cell(m.ImportantText(name)))
pretty_value = tr('%s to %s' % (value[0], value[1]))
row.add(m.Cell(pretty_value))
table.add(row)
return table
table = m.Table(style_class='table table-condensed table-striped')
i = 0
for exposure_key, classifications in thresholds_keyword.items():
i += 1
exposure = definition(exposure_key)
exposure_row = m.Row()
            exposure_row.add(m.Cell(m.ImportantText(tr('Exposure'))))
exposure_row.add(m.Cell(m.Text(exposure['name'])))
exposure_row.add(m.Cell(''))
table.add(exposure_row)
active_classification = None
classification_row = m.Row()
            classification_row.add(m.Cell(m.ImportantText(tr('Classification'))))
for classification, value in classifications.items():
if value.get('active'):
active_classification = definition(classification)
classification_row.add(
m.Cell(active_classification['name']))
classification_row.add(m.Cell(''))
break
if not active_classification:
classification_row.add(m.Cell(tr('No classifications set.')))
classification_row.add(m.Cell(''))
continue
table.add(classification_row)
header = m.Row()
header.add(m.Cell(tr('Class name')))
header.add(m.Cell(tr('Minimum')))
header.add(m.Cell(tr('Maximum')))
table.add(header)
classes = active_classification.get('classes')
# Sort by value, put the lowest first
classes = sorted(classes, key=lambda k: k['value'])
for the_class in classes:
threshold = classifications[active_classification['key']][
'classes'][the_class['key']]
row = m.Row()
row.add(m.Cell(the_class['name']))
row.add(m.Cell(threshold[0]))
row.add(m.Cell(threshold[1]))
table.add(row)
if i < len(thresholds_keyword):
# Empty row
empty_row = m.Row()
empty_row.add(m.Cell(''))
empty_row.add(m.Cell(''))
table.add(empty_row)
return table
@staticmethod
def _dict_to_row(keyword_value):
"""Helper to make a message row from a keyword where value is a dict.
.. versionadded:: 3.2
Use this when constructing a table from keywords to display as
part of a message object. This variant will unpack the dict and
present it nicely in the keyword value area as a nested table in the
cell.
We are expecting keyword value would be something like this:
"{'high': ['Kawasan Rawan Bencana III'], "
"'medium': ['Kawasan Rawan Bencana II'], "
"'low': ['Kawasan Rawan Bencana I']}"
Or by passing a python dict object with similar layout to above.
i.e. A string representation of a dict where the values are lists.
:param keyword_value: Value of the keyword to be rendered. This must
be a string representation of a dict, or a dict.
:type keyword_value: basestring, dict
:returns: A table to be added into a cell in the keywords table.
:rtype: safe.messaging.items.table
"""
if isinstance(keyword_value, basestring):
keyword_value = literal_eval(keyword_value)
table = m.Table(style_class='table table-condensed')
# Sorting the key
for key in sorted(keyword_value.keys()):
value = keyword_value[key]
row = m.Row()
# First the heading
if definition(key):
name = definition(key)['name']
else:
name = tr(key.replace('_', ' ').capitalize())
row.add(m.Cell(m.ImportantText(name)))
# Then the value. If it contains more than one element we
# present it as a bullet list, otherwise just as simple text
if isinstance(value, (tuple, list, dict, set)):
if len(value) > 1:
bullets = m.BulletedList()
for item in value:
bullets.add(item)
row.add(m.Cell(bullets))
elif len(value) == 0:
row.add(m.Cell(""))
else:
row.add(m.Cell(value[0]))
else:
row.add(m.Cell(value))
table.add(row)
return table
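    # Illustrative sketch only: a dict and its repr() string render the
    # same one-row nested table, because string input is parsed with
    # ast.literal_eval first, e.g.
    #
    #   KeywordIO._dict_to_row({'low': ['Kawasan Rawan Bencana I']})
    #   KeywordIO._dict_to_row("{'low': ['Kawasan Rawan Bencana I']}")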
@staticmethod
def _value_maps_row(value_maps_keyword):
"""Helper to make a message row from a value maps.
Expected keywords:
'value_maps': {
'structure': {
'ina_structure_flood_hazard_classification': {
'classes': {
'low': [1, 2, 3],
'medium': [4],
'high': [5, 6]
},
'active': True
},
'ina_structure_flood_hazard_4_class_classification':
{
'classes': {
'low': [1],
'medium': [2, 3, 4],
'high': [5, 6, 7],
'very_high': [8]
},
'active': False
}
},
'population': {
'ina_population_flood_hazard_classification': {
'classes': {
'low': [1],
'medium': [2, 3],
'high': [4, 5, 6]
},
'active': False
},
'ina_population_flood_hazard_4_class_classification':
{
'classes': {
'low': [1, 2],
'medium': [3, 4],
'high': [4, 5, 6],
'very_high': [6, 7, 8]
},
'active': True
}
},
}
:param value_maps_keyword: Value of the keyword to be rendered. This
must be a string representation of a dict, or a dict.
:type value_maps_keyword: basestring, dict
:returns: A table to be added into a cell in the keywords table.
:rtype: safe.messaging.items.table
"""
if isinstance(value_maps_keyword, basestring):
value_maps_keyword = literal_eval(value_maps_keyword)
table = m.Table(style_class='table table-condensed table-striped')
i = 0
for exposure_key, classifications in value_maps_keyword.items():
i += 1
exposure = definition(exposure_key)
exposure_row = m.Row()
exposure_row.add(m.Cell(m.ImportantText(tr('Exposure'))))
exposure_row.add(m.Cell(exposure['name']))
table.add(exposure_row)
classification_row = m.Row()
classification_row.add(m.Cell(m.ImportantText(tr(
'Classification'))))
active_classification = None
for classification, value in classifications.items():
if value.get('active'):
active_classification = definition(classification)
if active_classification.get('name'):
classification_row.add(
m.Cell(active_classification['name']))
break
if not active_classification:
classification_row.add(m.Cell(tr('No classifications set.')))
continue
table.add(classification_row)
header = m.Row()
header.add(m.Cell(tr('Class name')))
header.add(m.Cell(tr('Values')))
table.add(header)
classes = active_classification.get('classes')
# Sort by value, put the lowest first
classes = sorted(classes, key=lambda k: k['value'])
for the_class in classes:
value_map = classifications[active_classification['key']][
'classes'].get(the_class['key'], [])
row = m.Row()
row.add(m.Cell(the_class['name']))
row.add(m.Cell(', '.join([str(v) for v in value_map])))
table.add(row)
if i < len(value_maps_keyword):
# Empty row
empty_row = m.Row()
empty_row.add(m.Cell(''))
empty_row.add(m.Cell(''))
table.add(empty_row)
return table
|
gpl-3.0
| 7,383,276,149,102,030,000 | 2,321,312,639,413,865,500 | 36.254576 | 79 | 0.525949 | false |
popazerty/SDG-gui
|
lib/python/Components/SelectionList.py
|
49
|
2073
|
from MenuList import MenuList
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from enigma import eListboxPythonMultiContent, eListbox, gFont, RT_HALIGN_LEFT
from Tools.LoadPixmap import LoadPixmap
import skin
selectionpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/lock_on.png"))
def SelectionEntryComponent(description, value, index, selected):
dx, dy, dw, dh = skin.parameters.get("SelectionListDescr",(25, 3, 650, 30))
res = [
(description, value, index, selected),
(eListboxPythonMultiContent.TYPE_TEXT, dx, dy, dw, dh, 0, RT_HALIGN_LEFT, description)
]
if selected:
ix, iy, iw, ih = skin.parameters.get("SelectionListLock",(0, 2, 25, 24))
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, ix, iy, iw, ih, selectionpng))
return res
class SelectionList(MenuList):
def __init__(self, list = None, enableWrapAround = False):
MenuList.__init__(self, list or [], enableWrapAround, content = eListboxPythonMultiContent)
font = skin.fonts.get("SelectionList", ("Regular", 20, 30))
self.l.setFont(0, gFont(font[0], font[1]))
self.l.setItemHeight(font[2])
def addSelection(self, description, value, index, selected = True):
self.list.append(SelectionEntryComponent(description, value, index, selected))
self.setList(self.list)
def toggleSelection(self):
idx = self.getSelectedIndex()
item = self.list[idx][0]
self.list[idx] = SelectionEntryComponent(item[0], item[1], item[2], not item[3])
self.setList(self.list)
def getSelectionsList(self):
return [ (item[0][0], item[0][1], item[0][2]) for item in self.list if item[0][3] ]
def toggleAllSelection(self):
for idx,item in enumerate(self.list):
item = self.list[idx][0]
self.list[idx] = SelectionEntryComponent(item[0], item[1], item[2], not item[3])
self.setList(self.list)
def sort(self, sortType=False, flag=False):
# sorting by sortType:
# 0 - description
# 1 - value
# 2 - index
# 3 - selected
self.list.sort(key=lambda x: x[0][sortType],reverse=flag)
self.setList(self.list)
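# A minimal usage sketch (illustrative only; assumes the usual enigma2
# screen context and a translation helper `_`):
#
#   sel = SelectionList()
#   sel.addSelection(_("Enable feature"), "feature", 0, selected=False)
#   sel.toggleSelection()             # flip the highlighted entry
#   chosen = sel.getSelectionsList()  # [(description, value, index), ...]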
|
gpl-2.0
| -4,999,476,450,307,070,000 | -4,521,112,416,401,943,600 | 38.113208 | 114 | 0.722624 | false |
lukeiwanski/tensorflow
|
tensorflow/python/layers/base_test.py
|
6
|
21660
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import base as base_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class BaseLayerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testLayerProperties(self):
layer = base_layers.Layer(name='my_layer')
self.assertEqual(layer.variables, [])
self.assertEqual(layer.trainable_variables, [])
self.assertEqual(layer.non_trainable_variables, [])
if not context.executing_eagerly():
# updates, losses only supported in GRAPH mode
self.assertEqual(layer.updates, [])
self.assertEqual(layer.losses, [])
self.assertEqual(layer.built, False)
layer = base_layers.Layer(name='my_layer', trainable=False)
self.assertEqual(layer.trainable, False)
@test_util.run_in_graph_and_eager_modes
def testInt64Layer(self):
layer = base_layers.Layer(name='my_layer', dtype='int64')
layer.add_variable('my_var', [2, 2])
self.assertEqual(layer.name, 'my_layer')
@test_util.run_in_graph_and_eager_modes
def testAddWeight(self):
layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
variable = layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'my_layer/my_var:0')
self.assertEqual(layer.variables, [variable])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
layer.variables,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Test non-trainable variable creation.
# layer.add_variable should work even outside `build` and `call`.
variable_2 = layer.add_variable(
'non_trainable_var', [2, 2],
initializer=init_ops.zeros_initializer(),
trainable=False)
self.assertEqual(layer.variables, [variable, variable_2])
self.assertEqual(layer.trainable_variables, [variable])
self.assertEqual(layer.non_trainable_variables, [variable_2])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
# regularizers only supported in GRAPH mode.
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
variable = layer.add_variable(
'reg_var', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(len(layer.losses), 1)
  def testReusePartitionedVariablesAndRegularizers(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
partitioner = partitioned_variables.fixed_size_partitioner(3)
for reuse in [False, True]:
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
partitioner=partitioner,
reuse=reuse):
layer = base_layers.Layer(name='my_layer')
variable = layer.add_variable(
'reg_part_var', [4, 4],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 3)
def testNoEagerActivityRegularizer(self):
with context.eager_mode():
with self.assertRaisesRegexp(ValueError, 'activity_regularizer'):
core_layers.Dense(1, activity_regularizer=lambda *args, **kwargs: 0.)
@test_util.run_in_graph_and_eager_modes
def testCall(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if not context.executing_eagerly():
# op is only supported in GRAPH mode
self.assertEqual(outputs.op.name, 'my_layer/Square')
@test_util.run_in_graph_and_eager_modes
def testDeepCopy(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
layer._private_tensor = random_ops.random_uniform(())
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
if not context.executing_eagerly():
# op only supported in GRAPH mode.
self.assertEqual(outputs.op.name, 'my_layer/Square')
layer_copy = copy.deepcopy(layer)
self.assertEqual(layer_copy.name, layer.name)
self.assertEqual(layer_copy._scope.name, layer._scope.name)
self.assertEqual(layer_copy._graph, layer._graph)
self.assertEqual(layer_copy._private_tensor, layer._private_tensor)
@test_util.run_in_graph_and_eager_modes
def testScopeNaming(self):
class PrivateLayer(base_layers.Layer):
def call(self, inputs):
return inputs
inputs = random_ops.random_uniform((5,))
default_layer = PrivateLayer()
_ = default_layer.apply(inputs)
self.assertEqual(default_layer._scope.name, 'private_layer')
default_layer1 = PrivateLayer()
default_layer1.apply(inputs)
self.assertEqual(default_layer1._scope.name, 'private_layer_1')
my_layer = PrivateLayer(name='my_layer')
my_layer.apply(inputs)
self.assertEqual(my_layer._scope.name, 'my_layer')
my_layer1 = PrivateLayer(name='my_layer')
my_layer1.apply(inputs)
self.assertEqual(my_layer1._scope.name, 'my_layer_1')
my_layer2 = PrivateLayer(name='my_layer')
my_layer2.apply(inputs)
self.assertEqual(my_layer2._scope.name, 'my_layer_2')
# Name scope shouldn't affect names.
with ops.name_scope('some_name_scope'):
default_layer2 = PrivateLayer()
default_layer2.apply(inputs)
self.assertEqual(default_layer2._scope.name, 'private_layer_2')
my_layer3 = PrivateLayer(name='my_layer')
my_layer3.apply(inputs)
self.assertEqual(my_layer3._scope.name, 'my_layer_3')
other_layer = PrivateLayer(name='other_layer')
other_layer.apply(inputs)
self.assertEqual(other_layer._scope.name, 'other_layer')
# Variable scope gets added to scope names.
with variable_scope.variable_scope('var_scope'):
default_layer_scoped = PrivateLayer()
default_layer_scoped.apply(inputs)
self.assertEqual(default_layer_scoped._scope.name,
'var_scope/private_layer')
my_layer_scoped = PrivateLayer(name='my_layer')
my_layer_scoped.apply(inputs)
self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer')
my_layer_scoped1 = PrivateLayer(name='my_layer')
my_layer_scoped1.apply(inputs)
self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1')
@test_util.run_in_graph_and_eager_modes
def testInputSpecNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected ndim=2'):
layer.apply(constant_op.constant([1]))
# Note that we re-create the layer since in Eager mode, input spec checks
# only happen on first call.
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecMinNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(min_ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected min_ndim=2'):
layer.apply(constant_op.constant([1]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[[1], [2]]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecMaxNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(max_ndim=2)
def call(self, inputs):
return inputs
if not context.executing_eagerly():
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected max_ndim=2'):
layer.apply(constant_op.constant([[[1], [2]]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1], [2]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecDtypeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(dtype='float32')
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected dtype=float32'):
layer.apply(constant_op.constant(1, dtype=dtypes.int32))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant(1.0, dtype=dtypes.float32))
@test_util.run_in_graph_and_eager_modes
def testInputSpecAxesCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(axes={-1: 2})
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected axis'):
layer.apply(constant_op.constant([1, 2, 3]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([1, 2]))
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
@test_util.run_in_graph_and_eager_modes
def testInputSpecShapeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(shape=(None, 3))
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError, r'expected shape'):
layer.apply(constant_op.constant([[1, 2]]))
# Works
layer = CustomerLayer()
layer.apply(constant_op.constant([[1, 2, 3], [4, 5, 6]]))
@test_util.run_in_graph_and_eager_modes
def testNoInputSpec(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = None
def call(self, inputs):
return inputs
layer = CustomerLayer()
layer.apply(constant_op.constant(1))
# Works
if not context.executing_eagerly():
layer.apply(array_ops.placeholder('int32'))
layer.apply(array_ops.placeholder('int32', shape=(2, 3)))
@test_util.run_in_graph_and_eager_modes
def test_count_params(self):
dense = core_layers.Dense(16)
dense.build((None, 4))
self.assertEqual(dense.count_params(), 16 * 4 + 16)
dense = core_layers.Dense(16)
with self.assertRaises(ValueError):
dense.count_params()
@test_util.run_in_graph_and_eager_modes
def testDictInputOutput(self):
class DictLayer(base_layers.Layer):
def call(self, inputs):
return {'l' + key: inputs[key] for key in inputs}
layer = DictLayer()
if context.executing_eagerly():
i1 = constant_op.constant(3)
i2 = constant_op.constant(4.0)
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
self.assertEqual(3, result['label'].numpy())
self.assertEqual(4.0, result['logits'].numpy())
else:
i1 = array_ops.placeholder('int32')
i2 = array_ops.placeholder('float32')
result = layer.apply({'abel': i1, 'ogits': i2})
self.assertTrue(isinstance(result, dict))
self.assertEqual(set(['label', 'logits']), set(result.keys()))
def testActivityRegularizer(self):
regularizer = math_ops.reduce_sum
layer = base_layers.Layer(activity_regularizer=regularizer)
x = array_ops.placeholder('int32')
layer.apply(x)
self.assertEqual(len(layer.get_losses_for(x)), 1)
def testNameScopeIsConsistentWithVariableScope(self):
# Github issue 13429.
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.my_var = self.add_variable('my_var', (), dtypes.float32)
self.built = True
def call(self, inputs):
return math_ops.multiply(inputs, self.my_var, name='my_op')
def _gen_layer(x, name=None):
layer = MyLayer(name=name)
out = layer.apply(x)
return layer, out
# unnamed layer
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x)
layer1, op1 = _gen_layer(op)
layer2, op2 = _gen_layer(op1)
self.assertEqual(layer.my_var.name, 'my_layer/my_var:0')
self.assertEqual(op.name, 'my_layer/my_op:0')
self.assertEqual(layer1.my_var.name, 'my_layer_1/my_var:0')
self.assertEqual(op1.name, 'my_layer_1/my_op:0')
self.assertEqual(layer2.my_var.name, 'my_layer_2/my_var:0')
self.assertEqual(op2.name, 'my_layer_2/my_op:0')
# name starts from zero
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x, name='name')
layer1, op1 = _gen_layer(op, name='name_1')
layer2, op2 = _gen_layer(op1, name='name_2')
self.assertEqual(layer.my_var.name, 'name/my_var:0')
self.assertEqual(op.name, 'name/my_op:0')
self.assertEqual(layer1.my_var.name, 'name_1/my_var:0')
self.assertEqual(op1.name, 'name_1/my_op:0')
self.assertEqual(layer2.my_var.name, 'name_2/my_var:0')
self.assertEqual(op2.name, 'name_2/my_op:0')
# name starts from one
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, (), 'x')
layer, op = _gen_layer(x, name='name_1')
layer1, op1 = _gen_layer(op, name='name_2')
layer2, op2 = _gen_layer(op1, name='name_3')
self.assertEqual(layer.my_var.name, 'name_1/my_var:0')
self.assertEqual(op.name, 'name_1/my_op:0')
self.assertEqual(layer1.my_var.name, 'name_2/my_var:0')
self.assertEqual(op1.name, 'name_2/my_op:0')
self.assertEqual(layer2.my_var.name, 'name_3/my_var:0')
self.assertEqual(op2.name, 'name_3/my_op:0')
def testVariablesAreLiftedFromFunctionBuildingGraphs(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.my_var = self.add_variable('my_var', (), dtypes.float32)
self.built = True
def call(self, inputs):
return inputs
outer_graph = ops.get_default_graph()
function_building_graph = ops.Graph()
function_building_graph._building_function = True
with outer_graph.as_default():
with function_building_graph.as_default():
layer = MyLayer()
# Create a variable by invoking build through __call__ and assert that
# it is both tracked and lifted into the outer graph.
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
layer.apply(inputs)
self.assertEqual(len(layer.variables), 1)
self.assertEqual(len(layer.trainable_variables), 1)
self.assertEqual(layer.variables[0].graph, outer_graph)
def testGetUpdateFor(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(),
dtypes.float32,
trainable=False)
self.b = self.add_variable('b',
(),
dtypes.float32,
trainable=False)
self.add_update(state_ops.assign_add(self.a, 1., name='b_update'))
self.built = True
def call(self, inputs):
self.add_update(state_ops.assign_add(self.a, inputs, name='a_update'),
inputs=True)
return inputs + 1
layer = MyLayer()
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(None)), 1)
self.assertEqual(len(layer.get_updates_for([inputs])), 1)
self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_updates_for([outputs])), 0)
# Call same layer on new input, creating one more conditional update
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.updates), 3)
self.assertEqual(len(layer.get_updates_for(None)), 1)
# Check that we are successfully filtering out irrelevant updates
self.assertEqual(len(layer.get_updates_for([inputs])), 1)
self.assertEqual(len(layer.get_updates_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_updates_for([outputs])), 0)
def testGetLossesFor(self):
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(),
dtypes.float32,
trainable=False)
self.b = self.add_variable('b',
(),
dtypes.float32,
trainable=False)
self.add_loss(self.a)
self.built = True
def call(self, inputs):
self.add_loss(inputs, inputs=True)
return inputs + 1
layer = MyLayer()
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.losses), 2)
self.assertEqual(len(layer.get_losses_for(None)), 1)
self.assertEqual(len(layer.get_losses_for([inputs])), 1)
self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_losses_for([outputs])), 0)
# Call same layer on new input, creating one more conditional loss
inputs = array_ops.placeholder(dtypes.float32, (), 'inputs')
intermediate_inputs = inputs + 1
outputs = layer.apply(intermediate_inputs)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.get_losses_for(None)), 1)
# Check that we are successfully filtering out irrelevant losses
self.assertEqual(len(layer.get_losses_for([inputs])), 1)
self.assertEqual(len(layer.get_losses_for([intermediate_inputs])), 1)
self.assertEqual(len(layer.get_losses_for([outputs])), 0)
def testLayerGraphSetInFirstApply(self):
with ops.Graph().as_default():
# Graph at construction time is ignored
layer = core_layers.Dense(1)
with ops.Graph().as_default():
layer.apply(constant_op.constant([[1.]]))
# layer is now bound to second Graph
with ops.Graph().as_default(), self.assertRaisesRegexp(
ValueError, 'Input graph and Layer graph are not the same'):
layer.apply(constant_op.constant([[1.]]))
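# A minimal sketch of the pattern exercised throughout these tests
# (illustrative only; not part of the suite): subclass base_layers.Layer,
# create variables in build(), compute in call().
#
#   class Scale(base_layers.Layer):
#
#     def build(self, input_shape):
#       self.w = self.add_variable('w', (), dtypes.float32)
#       self.built = True
#
#     def call(self, inputs):
#       return inputs * self.w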
if __name__ == '__main__':
test.main()
|
apache-2.0
| -6,733,528,671,662,207,000 | 2,776,411,650,745,833,000 | 35.526138 | 80 | 0.653786 | false |
llloret/supercollider
|
tools/clang-format.py
|
2
|
27928
|
#!/usr/bin/env python
from __future__ import print_function, absolute_import, unicode_literals
import difflib
import glob
import os
import re
import string
import subprocess
import sys
import threading
from argparse import ArgumentParser
# Whichcraft backported shutil.which implementation
# Taken from https://github.com/pydanny/whichcraft/blob/master/whichcraft.py (version 0.5.3)
#
# BEGIN BSD-LICENSED CODE
#
# Copyright (c) 2015-2016, Daniel Roy Greenfeld All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions and
# the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of whichcraft nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try: # Forced testing
from shutil import which
except ImportError: # Forced testing
# Versions prior to Python 3.3 don't have shutil.which
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
Note: This function was backported from the Python 3 source code.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise we
# have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
##############################################################################
# END BSD-LICENSED CODE
##############################################################################
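def _example_which():
    # Illustrative sketch only (not called by this script): `which`
    # mirrors shutil.which, returning the resolved path or None. This is
    # how ClangFormat.__init__ below detects a missing clang-format.
    return which('git')  # e.g. '/usr/bin/git', or None when not installed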
##############################################################################
#
# Constants
#
CLANG_FORMAT_ACCEPTED_VERSION_REGEX = re.compile("8\\.\\d+\\.\\d+")
CLANG_FORMAT_ACCEPTED_VERSION_STRING = "8.y.z"
# all the extensions we format with clang-format in SC (no JS!)
CLANG_FORMAT_FILES_REGEX = re.compile('\\.(cpp|hpp|h|c|m|mm)$')
# autogen'd files, don't touch
AUTOGEN_FILES_REGEX = re.compile('(SCDoc\\.tab\\..pp|lex\\.scdoc\\.cpp|lang11d_tab\\..*)$')
# the destination filename for a git diff
DIFF_FILENAME_REGEX = re.compile('^\\+\\+\\+ b/(.*)$', re.MULTILINE)
##############################################################################
def callo(args):
"""Call a program, and capture its output
"""
return subprocess.check_output(args).decode('utf-8')
def callo_with_input(args, inputdata):
"""Call a program, pipe input into it, and capture its output
"""
pipe = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return pipe.communicate(inputdata.encode('utf-8'))[0].decode('utf-8')
def get_base_dir():
"""Get the base directory for repo.
"""
try:
return subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).rstrip().decode('utf-8')
except:
print("This script must be running in a git repo")
sys.exit(2)
class Repo(object):
"""Class encapsulates all knowledge about a git repository, and its metadata
to run clang-format.
"""
def __init__(self, path):
self.path = path
def _callgito(self, args):
"""Call git for this repository, and return the captured output
"""
        # These two flags are the equivalent of -C in newer versions of Git;
        # we use them to support Git versions before 1.8.5. The effect
        # depends on the command being run and on the current directory.
return callo(['git', '--git-dir', os.path.join(self.path, ".git"),
'--work-tree', self.path] + args)
def _callgit(self, args, stdout=None):
"""Call git for this repository without capturing output
This is designed to be used when git returns non-zero exit codes.
"""
        # These two flags are the equivalent of -C in newer versions of Git;
        # we use them to support Git versions before 1.8.5. The effect
        # depends on the command being run and on the current directory.
return subprocess.call(['git', '--git-dir', os.path.join(self.path, ".git"),
'--work-tree', self.path] + args, stdout=stdout)
def is_detached(self):
# symbolic-ref returns 1 if the repo is in a detached HEAD state
with open(os.devnull, 'w') as DEVNULL:
return self._callgit(["symbolic-ref", "--quiet", "HEAD"], stdout=DEVNULL)
def is_ancestor(self, parent, child):
# merge base returns 0 if parent is an ancestor of child
return not self._callgit(["merge-base", "--is-ancestor", parent, child])
def is_commit(self, sha1):
# cat-file -e returns 0 if it is a valid hash
return not self._callgit(["cat-file", "-e", "%s^{commit}" % sha1])
def is_working_tree_dirty(self):
# diff returns 1 if the working tree has local changes
return self._callgit(["diff", "--quiet"])
def does_branch_exist(self, branch):
# rev-parse returns 0 if the branch exists
return not self._callgit(["rev-parse", "--verify", "--quiet", branch])
def get_merge_base(self, commit):
return self._callgito(["merge-base", "HEAD", commit]).rstrip()
def get_branch_name(self):
"""Get the current branch name, short form
This returns "master", not "refs/heads/master"
Will not work if the current branch is detached
"""
branch = self.rev_parse(["--abbrev-ref", "HEAD"])
if branch == "HEAD":
raise ValueError("Branch is currently detached")
return branch
def add(self, command): return self._callgito(["add"] + command)
def checkout(self, command): return self._callgito(["checkout"] + command)
def commit(self, command): return self._callgito(["commit"] + command)
def diff(self, command): return self._callgito(["diff"] + command)
def log(self, command): return self._callgito(["log"] + command)
def rev_parse(self, command): return self._callgito(["rev-parse"] + command).rstrip()
def rm(self, command): return self._callgito(["rm"] + command)
def show(self, command): return self._callgito(["show"] + command)
def lsfiles(self): return self._callgito(["ls-files"])
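# Sketch of how the thin wrappers above compose (illustrative only):
#   repo = Repo(get_base_dir())
#   if not repo.is_working_tree_dirty():
#       print(repo.get_branch_name())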
class ClangFormat(object):
"""Class encapsulates finding a suitable copy of clang-format,
and linting/formatting an individual file
"""
def __init__(self, cf_cmd):
self.cf_cmd = cf_cmd
if which(cf_cmd) is None:
raise ValueError("Could not find clang-format at %s" % cf_cmd)
self._validate_version()
def _validate_version(self):
cf_version = callo([self.cf_cmd, "--version"])
if CLANG_FORMAT_ACCEPTED_VERSION_REGEX.search(cf_version):
return
# TODO add instructions to check docs when docs are written
raise ValueError("clang-format found, but incorrect version at " +
self.cf_cmd + " with version: " + cf_version + "\nAccepted versions: " +
CLANG_FORMAT_ACCEPTED_VERSION_STRING)
sys.exit(5)
def lint(self, file_name, print_diff):
"""Check the specified file has the correct format
"""
with open(file_name, 'rb') as original_text:
original_file = original_text.read().decode('utf-8')
# Get formatted file as clang-format would format the file
formatted_file = callo([self.cf_cmd, '-style=file', file_name])
if original_file != formatted_file:
if print_diff:
original_lines = original_file.splitlines()
formatted_lines = formatted_file.splitlines()
result = difflib.unified_diff(original_lines, formatted_lines, file_name, file_name)
for line in result:
print(line.rstrip())
return False
return True
def format(self, file_name):
"""Update the format of the specified file
"""
if self.lint(file_name, print_diff=False):
return True
# Update the file with clang-format
formatted = not subprocess.call([self.cf_cmd, '-style=file', '-i', file_name])
# Version 3.8 generates files like foo.cpp~RF83372177.TMP when it formats foo.cpp
# on Windows, we must clean these up
if sys.platform == "win32":
glob_pattern = file_name + "*.TMP"
for fglob in glob.glob(glob_pattern):
os.unlink(fglob)
return formatted
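# Note on the return value: format() reports True both when the file was
# already clean (lint passed) and when clang-format rewrote it with exit
# status 0; it reports False only when the clang-format invocation fails.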
def get_list_from_lines(lines):
""""Convert a string containing a series of lines into a list of strings
"""
return [line.rstrip() for line in lines.splitlines()]
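# For example, get_list_from_lines("a\nb \n") returns ['a', 'b']:
# splitlines() drops the trailing newline and rstrip() trims each entry.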
def validate_repo_state_for_rebase(commit_before_reformat, commit_after_reformat, target_branch):
if sys.version_info[0] == 2:
cwd = os.getcwdu()
else:
cwd = os.getcwd()
if os.path.normpath(cwd) != os.path.normpath(get_base_dir()):
raise ValueError("reformat-branch must be run from the repo root")
repo = Repo(get_base_dir())
if not repo.is_commit(commit_before_reformat):
raise ValueError("Commit before reformat '%s' is not a valid commit in this repo" %
commit_before_reformat)
if not repo.is_commit(commit_after_reformat):
raise ValueError("Commit after reformat '%s' is not a valid commit in this repo" %
commit_after_reformat)
if not repo.is_ancestor(commit_before_reformat, commit_after_reformat):
raise ValueError(("Commit before reformat '%s' is not a valid ancestor of commit after" +
" reformat '%s' in this repo") % (commit_before_reformat, commit_after_reformat))
if repo.is_detached():
raise ValueError("You must not run this script in a detached HEAD state")
if repo.is_working_tree_dirty():
raise ValueError("Your working tree has pending changes. You must have a clean working" +
" tree before proceeding.\n\nRun `git status` to see your pending changes, and then" +
" try `git stash save`, `git reset --hard`, `git submodule update` and/or committing" +
" your changes.")
merge_base = repo.get_merge_base(commit_before_reformat)
if merge_base != repo.rev_parse([commit_before_reformat]):
raise ValueError(("Merge base is '%s'. Please rebase to '%s' and resolve all conflicts" +
" before running this script.\n\nTo interactively rebase, use `git rebase -i %s`") %
(merge_base, commit_before_reformat, commit_before_reformat))
# We assume the target branch is master; it could be a different branch if needed for testing
merge_base = repo.get_merge_base(target_branch)
if merge_base != repo.rev_parse([commit_before_reformat]):
raise ValueError("This branch appears to already have advanced too far through the merge process")
return repo
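# When this function returns, the invariants rebase_branch() relies on hold:
# a clean working tree, an attached HEAD, and commit_before_reformat equal to
# the merge base of HEAD with the target branch (the branch forked there).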
def get_branch_names(repo):
# Everything looks good, so let's start going through all the commits
branch_name = repo.get_branch_name()
new_branch = branch_name + "-reformatted"
if repo.does_branch_exist(new_branch):
raise ValueError("The branch '%s' already exists. Please delete the branch '%s', or rename the current branch." % (new_branch, new_branch))
return (branch_name, new_branch)
def is_3rd_party_file(name):
return name.find('external_libraries') != -1
def is_autogen_file(name):
return AUTOGEN_FILES_REGEX.search(name)
def is_clang_formattable(name):
return CLANG_FORMAT_FILES_REGEX.search(name)
def is_wanted_clang_formattable_file(f):
"""Is this something we want to use ClangFormat to format?
"""
return is_clang_formattable(f) and not is_3rd_party_file(f) and not is_autogen_file(f)
def get_all_clang_formattable_files(repo):
files = get_list_from_lines(repo.lsfiles())
return [f for f in files if is_wanted_clang_formattable_file(f)]
def rebase_branch(clang_format, commit_before_reformat, commit_after_reformat, target_branch):
"""Reformat a branch made before a clang-format run
"""
clang_format = ClangFormat(clang_format)
repo = validate_repo_state_for_rebase(commit_before_reformat, commit_after_reformat, target_branch)
old_branch, new_branch = get_branch_names(repo)
commits = get_list_from_lines(repo.log(["--reverse", "--pretty=format:%H", "%s..HEAD" % commit_before_reformat]))
previous_commit_base = commit_after_reformat
# Go through all the commits the user made on the local branch and migrate to a new branch
# that is based on post_reformat commits instead
for idx, commit_hash in enumerate(commits):
print("--- Formatting " + commit_hash + (" (%s of %s)" % (idx + 1, len(commits))))
repo.checkout(["--quiet", "--detach", commit_hash])
deleted_files = []
# Format each of the files by checking out just a single commit from the user's branch
commit_files = get_list_from_lines(repo.diff(["HEAD~", "--name-only"]))
for commit_file in commit_files:
# Format each file needed if it was not deleted
if not os.path.exists(commit_file):
print("\tSkipping file '%s' since it has been deleted in commit '%s'" % (
commit_file, commit_hash))
deleted_files.append(commit_file)
continue
if is_3rd_party_file(commit_file):
print("\tSkipping external libraries file '%s'" % commit_file)
elif is_autogen_file(commit_file):
print("\tSkipping autogenerated file '%s'" % commit_file)
elif is_clang_formattable(commit_file):
clang_format.format(commit_file)
else:
print("\tSkipping file '%s' (no formatting to apply)" % commit_file)
# Check if anything needed reformatting, and if so amend the commit
if not repo.is_working_tree_dirty():
print ("Commit %s needed no reformatting" % commit_hash)
else:
repo.commit(["--all", "--amend", "--no-edit"])
# Rebase our new commit on top of the post-reformat commit
previous_commit = repo.rev_parse(["HEAD"])
# Checkout the new branch with the reformatted commits
# Note: we will not name as a branch until we are done with all commits on the local branch
repo.checkout(["--quiet", "--detach", previous_commit_base])
# Copy each file from the reformatted commit on top of the post-reformat base
diff_files = get_list_from_lines(repo.diff(["%s~..%s" % (previous_commit, previous_commit),
"--name-only"]))
for diff_file in diff_files:
# If the file was deleted in the commit we are reformatting, we need to delete it again
if diff_file in deleted_files:
repo.rm([diff_file])
continue
# The file has been added or modified, continue as normal
file_contents = repo.show(["%s:%s" % (previous_commit, diff_file)])
root_dir = os.path.dirname(diff_file)
if root_dir and not os.path.exists(root_dir):
os.makedirs(root_dir)
with open(diff_file, "w+") as new_file:
new_file.write(file_contents)
repo.add([diff_file])
# Create a new commit onto clang-formatted branch
repo.commit(["--reuse-message=%s" % previous_commit])
previous_commit_base = repo.rev_parse(["HEAD"])
# Create a new branch to mark the hashes we have been using
repo.checkout(["-b", new_branch])
print("reformat-branch is done running.\n")
print("A copy of your branch has been made named '%s', and formatted with clang-format.\n" % new_branch)
print("The original branch has been left unchanged.")
print("If you have not just done so, the next step is to rebase the new branch on '%s'.\n" % target_branch)
print("To undo this, run `git checkout %s && git branch -D %s`" % (old_branch, new_branch))
def is_wanted_diff(diff_text):
# Extract file name
match = DIFF_FILENAME_REGEX.search(diff_text)
if not match:
if '+++ /dev/null' in diff_text:
# The file was deleted, so ignore it:
return False
raise ValueError("Could not extract filename from diff")
return is_wanted_clang_formattable_file(match.group(1))
def filter_unwanted_files_from_diff(diff_text):
# git diff was called with -U0, so no content line can start with 'diff';
# only the per-file headers do. Couldn't find a way to split on lookaheads,
# so re-attach the 'diff' prefix after splitting instead.
# [1:] discards the initial empty string produced by the split.
diffs = ['diff' + match for match in re.split('^diff', diff_text, flags=re.MULTILINE)][1:]
filter_diffs = [diff for diff in diffs if is_wanted_diff(diff)]
return ''.join(filter_diffs)
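# Illustration of the split (hypothetical input): for text containing two file
# diffs, re.split('^diff', ..., flags=re.MULTILINE) returns ['', ' --git ...',
# ' --git ...']; re-attaching the 'diff' prefix restores each complete diff,
# and is_wanted_diff() then drops deleted or non-formattable files.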
def prepare_diff_for_lint_format(clang_format, commit):
ClangFormat(clang_format) # validation
repo = Repo(get_base_dir())
if not repo.is_commit(commit):
raise ValueError("Commit before reformat '%s' is not a valid commit in this repo" % commit)
os.chdir(repo.path)
diff_text = repo.diff([commit, '-U0', '--no-color'])
return filter_unwanted_files_from_diff(diff_text)
def do_lint(clang_format, clang_format_diff, commit):
diff_text = prepare_diff_for_lint_format(clang_format, commit)
lint_out = callo_with_input(['python', clang_format_diff, '-p1', '-binary', clang_format], diff_text)
print(lint_out, end='')
if lint_out != '\n' and lint_out != '':
sys.exit(1)
def do_format(clang_format, clang_format_diff, commit):
diff_text = prepare_diff_for_lint_format(clang_format, commit)
callo_with_input(['python', clang_format_diff, '-i', '-p1', '-binary', clang_format], diff_text)
def do_lintall(clang_format):
repo = Repo(get_base_dir())
os.chdir(repo.path)
clang_format = ClangFormat(clang_format)
no_changes_needed = True
for f in get_all_clang_formattable_files(repo):
no_changes_needed = clang_format.lint(f, True) and no_changes_needed
if not no_changes_needed:
sys.exit(1)
def do_formatall(clang_format):
repo = Repo(get_base_dir())
os.chdir(repo.path)
clang_format = ClangFormat(clang_format)
for f in get_all_clang_formattable_files(repo):
clang_format.format(f)
def resolve_program_name(cmd_line_option, env_var_name, default_program_name):
if cmd_line_option != '':
return cmd_line_option
elif env_var_name in os.environ and os.environ[env_var_name] != '':
return os.environ[env_var_name]
else:
return default_program_name
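# Resolution order, illustrated for the clang-format program name:
#   resolve_program_name(options.clang_format, 'SC_CLANG_FORMAT', 'clang-format')
# prefers the command-line flag, then a non-empty $SC_CLANG_FORMAT, and
# finally the literal default 'clang-format'.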
def main():
parser = ArgumentParser(
usage='''
format.py lint [commit]
format.py format [commit]
format.py lintall
format.py formatall
format.py rebase -b base-branch
format.py rebase commit1 commit2 target
PLEASE READ.
This script provides commands for linting and formatting your working directory. It provides five
commands:
1. `lint` lints the diff between the working directory and a given commit
2. `format` will apply formatting rules to the diff between working directory and given commit
3. `lintall` lints all available files for various formatting rules and indicates any problems.
4. `formatall` formats all available files.
5. `rebase` reformats a branch past the great reformatting wall. It can be run two ways; the second
is simpler and usually works.
a. `format.py rebase commit-right-before-reformat commit-after-reformat original-branch`
b. `format.py rebase -b 3.10` (or `format.py rebase -b develop`)
Rebase requires:
- you have a clean working directory
- you have rebased your branch on commit-right-before-reformat (implicitly for the second usage)
- you have the branch you want to rebase currently checked out
If there is an issue, this script will most likely detect it and provide you with commands to
proceed.
'commit' arguments can be a branch name, tag, or commit hash.
This script will exit with 0 on success, 1 to indicate lint failure, and >1 if some other error
occurs.
''')
parser.add_argument("-c", "--clang-format", dest="clang_format", default='',
help='Command to use for clang-format; will also be passed to clang-format-diff.py.'
+ ' Defaults to environment variable SC_CLANG_FORMAT if it is set and non-empty,'
+ ' otherwise `clang-format`')
parser.add_argument("-b", "--base", dest="base_branch", help='Tries to rebase on the tip of this'
+ ' branch given a base branch name (experimental). This should be the main branch the'
+ ' current branch is based on (3.10 or develop)')
parser.add_argument("-d", "--clang-format-diff", dest="clang_format_diff", default='',
help='Command to use for the clang-format-diff.py script.'
+ ' Defaults to environment variable SC_CLANG_FORMAT_DIFF if it is set and non-empty,'
+ ' otherwise `clang-format-diff.py`')
parser.add_argument("command", help="command; one of lint, format, lintall, formatall, rebase")
parser.add_argument("commit1", help="for lint and format: commit to compare against (default: HEAD);" +
" for rebase: commit immediately prior to reformat", nargs='?', default='')
parser.add_argument("commit2", help="commit after reformat", nargs='?', default='')
parser.add_argument("target", help="target branch name (likely 3.10 or develop)", nargs='?', default='')
options = parser.parse_args()
options.clang_format = resolve_program_name(options.clang_format, 'SC_CLANG_FORMAT', 'clang-format')
options.clang_format_diff = resolve_program_name(options.clang_format_diff, 'SC_CLANG_FORMAT_DIFF', 'clang-format-diff.py')
try:
if options.command == 'lint' or options.command == 'format':
commit = 'HEAD' if options.commit1 == '' else options.commit1
# For portability, we use the full path of the clang-format-diff.py script. subprocess
# module on Windows won't be able to find a Python-executable python script in PATH, and
# if we invoke it with `python <script> <args>` then the python interpreter needs the
# full path of the script. Of course, the downside is that we use whatever `python`
# resolves to in the host system's shell.
clang_format_diff_path = which(options.clang_format_diff)
if clang_format_diff_path is None:
if options.clang_format_diff == 'clang-format-diff.py':
raise ValueError(
"Could not find clang-format-diff.py. "
"Please ensure that clang %s is installed and that "
"clang-format-diff.py is in your PATH."
% CLANG_FORMAT_ACCEPTED_VERSION_STRING)
else:
raise ValueError("Could not find clang-format-diff.py at %s." % options.clang_format_diff)
if options.command == 'lint':
do_lint(options.clang_format, clang_format_diff_path, commit)
else:
do_format(options.clang_format, clang_format_diff_path, commit)
elif options.command == 'lintall':
do_lintall(options.clang_format)
elif options.command == 'formatall':
do_formatall(options.clang_format)
elif options.command == 'rebase':
if not options.commit1 or not options.commit2 or not options.target:
if not options.base_branch:
parser.print_help()
sys.exit(2)
if options.base_branch == '3.10':
options.commit1 = 'tag-clang-format-3.10^'
options.commit2 = options.target = 'tag-clang-format-3.10'
elif options.base_branch == 'develop':
options.commit1 = 'tag-clang-format-develop^'
options.commit2 = options.target = 'tag-clang-format-develop'
else:
print("Don't know how to use this base branch: %s. Try using the three-argument " +
"version of rebase command")
sys.exit(3)
rebase_branch(options.clang_format, options.commit1, options.commit2, options.target)
else:
parser.print_help()
sys.exit(4)
except ValueError as ve:
# print entire traceback to aid in diagnosing issues
import traceback
traceback.print_tb(sys.exc_info()[2])
print("\n*** ERROR:\n" + str(ve) + "\n")
sys.exit(6)
if __name__ == "__main__":
main()
|
gpl-3.0
| 970,331,090,093,780,500 | -2,559,975,911,628,278,300 | 43.189873 | 147 | 0.630264 | false |
trungnt13/scikit-learn
|
examples/feature_selection/plot_rfe_with_cross_validation.py
|
226
|
1384
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
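# RFECV wraps recursive feature elimination in cross-validation: it repeatedly
# refits the linear SVC, discarding `step` features per iteration, and keeps
# the subset size with the best mean 2-fold stratified accuracy.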
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
|
bsd-3-clause
| -2,182,977,687,430,927,600 | -287,232,138,277,078,430 | 36.405405 | 74 | 0.679191 | false |
BackupGGCode/python-for-android
|
python-modules/twisted/twisted/words/test/test_jabberxmlstream.py
|
49
|
42183
|
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.xmlstream}.
"""
from twisted.trial import unittest
from zope.interface.verify import verifyObject
from twisted.internet import defer, task
from twisted.internet.error import ConnectionLost
from twisted.internet.interfaces import IProtocolFactory
from twisted.test import proto_helpers
from twisted.words.test.test_xmlstream import GenericXmlStreamFactoryTestsMixin
from twisted.words.xish import domish
from twisted.words.protocols.jabber import error, ijabber, jid, xmlstream
NS_XMPP_TLS = 'urn:ietf:params:xml:ns:xmpp-tls'
class HashPasswordTest(unittest.TestCase):
"""
Tests for L{xmlstream.hashPassword}.
"""
def test_basic(self):
"""
The sid and secret are concatenated to calculate sha1 hex digest.
"""
hash = xmlstream.hashPassword(u"12345", u"secret")
self.assertEqual('99567ee91b2c7cabf607f10cb9f4a3634fa820e0', hash)
def test_sidNotUnicode(self):
"""
The session identifier must be a unicode object.
"""
self.assertRaises(TypeError, xmlstream.hashPassword, "\xc2\xb92345",
u"secret")
def test_passwordNotUnicode(self):
"""
The password must be a unicode object.
"""
self.assertRaises(TypeError, xmlstream.hashPassword, u"12345",
"secr\xc3\xa9t")
def test_unicodeSecret(self):
"""
The concatenated sid and password must be encoded to UTF-8 before hashing.
"""
hash = xmlstream.hashPassword(u"12345", u"secr\u00e9t")
self.assertEqual('659bf88d8f8e179081f7f3b4a8e7d224652d2853', hash)
class IQTest(unittest.TestCase):
"""
Tests both IQ and the associated IIQResponseTracker callback.
"""
def setUp(self):
authenticator = xmlstream.ConnectAuthenticator('otherhost')
authenticator.namespace = 'testns'
self.xmlstream = xmlstream.XmlStream(authenticator)
self.clock = task.Clock()
self.xmlstream._callLater = self.clock.callLater
self.xmlstream.makeConnection(proto_helpers.StringTransport())
self.xmlstream.dataReceived(
"<stream:stream xmlns:stream='http://etherx.jabber.org/streams' "
"xmlns='testns' from='otherhost' version='1.0'>")
self.iq = xmlstream.IQ(self.xmlstream, 'get')
def testBasic(self):
self.assertEquals(self.iq['type'], 'get')
self.assertTrue(self.iq['id'])
def testSend(self):
self.xmlstream.transport.clear()
self.iq.send()
self.assertEquals("<iq type='get' id='%s'/>" % self.iq['id'],
self.xmlstream.transport.value())
def testResultResponse(self):
def cb(result):
self.assertEquals(result['type'], 'result')
d = self.iq.send()
d.addCallback(cb)
xs = self.xmlstream
xs.dataReceived("<iq type='result' id='%s'/>" % self.iq['id'])
return d
def testErrorResponse(self):
d = self.iq.send()
self.assertFailure(d, error.StanzaError)
xs = self.xmlstream
xs.dataReceived("<iq type='error' id='%s'/>" % self.iq['id'])
return d
def testNonTrackedResponse(self):
"""
Test that untracked iq responses don't trigger any action.
Untracked means that the id of the incoming response iq is not
in the stream's C{iqDeferreds} dictionary.
"""
xs = self.xmlstream
xmlstream.upgradeWithIQResponseTracker(xs)
# Make sure we aren't tracking any iq's.
self.assertFalse(xs.iqDeferreds)
# Set up a fallback handler that checks the stanza's handled attribute.
# If that is set to True, the iq tracker claims to have handled the
# response.
def cb(iq):
self.assertFalse(getattr(iq, 'handled', False))
xs.addObserver("/iq", cb, -1)
# Receive an untracked iq response
xs.dataReceived("<iq type='result' id='test'/>")
def testCleanup(self):
"""
Test if the deferred associated with an iq request is removed
from the list kept in the L{XmlStream} object after it has
been fired.
"""
d = self.iq.send()
xs = self.xmlstream
xs.dataReceived("<iq type='result' id='%s'/>" % self.iq['id'])
self.assertNotIn(self.iq['id'], xs.iqDeferreds)
return d
def testDisconnectCleanup(self):
"""
Test if deferreds for iq's that haven't yet received a response
have their errback called on stream disconnect.
"""
d = self.iq.send()
xs = self.xmlstream
xs.connectionLost("Closed by peer")
self.assertFailure(d, ConnectionLost)
return d
def testNoModifyingDict(self):
"""
Test to make sure the errbacks cannot cause the iteration of the
iqDeferreds to blow up in our face.
"""
def eb(failure):
d = xmlstream.IQ(self.xmlstream).send()
d.addErrback(eb)
d = self.iq.send()
d.addErrback(eb)
self.xmlstream.connectionLost("Closed by peer")
return d
def testRequestTimingOut(self):
"""
Test that an iq request with a defined timeout times out.
"""
self.iq.timeout = 60
d = self.iq.send()
self.assertFailure(d, xmlstream.TimeoutError)
self.clock.pump([1, 60])
self.assertFalse(self.clock.calls)
self.assertFalse(self.xmlstream.iqDeferreds)
return d
def testRequestNotTimingOut(self):
"""
Test that an iq request with a defined timeout does not time out
when a response was received before the timeout period elapsed.
"""
self.iq.timeout = 60
d = self.iq.send()
self.clock.callLater(1, self.xmlstream.dataReceived,
"<iq type='result' id='%s'/>" % self.iq['id'])
self.clock.pump([1, 1])
self.assertFalse(self.clock.calls)
return d
def testDisconnectTimeoutCancellation(self):
"""
Test if timeouts for iq's that haven't yet received a response
are cancelled on stream disconnect.
"""
self.iq.timeout = 60
d = self.iq.send()
xs = self.xmlstream
xs.connectionLost("Closed by peer")
self.assertFailure(d, ConnectionLost)
self.assertFalse(self.clock.calls)
return d
class XmlStreamTest(unittest.TestCase):
def onStreamStart(self, obj):
self.gotStreamStart = True
def onStreamEnd(self, obj):
self.gotStreamEnd = True
def onStreamError(self, obj):
self.gotStreamError = True
def setUp(self):
"""
Set up XmlStream and several observers.
"""
self.gotStreamStart = False
self.gotStreamEnd = False
self.gotStreamError = False
xs = xmlstream.XmlStream(xmlstream.Authenticator())
xs.addObserver('//event/stream/start', self.onStreamStart)
xs.addObserver('//event/stream/end', self.onStreamEnd)
xs.addObserver('//event/stream/error', self.onStreamError)
xs.makeConnection(proto_helpers.StringTransportWithDisconnection())
xs.transport.protocol = xs
xs.namespace = 'testns'
xs.version = (1, 0)
self.xmlstream = xs
def test_sendHeaderBasic(self):
"""
Basic test on the header sent by sendHeader.
"""
xs = self.xmlstream
xs.sendHeader()
splitHeader = self.xmlstream.transport.value()[0:-1].split(' ')
self.assertIn("<stream:stream", splitHeader)
self.assertIn("xmlns:stream='http://etherx.jabber.org/streams'",
splitHeader)
self.assertIn("xmlns='testns'", splitHeader)
self.assertIn("version='1.0'", splitHeader)
self.assertTrue(xs._headerSent)
def test_sendHeaderAdditionalNamespaces(self):
"""
Test for additional namespace declarations.
"""
xs = self.xmlstream
xs.prefixes['jabber:server:dialback'] = 'db'
xs.sendHeader()
splitHeader = self.xmlstream.transport.value()[0:-1].split(' ')
self.assertIn("<stream:stream", splitHeader)
self.assertIn("xmlns:stream='http://etherx.jabber.org/streams'",
splitHeader)
self.assertIn("xmlns:db='jabber:server:dialback'", splitHeader)
self.assertIn("xmlns='testns'", splitHeader)
self.assertIn("version='1.0'", splitHeader)
self.assertTrue(xs._headerSent)
def test_sendHeaderInitiating(self):
"""
Test addressing when initiating a stream.
"""
xs = self.xmlstream
xs.thisEntity = jid.JID('thisHost')
xs.otherEntity = jid.JID('otherHost')
xs.initiating = True
xs.sendHeader()
splitHeader = xs.transport.value()[0:-1].split(' ')
self.assertIn("to='otherhost'", splitHeader)
self.assertIn("from='thishost'", splitHeader)
def test_sendHeaderReceiving(self):
"""
Test addressing when receiving a stream.
"""
xs = self.xmlstream
xs.thisEntity = jid.JID('thisHost')
xs.otherEntity = jid.JID('otherHost')
xs.initiating = False
xs.sid = 'session01'
xs.sendHeader()
splitHeader = xs.transport.value()[0:-1].split(' ')
self.assertIn("to='otherhost'", splitHeader)
self.assertIn("from='thishost'", splitHeader)
self.assertIn("id='session01'", splitHeader)
def test_receiveStreamError(self):
"""
Test events when a stream error is received.
"""
xs = self.xmlstream
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1.0'>")
xs.dataReceived("<stream:error/>")
self.assertTrue(self.gotStreamError)
self.assertTrue(self.gotStreamEnd)
def test_sendStreamErrorInitiating(self):
"""
Test sendStreamError on an initiating xmlstream with a header sent.
An error should be sent out and the connection lost.
"""
xs = self.xmlstream
xs.initiating = True
xs.sendHeader()
xs.transport.clear()
xs.sendStreamError(error.StreamError('version-unsupported'))
self.assertNotEqual('', xs.transport.value())
self.assertTrue(self.gotStreamEnd)
def test_sendStreamErrorInitiatingNoHeader(self):
"""
Test sendStreamError on an initiating xmlstream without having sent a
header.
In this case, no header should be generated. Also, the error should
not be sent out on the stream. Just closing the connection.
"""
xs = self.xmlstream
xs.initiating = True
xs.transport.clear()
xs.sendStreamError(error.StreamError('version-unsupported'))
self.assertNot(xs._headerSent)
self.assertEqual('', xs.transport.value())
self.assertTrue(self.gotStreamEnd)
def test_sendStreamErrorReceiving(self):
"""
Test sendStreamError on a receiving xmlstream with a header sent.
An error should be sent out and the connection lost.
"""
xs = self.xmlstream
xs.initiating = False
xs.sendHeader()
xs.transport.clear()
xs.sendStreamError(error.StreamError('version-unsupported'))
self.assertNotEqual('', xs.transport.value())
self.assertTrue(self.gotStreamEnd)
def test_sendStreamErrorReceivingNoHeader(self):
"""
Test sendStreamError on a receiving xmlstream without having sent a
header.
In this case, a header should be generated. Then, the error should
be sent out on the stream followed by closing the connection.
"""
xs = self.xmlstream
xs.initiating = False
xs.transport.clear()
xs.sendStreamError(error.StreamError('version-unsupported'))
self.assertTrue(xs._headerSent)
self.assertNotEqual('', xs.transport.value())
self.assertTrue(self.gotStreamEnd)
def test_reset(self):
"""
Test resetting the XML stream to start a new layer.
"""
xs = self.xmlstream
xs.sendHeader()
stream = xs.stream
xs.reset()
self.assertNotEqual(stream, xs.stream)
self.assertNot(xs._headerSent)
def test_send(self):
"""
Test send with various types of objects.
"""
xs = self.xmlstream
xs.send('<presence/>')
self.assertEqual(xs.transport.value(), '<presence/>')
xs.transport.clear()
el = domish.Element(('testns', 'presence'))
xs.send(el)
self.assertEqual(xs.transport.value(), '<presence/>')
xs.transport.clear()
el = domish.Element(('http://etherx.jabber.org/streams', 'features'))
xs.send(el)
self.assertEqual(xs.transport.value(), '<stream:features/>')
def test_authenticator(self):
"""
Test that the associated authenticator is correctly called.
"""
connectionMadeCalls = []
streamStartedCalls = []
associateWithStreamCalls = []
class TestAuthenticator:
def connectionMade(self):
connectionMadeCalls.append(None)
def streamStarted(self, rootElement):
streamStartedCalls.append(rootElement)
def associateWithStream(self, xs):
associateWithStreamCalls.append(xs)
a = TestAuthenticator()
xs = xmlstream.XmlStream(a)
self.assertEqual([xs], associateWithStreamCalls)
xs.connectionMade()
self.assertEqual([None], connectionMadeCalls)
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345'>")
self.assertEqual(1, len(streamStartedCalls))
xs.reset()
self.assertEqual([None], connectionMadeCalls)
class TestError(Exception):
pass
class AuthenticatorTest(unittest.TestCase):
def setUp(self):
self.authenticator = xmlstream.Authenticator()
self.xmlstream = xmlstream.XmlStream(self.authenticator)
def test_streamStart(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.org' to='example.com' id='12345' "
"version='1.0'>")
self.assertEqual((1, 0), xs.version)
self.assertIdentical(None, xs.sid)
self.assertEqual('invalid', xs.namespace)
self.assertIdentical(None, xs.otherEntity)
self.assertEqual(None, xs.thisEntity)
def test_streamStartLegacy(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header for a pre-XMPP-1.0 header.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345'>")
self.assertEqual((0, 0), xs.version)
def test_streamBadVersionOneDigit(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header for a version with only one digit.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1'>")
self.assertEqual((0, 0), xs.version)
def test_streamBadVersionNoNumber(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header for a malformed version.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='blah'>")
self.assertEqual((0, 0), xs.version)
class ConnectAuthenticatorTest(unittest.TestCase):
def setUp(self):
self.gotAuthenticated = False
self.initFailure = None
self.authenticator = xmlstream.ConnectAuthenticator('otherHost')
self.xmlstream = xmlstream.XmlStream(self.authenticator)
self.xmlstream.addObserver('//event/stream/authd', self.onAuthenticated)
self.xmlstream.addObserver('//event/xmpp/initfailed', self.onInitFailed)
def onAuthenticated(self, obj):
self.gotAuthenticated = True
def onInitFailed(self, failure):
self.initFailure = failure
def testSuccess(self):
"""
Test successful completion of an initialization step.
"""
class Initializer:
def initialize(self):
pass
init = Initializer()
self.xmlstream.initializers = [init]
self.authenticator.initializeStream()
self.assertEqual([], self.xmlstream.initializers)
self.assertTrue(self.gotAuthenticated)
def testFailure(self):
"""
Test failure of an initialization step.
"""
class Initializer:
def initialize(self):
raise TestError
init = Initializer()
self.xmlstream.initializers = [init]
self.authenticator.initializeStream()
self.assertEqual([init], self.xmlstream.initializers)
self.assertFalse(self.gotAuthenticated)
self.assertNotIdentical(None, self.initFailure)
self.assertTrue(self.initFailure.check(TestError))
def test_streamStart(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header.
"""
self.authenticator.namespace = 'testns'
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' to='example.org' id='12345' "
"version='1.0'>")
self.assertEqual((1, 0), xs.version)
self.assertEqual('12345', xs.sid)
self.assertEqual('testns', xs.namespace)
self.assertEqual('example.com', xs.otherEntity.host)
self.assertIdentical(None, xs.thisEntity)
self.assertNot(self.gotAuthenticated)
xs.dataReceived("<stream:features>"
"<test xmlns='testns'/>"
"</stream:features>")
self.assertIn(('testns', 'test'), xs.features)
self.assertTrue(self.gotAuthenticated)
class ListenAuthenticatorTest(unittest.TestCase):
"""
Tests for L{xmlstream.ListenAuthenticator}
"""
def setUp(self):
self.authenticator = xmlstream.ListenAuthenticator()
self.xmlstream = xmlstream.XmlStream(self.authenticator)
def test_streamStart(self):
"""
Test streamStart to fill the appropriate attributes from the
stream header.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
self.assertIdentical(None, xs.sid)
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.org' to='example.com' id='12345' "
"version='1.0'>")
self.assertEqual((1, 0), xs.version)
self.assertNotIdentical(None, xs.sid)
self.assertNotEquals('12345', xs.sid)
self.assertEqual('jabber:client', xs.namespace)
self.assertIdentical(None, xs.otherEntity)
self.assertEqual('example.com', xs.thisEntity.host)
def test_streamStartUnicodeSessionID(self):
"""
The generated session id must be a unicode object.
"""
xs = self.xmlstream
xs.makeConnection(proto_helpers.StringTransport())
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.org' to='example.com' id='12345' "
"version='1.0'>")
self.assertIsInstance(xs.sid, unicode)
class TLSInitiatingInitializerTest(unittest.TestCase):
def setUp(self):
self.output = []
self.done = []
self.savedSSL = xmlstream.ssl
self.authenticator = xmlstream.Authenticator()
self.xmlstream = xmlstream.XmlStream(self.authenticator)
self.xmlstream.send = self.output.append
self.xmlstream.connectionMade()
self.xmlstream.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1.0'>")
self.init = xmlstream.TLSInitiatingInitializer(self.xmlstream)
def tearDown(self):
xmlstream.ssl = self.savedSSL
def testWantedSupported(self):
"""
Test start when TLS is wanted and the SSL library available.
"""
self.xmlstream.transport = proto_helpers.StringTransport()
self.xmlstream.transport.startTLS = lambda ctx: self.done.append('TLS')
self.xmlstream.reset = lambda: self.done.append('reset')
self.xmlstream.sendHeader = lambda: self.done.append('header')
d = self.init.start()
d.addCallback(self.assertEquals, xmlstream.Reset)
starttls = self.output[0]
self.assertEquals('starttls', starttls.name)
self.assertEquals(NS_XMPP_TLS, starttls.uri)
self.xmlstream.dataReceived("<proceed xmlns='%s'/>" % NS_XMPP_TLS)
self.assertEquals(['TLS', 'reset', 'header'], self.done)
return d
if not xmlstream.ssl:
testWantedSupported.skip = "SSL not available"
def testWantedNotSupportedNotRequired(self):
"""
Test start when TLS is wanted but not required and the SSL library is unavailable.
"""
xmlstream.ssl = None
d = self.init.start()
d.addCallback(self.assertEquals, None)
self.assertEquals([], self.output)
return d
def testWantedNotSupportedRequired(self):
"""
Test start when TLS is wanted and required but the SSL library is unavailable.
"""
xmlstream.ssl = None
self.init.required = True
d = self.init.start()
self.assertFailure(d, xmlstream.TLSNotSupported)
self.assertEquals([], self.output)
return d
def testNotWantedRequired(self):
"""
Test start when TLS is not wanted, but required by the server.
"""
tls = domish.Element(('urn:ietf:params:xml:ns:xmpp-tls', 'starttls'))
tls.addElement('required')
self.xmlstream.features = {(tls.uri, tls.name): tls}
self.init.wanted = False
d = self.init.start()
self.assertEquals([], self.output)
self.assertFailure(d, xmlstream.TLSRequired)
return d
def testNotWantedNotRequired(self):
"""
Test start when TLS is neither wanted nor required by the server.
"""
tls = domish.Element(('urn:ietf:params:xml:ns:xmpp-tls', 'starttls'))
self.xmlstream.features = {(tls.uri, tls.name): tls}
self.init.wanted = False
d = self.init.start()
d.addCallback(self.assertEqual, None)
self.assertEquals([], self.output)
return d
def testFailed(self):
"""
Test failed TLS negotiation.
"""
# Pretend that ssl is supported, it isn't actually used when the
# server starts out with a failure in response to our initial
# C{starttls} stanza.
xmlstream.ssl = 1
d = self.init.start()
self.assertFailure(d, xmlstream.TLSFailed)
self.xmlstream.dataReceived("<failure xmlns='%s'/>" % NS_XMPP_TLS)
return d
class TestFeatureInitializer(xmlstream.BaseFeatureInitiatingInitializer):
feature = ('testns', 'test')
def start(self):
return defer.succeed(None)
class BaseFeatureInitiatingInitializerTest(unittest.TestCase):
def setUp(self):
self.xmlstream = xmlstream.XmlStream(xmlstream.Authenticator())
self.init = TestFeatureInitializer(self.xmlstream)
def testAdvertized(self):
"""
Test that an advertized feature results in successful initialization.
"""
self.xmlstream.features = {self.init.feature:
domish.Element(self.init.feature)}
return self.init.initialize()
def testNotAdvertizedRequired(self):
"""
Test that when the feature is not advertized, but required by the
initializer, an exception is raised.
"""
self.init.required = True
self.assertRaises(xmlstream.FeatureNotAdvertized, self.init.initialize)
def testNotAdvertizedNotRequired(self):
"""
Test that when the feature is not advertized, and not required by the
initializer, the initializer silently succeeds.
"""
self.init.required = False
self.assertIdentical(None, self.init.initialize())
class ToResponseTest(unittest.TestCase):
def test_toResponse(self):
"""
Test that a response stanza is generated with addressing swapped.
"""
stanza = domish.Element(('jabber:client', 'iq'))
stanza['type'] = 'get'
stanza['to'] = '[email protected]'
stanza['from'] = '[email protected]/resource'
stanza['id'] = 'stanza1'
response = xmlstream.toResponse(stanza, 'result')
self.assertNotIdentical(stanza, response)
self.assertEqual(response['from'], '[email protected]')
self.assertEqual(response['to'], '[email protected]/resource')
self.assertEqual(response['type'], 'result')
self.assertEqual(response['id'], 'stanza1')
def test_toResponseNoFrom(self):
"""
Test that a response is generated from a stanza without a from address.
"""
stanza = domish.Element(('jabber:client', 'iq'))
stanza['type'] = 'get'
stanza['to'] = '[email protected]'
response = xmlstream.toResponse(stanza)
self.assertEqual(response['from'], '[email protected]')
self.assertFalse(response.hasAttribute('to'))
def test_toResponseNoTo(self):
"""
Test that a response is generated from a stanza without a to address.
"""
stanza = domish.Element(('jabber:client', 'iq'))
stanza['type'] = 'get'
stanza['from'] = '[email protected]/resource'
response = xmlstream.toResponse(stanza)
self.assertFalse(response.hasAttribute('from'))
self.assertEqual(response['to'], '[email protected]/resource')
def test_toResponseNoAddressing(self):
"""
Test that a response is generated from a stanza without any addressing.
"""
stanza = domish.Element(('jabber:client', 'message'))
stanza['type'] = 'chat'
response = xmlstream.toResponse(stanza)
self.assertFalse(response.hasAttribute('to'))
self.assertFalse(response.hasAttribute('from'))
def test_noID(self):
"""
Test that a proper response is generated without id attribute.
"""
stanza = domish.Element(('jabber:client', 'message'))
response = xmlstream.toResponse(stanza)
self.assertFalse(response.hasAttribute('id'))
def test_noType(self):
"""
Test that a proper response is generated without type attribute.
"""
stanza = domish.Element(('jabber:client', 'message'))
response = xmlstream.toResponse(stanza)
self.assertFalse(response.hasAttribute('type'))
class DummyFactory(object):
"""
Dummy XmlStream factory that only registers bootstrap observers.
"""
def __init__(self):
self.callbacks = {}
def addBootstrap(self, event, callback):
self.callbacks[event] = callback
class DummyXMPPHandler(xmlstream.XMPPHandler):
"""
Dummy XMPP subprotocol handler to count the methods are called on it.
"""
def __init__(self):
self.doneMade = 0
self.doneInitialized = 0
self.doneLost = 0
def makeConnection(self, xs):
self.connectionMade()
def connectionMade(self):
self.doneMade += 1
def connectionInitialized(self):
self.doneInitialized += 1
def connectionLost(self, reason):
self.doneLost += 1
class XMPPHandlerTest(unittest.TestCase):
"""
Tests for L{xmlstream.XMPPHandler}.
"""
def test_interface(self):
"""
L{xmlstream.XMPPHandler} implements L{ijabber.IXMPPHandler}.
"""
verifyObject(ijabber.IXMPPHandler, xmlstream.XMPPHandler())
def test_send(self):
"""
Test that data is passed on for sending by the stream manager.
"""
class DummyStreamManager(object):
def __init__(self):
self.outlist = []
def send(self, data):
self.outlist.append(data)
handler = xmlstream.XMPPHandler()
handler.parent = DummyStreamManager()
handler.send('<presence/>')
self.assertEquals(['<presence/>'], handler.parent.outlist)
def test_makeConnection(self):
"""
Test that makeConnection saves the XML stream and calls connectionMade.
"""
class TestXMPPHandler(xmlstream.XMPPHandler):
def connectionMade(self):
self.doneMade = True
handler = TestXMPPHandler()
xs = xmlstream.XmlStream(xmlstream.Authenticator())
handler.makeConnection(xs)
self.assertTrue(handler.doneMade)
self.assertIdentical(xs, handler.xmlstream)
def test_connectionLost(self):
"""
Test that connectionLost forgets the XML stream.
"""
handler = xmlstream.XMPPHandler()
xs = xmlstream.XmlStream(xmlstream.Authenticator())
handler.makeConnection(xs)
handler.connectionLost(Exception())
self.assertIdentical(None, handler.xmlstream)
class XMPPHandlerCollectionTest(unittest.TestCase):
"""
Tests for L{xmlstream.XMPPHandlerCollection}.
"""
def setUp(self):
self.collection = xmlstream.XMPPHandlerCollection()
def test_interface(self):
"""
L{xmlstream.StreamManager} implements L{ijabber.IXMPPHandlerCollection}.
"""
verifyObject(ijabber.IXMPPHandlerCollection, self.collection)
def test_addHandler(self):
"""
Test the addition of a protocol handler.
"""
handler = DummyXMPPHandler()
handler.setHandlerParent(self.collection)
self.assertIn(handler, self.collection)
self.assertIdentical(self.collection, handler.parent)
def test_removeHandler(self):
"""
Test removal of a protocol handler.
"""
handler = DummyXMPPHandler()
handler.setHandlerParent(self.collection)
handler.disownHandlerParent(self.collection)
self.assertNotIn(handler, self.collection)
self.assertIdentical(None, handler.parent)
class StreamManagerTest(unittest.TestCase):
"""
Tests for L{xmlstream.StreamManager}.
"""
def setUp(self):
factory = DummyFactory()
self.streamManager = xmlstream.StreamManager(factory)
def test_basic(self):
"""
Test correct initialization and setup of factory observers.
"""
sm = self.streamManager
self.assertIdentical(None, sm.xmlstream)
self.assertEquals([], sm.handlers)
self.assertEquals(sm._connected,
sm.factory.callbacks['//event/stream/connected'])
self.assertEquals(sm._authd,
sm.factory.callbacks['//event/stream/authd'])
self.assertEquals(sm._disconnected,
sm.factory.callbacks['//event/stream/end'])
self.assertEquals(sm.initializationFailed,
sm.factory.callbacks['//event/xmpp/initfailed'])
def test_connected(self):
"""
Test that protocol handlers have their connectionMade method called
when the XML stream is connected.
"""
sm = self.streamManager
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._connected(xs)
self.assertEquals(1, handler.doneMade)
self.assertEquals(0, handler.doneInitialized)
self.assertEquals(0, handler.doneLost)
def test_connectedLogTrafficFalse(self):
"""
Test raw data functions unset when logTraffic is set to False.
"""
sm = self.streamManager
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._connected(xs)
self.assertIdentical(None, xs.rawDataInFn)
self.assertIdentical(None, xs.rawDataOutFn)
def test_connectedLogTrafficTrue(self):
"""
Test raw data functions set when logTraffic is set to True.
"""
sm = self.streamManager
sm.logTraffic = True
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._connected(xs)
self.assertNotIdentical(None, xs.rawDataInFn)
self.assertNotIdentical(None, xs.rawDataOutFn)
def test_authd(self):
"""
Test that protocol handlers have their connectionInitialized method
called when the XML stream is initialized.
"""
sm = self.streamManager
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._authd(xs)
self.assertEquals(0, handler.doneMade)
self.assertEquals(1, handler.doneInitialized)
self.assertEquals(0, handler.doneLost)
def test_disconnected(self):
"""
Test that protocol handlers have their connectionLost method
called when the XML stream is disconnected.
"""
sm = self.streamManager
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._disconnected(xs)
self.assertEquals(0, handler.doneMade)
self.assertEquals(0, handler.doneInitialized)
self.assertEquals(1, handler.doneLost)
def test_addHandler(self):
"""
Test the addition of a protocol handler while not connected.
"""
sm = self.streamManager
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
self.assertEquals(0, handler.doneMade)
self.assertEquals(0, handler.doneInitialized)
self.assertEquals(0, handler.doneLost)
def test_addHandlerInitialized(self):
"""
Test the addition of a protocol handler after the stream
has been initialized.
Make sure that the handler will have the connected stream
passed via C{makeConnection} and have C{connectionInitialized}
called.
"""
sm = self.streamManager
xs = xmlstream.XmlStream(xmlstream.Authenticator())
sm._connected(xs)
sm._authd(xs)
handler = DummyXMPPHandler()
handler.setHandlerParent(sm)
self.assertEquals(1, handler.doneMade)
self.assertEquals(1, handler.doneInitialized)
self.assertEquals(0, handler.doneLost)
def test_sendInitialized(self):
"""
Test send when the stream has been initialized.
The data should be sent directly over the XML stream.
"""
factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator())
sm = xmlstream.StreamManager(factory)
xs = factory.buildProtocol(None)
xs.transport = proto_helpers.StringTransport()
xs.connectionMade()
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345'>")
xs.dispatch(xs, "//event/stream/authd")
sm.send("<presence/>")
self.assertEquals("<presence/>", xs.transport.value())
def test_sendNotConnected(self):
"""
Test send when there is no established XML stream.
The data should be cached until an XML stream has been established and
initialized.
"""
factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator())
sm = xmlstream.StreamManager(factory)
handler = DummyXMPPHandler()
sm.addHandler(handler)
xs = factory.buildProtocol(None)
xs.transport = proto_helpers.StringTransport()
sm.send("<presence/>")
self.assertEquals("", xs.transport.value())
self.assertEquals("<presence/>", sm._packetQueue[0])
xs.connectionMade()
self.assertEquals("", xs.transport.value())
self.assertEquals("<presence/>", sm._packetQueue[0])
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345'>")
xs.dispatch(xs, "//event/stream/authd")
self.assertEquals("<presence/>", xs.transport.value())
self.assertFalse(sm._packetQueue)
def test_sendNotInitialized(self):
"""
Test send when the stream is connected but not yet initialized.
The data should be cached until the XML stream has been initialized.
"""
factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator())
sm = xmlstream.StreamManager(factory)
xs = factory.buildProtocol(None)
xs.transport = proto_helpers.StringTransport()
xs.connectionMade()
xs.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345'>")
sm.send("<presence/>")
self.assertEquals("", xs.transport.value())
self.assertEquals("<presence/>", sm._packetQueue[0])
def test_sendDisconnected(self):
"""
Test send after XML stream disconnection.
The data should be cached until a new XML stream has been established
and initialized.
"""
factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator())
sm = xmlstream.StreamManager(factory)
handler = DummyXMPPHandler()
sm.addHandler(handler)
xs = factory.buildProtocol(None)
xs.connectionMade()
xs.transport = proto_helpers.StringTransport()
xs.connectionLost(None)
sm.send("<presence/>")
self.assertEquals("", xs.transport.value())
self.assertEquals("<presence/>", sm._packetQueue[0])
class XmlStreamServerFactoryTest(GenericXmlStreamFactoryTestsMixin):
"""
Tests for L{xmlstream.XmlStreamServerFactory}.
"""
def setUp(self):
"""
Set up a server factory with an authenticator factory function.
"""
class TestAuthenticator(object):
def __init__(self):
self.xmlstreams = []
def associateWithStream(self, xs):
self.xmlstreams.append(xs)
def authenticatorFactory():
return TestAuthenticator()
self.factory = xmlstream.XmlStreamServerFactory(authenticatorFactory)
def test_interface(self):
"""
L{XmlStreamServerFactory} is a L{Factory}.
"""
verifyObject(IProtocolFactory, self.factory)
def test_buildProtocolAuthenticatorInstantiation(self):
"""
The authenticator factory should be used to instantiate the
authenticator and pass it to the protocol.
The default protocol, L{XmlStream}, stores the authenticator it is
passed and calls its C{associateWithStream} method, so we use that to
check whether our authenticator factory is used and the protocol
instance gets an authenticator.
"""
xs = self.factory.buildProtocol(None)
self.assertEquals([xs], xs.authenticator.xmlstreams)
def test_buildProtocolXmlStream(self):
"""
The protocol factory creates Jabber XML Stream protocols by default.
"""
xs = self.factory.buildProtocol(None)
self.assertIsInstance(xs, xmlstream.XmlStream)
def test_buildProtocolTwice(self):
"""
Subsequent calls to buildProtocol should result in different instances
of the protocol, as well as their authenticators.
"""
xs1 = self.factory.buildProtocol(None)
xs2 = self.factory.buildProtocol(None)
self.assertNotIdentical(xs1, xs2)
self.assertNotIdentical(xs1.authenticator, xs2.authenticator)
|
apache-2.0
| 2,853,343,039,343,616,500 | -1,939,588,201,625,194,500 | 31.348926 | 82 | 0.616481 | false |
ThinkOpen-Solutions/odoo
|
addons/purchase/edi/__init__.py
|
448
|
1069
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 9,051,451,223,473,481,000 | -1,956,260,610,957,000,400 | 43.541667 | 78 | 0.618335 | false |
ejpbruel/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/_stream_base.py
|
652
|
5978
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base stream class.
"""
# Note: request.connection.write/read are used in this module, even though
# mod_python document says that they should be used only in connection
# handlers. Unfortunately, we have no other options. For example,
# request.write/read are not suitable because they don't allow direct raw bytes
# writing/reading.
import socket
from mod_pywebsocket import util
# Exceptions
class ConnectionTerminatedException(Exception):
"""This exception will be raised when a connection is terminated
unexpectedly.
"""
pass
class InvalidFrameException(ConnectionTerminatedException):
"""This exception will be raised when we received an invalid frame we
cannot parse.
"""
pass
class BadOperationException(Exception):
"""This exception will be raised when send_message() is called on
server-terminated connection or receive_message() is called on
client-terminated connection.
"""
pass
class UnsupportedFrameException(Exception):
"""This exception will be raised when we receive a frame with flag, opcode
we cannot handle. Handlers can just catch and ignore this exception and
call receive_message() again to continue processing the next frame.
"""
pass
class InvalidUTF8Exception(Exception):
"""This exception will be raised when we receive a text frame which
contains invalid UTF-8 strings.
"""
pass
class StreamBase(object):
"""Base stream class."""
def __init__(self, request):
"""Construct an instance.
Args:
request: mod_python request.
"""
self._logger = util.get_class_logger(self)
self._request = request
def _read(self, length):
"""Reads length bytes from connection. In case we catch any exception,
prepends remote address to the exception message and raise again.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
try:
read_bytes = self._request.connection.read(length)
if not read_bytes:
raise ConnectionTerminatedException(
                    'Receiving %d bytes failed. Peer (%r) closed connection' %
(length, (self._request.connection.remote_addr,)))
return read_bytes
except socket.error, e:
            # Catch a socket.error. Because it's not a child class of
            # IOError prior to Python 2.6, we cannot omit this except clause.
            # Use %s rather than %r for the exception to get a human-friendly
            # format.
raise ConnectionTerminatedException(
                'Receiving %d bytes failed. socket.error (%s) occurred' %
(length, e))
except IOError, e:
# Also catch an IOError because mod_python throws it.
raise ConnectionTerminatedException(
                'Receiving %d bytes failed. IOError (%s) occurred' %
(length, e))
def _write(self, bytes_to_write):
"""Writes given bytes to connection. In case we catch any exception,
prepends remote address to the exception message and raise again.
"""
try:
self._request.connection.write(bytes_to_write)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._request.connection.remote_addr,),
e)
raise
def receive_bytes(self, length):
"""Receives multiple bytes. Retries read when we couldn't receive the
specified amount.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
read_bytes = []
while length > 0:
new_read_bytes = self._read(length)
read_bytes.append(new_read_bytes)
length -= len(new_read_bytes)
return ''.join(read_bytes)
def _read_until(self, delim_char):
"""Reads bytes until we encounter delim_char. The result will not
contain delim_char.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
read_bytes = []
while True:
ch = self._read(1)
if ch == delim_char:
break
read_bytes.append(ch)
return ''.join(read_bytes)
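# Illustrative usage sketch (not part of the original module; the stub
# below is a hypothetical stand-in for a mod_python request). It shows
# why receive_bytes() loops: connection.read() may return fewer bytes
# than requested, so short reads are retried until length bytes arrive.
#
#   class _StubConnection(object):
#       remote_addr = ('127.0.0.1', 0)
#       def __init__(self, data):
#           self._data = data
#       def read(self, length):
#           chunk, self._data = self._data[:1], self._data[1:]
#           return chunk
#
#   class _StubRequest(object):
#       def __init__(self, data):
#           self.connection = _StubConnection(data)
#
#   StreamBase(_StubRequest('hello')).receive_bytes(5)  # -> 'hello'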
# vi:sts=4 sw=4 et
|
mpl-2.0
| -4,210,880,349,875,657,000 | 5,047,440,047,324,120,000 | 32.027624 | 79 | 0.658916 | false |
mrkarthik07/libforensics
|
code/lf/win/shell/link/ctypes.py
|
13
|
2667
|
# Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Ctypes classes for shell link files"""
# local imports
from lf.win.shell.link.dtypes import (
HotKey, ShellLinkHeader, LinkInfoHeader, VolumeIDHeader, CNRLHeader,
ConsoleDataBlock, ConsoleFEDataBlock, DarwinDataBlock,
ExpandableStringsDataBlock, EnvironmentVariableDataBlock,
    IconEnvironmentDataBlock, KnownFolderDataBlock,
SpecialFolderDataBlock, TrackerDataBlock, TrackerDataBlockFooter,
DataBlockHeader, FileAttributes, LinkFlags, DomainRelativeObjId
)
__docformat__ = "restructuredtext en"
__all__ = [
"hot_key", "shell_link_header", "link_info_header", "volume_id_header",
"cnrl_header", "console_data_block", "console_fe_data_block",
"darwin_data_block", "expandable_strings_data_block",
"environment_variable_data_block", "icon_environment_data_block",
"known_folder_data_block", "special_folder_data_block",
"tracker_data_block", "tracker_data_block_footer", "data_block_header",
"file_attributes", "link_flags", "domain_relative_obj_id"
]
hot_key = HotKey._ctype_
shell_link_header = ShellLinkHeader._ctype_
link_info_header = LinkInfoHeader._ctype_
volume_id_header = VolumeIDHeader._ctype_
cnrl_header = CNRLHeader._ctype_
console_data_block = ConsoleDataBlock._ctype_
console_fe_data_block = ConsoleFEDataBlock._ctype_
darwin_data_block = DarwinDataBlock._ctype_
environment_variable_data_block = EnvironmentVariableDataBlock._ctype_
expandable_strings_data_block = ExpandableStringsDataBlock._ctype_
icon_environment_data_block = IconEnvironmentDataBlock._ctype_
known_folder_data_block = KnownFolderDataBlock._ctype_
special_folder_data_block = SpecialFolderDataBlock._ctype_
tracker_data_block = TrackerDataBlock._ctype_
tracker_data_block_footer = TrackerDataBlockFooter._ctype_
data_block_header = DataBlockHeader._ctype_
file_attributes = FileAttributes._ctype_
link_flags = LinkFlags._ctype_
domain_relative_obj_id = DomainRelativeObjId._ctype_
|
gpl-3.0
| -4,160,344,117,140,918,300 | 3,567,938,420,206,560,000 | 44.20339 | 77 | 0.773528 | false |
bellettif/MultimodePIF
|
PythonWksp/multiPIF/OSM/native/network_tools.py
|
1
|
6544
|
'''
Created on Jan 5, 2015
@author: Francois Belletti
'''
import numpy as np
from OSM.misc.geoTools import computeDist
#
# Need to change that distance
#
def l2_dist(node_1, node_2):
lat_1 = float(node_1.get('lat'))
lon_1 = float(node_1.get('lon'))
lat_2 = float(node_2.get('lat'))
lon_2 = float(node_2.get('lon'))
return computeDist(lon_1, lat_1, lon_2, lat_2)
#
# Extract a given target network (road, rail, subway)
# from a list of ways as well as the references
# to all nodes (as a dict)
# Returns the edges as a list, the nodes as the keys
# of a dict recapping all their parents
#
def extract_edges(ways, target_key, target_values):
node_seqs = []
node_refs = {}
#
for way in ways:
ref_tags = way.findall('tag')
in_network = False
for ref_tag in ref_tags:
k = ref_tag.get('k')
v = ref_tag.get('v')
if k == target_key and v in target_values:
in_network = True
break
if not in_network:
continue
else:
node_seqs.append(way)
way_id = way.get('id')
child_nodes = way.findall('nd')
for child_node in child_nodes:
node_id = child_node.get('ref')
if node_id not in node_refs:
node_refs[node_id] = []
node_refs[node_id].append(way_id)
return node_seqs, node_refs
#
# Split ways into links between intersections
#
def split_ways(node_seqs, node_refs, node_dict, target_features):
# Convenience compute distance function
def cmpt_length(node_seq):
dist = 0
for i in range(len(node_seq) - 1):
prev_node = node_dict[node_seq[i].get('ref')]
next_node = node_dict[node_seq[i+1].get('ref')]
dist += l2_dist(prev_node, next_node)
return dist
#
all_features = ['length', 'node_seq']
all_features.extend(target_features)
#
links = []
for node_seq in node_seqs:
features = dict.fromkeys(target_features, None)
for tag in node_seq.findall('tag'):
k = tag.get('k')
if k not in features:
continue
v = tag.get('v')
features[k] = v
node_seq = node_seq.findall('nd')
ref_seq = [len(node_refs[x.get('ref')]) for x in node_seq]
cut_indices = []
ref_seq_len = len(ref_seq)
for i, n_ref in enumerate(ref_seq):
if n_ref > 1 and (i != 0) and (i != (ref_seq_len - 1)):
cut_indices.append(i)
sub_links = []
cut_indices_len = len(cut_indices)
# For factoring
def sub_seq_summary(sub_sequence):
sub_seq_length = cmpt_length(sub_sequence)
summary = dict.fromkeys(all_features, None)
summary['length'] = sub_seq_length # in meters
summary['node_seq'] = sub_sequence
for feature in target_features:
summary[feature] = features[feature]
return summary
#
if cut_indices_len > 0:
for i, cut_index in enumerate(cut_indices):
if i == 0:
sub_seq = node_seq[:cut_index + 1] # reach for 1 more to create overlap
sub_links.append(sub_seq_summary(sub_seq))
if i < (cut_indices_len - 1):
sub_seq = node_seq[cut_index:cut_indices[i+1] + 1]
sub_links.append(sub_seq_summary(sub_seq))
if i == (cut_indices_len - 1):
sub_seq = node_seq[cut_index:]
sub_links.append(sub_seq_summary(sub_seq))
else:
sub_seq = node_seq
sub_links = [sub_seq_summary(sub_seq)]
links.extend(sub_links)
return links
#
# From the links (dual graph) build the nodes and link
# them one to another (primal graph)
# The result is a dict[node] = [neighbours]
#
def link_nodes(links, target_features):
linked_nodes = {}
for link in links:
leftmost_node_id = link['node_seq'][0].get('ref')
rightmost_node_id = link['node_seq'][-1].get('ref')
# The following work on the set of features
# needs to become dynamic
edge_dict = {}
edge_dict['length'] = link['length']
for feature in target_features:
edge_dict[feature] = link[feature]
#
# Create edges in graph (vertices are keys)
        # So far, one-way streets are not taken into account
#
# Link left to right
if leftmost_node_id not in linked_nodes:
linked_nodes[leftmost_node_id] = {}
linked_nodes[leftmost_node_id][rightmost_node_id] = \
edge_dict
# Link right to left
if rightmost_node_id not in linked_nodes:
linked_nodes[rightmost_node_id] = {}
linked_nodes[rightmost_node_id][leftmost_node_id] = \
edge_dict
return linked_nodes
#
# Reduce network for routing applications, get rid of
# nodes that do not correspond to intersections
#
def reduce_network(linked_nodes,
feature_list):
to_delete = []
for node_id, neighbours in linked_nodes.iteritems():
if len(neighbours) == 2:
neigh_items = neighbours.items()
left_neigh , left_features = neigh_items[0]
right_neigh , right_features = neigh_items[1]
# Check that properties are identical
identical = True
for feature in feature_list:
if left_features[feature] != right_features[feature]:
identical = False
if not identical:
continue
# Collapse node with node_id into neighbours
tot_length = left_features['length'] + \
right_features['length']
merge_features = {}
merge_features['length'] = tot_length
for feature in feature_list:
merge_features[feature] = left_features[feature]
#
linked_nodes[right_neigh][left_neigh] = merge_features
del linked_nodes[right_neigh][node_id]
#
linked_nodes[left_neigh][right_neigh] = merge_features
del linked_nodes[left_neigh][node_id]
#
to_delete.append(node_id)
for node_id in to_delete:
del linked_nodes[node_id]
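#
# Illustrative pipeline sketch (not part of the original module): the
# functions above are meant to be chained on parsed OSM XML elements.
# 'ways' and 'node_dict' are hypothetical inputs, e.g. built with
# xml.etree.ElementTree from an .osm extract.
#
#   node_seqs, node_refs = extract_edges(ways, 'highway',
#                                        ['primary', 'secondary'])
#   links = split_ways(node_seqs, node_refs, node_dict, ['highway'])
#   linked_nodes = link_nodes(links, ['highway'])
#   reduce_network(linked_nodes, ['highway'])  # prunes pass-through nodes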
|
gpl-2.0
| 4,734,033,859,822,257,000 | -6,400,983,416,098,366,000 | 34.570652 | 92 | 0.541106 | false |
jvantuyl/exim_ses_transport
|
setup.py
|
1
|
1915
|
"""
setuptools installer for exim_ses_transport
"""
# Copyright 2011, Jayson Vantuyl <[email protected]>
#
# This file is part of exim_ses_transport.
#
# exim_ses_transport is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# exim_ses_transport is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with exim_ses_transport. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
setup(
name = "EximSesTransport",
version = "0.4",
description = 'An Amazon SES transport for the Exim MTA.',
platforms = [ 'any' ],
author = "Jayson Vantuyl",
author_email = "[email protected]",
long_description = """Amazon's cloud includes a service to send e-mail through their infrastructure.
While this is useful, sometimes there's just no substitute for an MTA. This
transport allows you to selectively integrate Amazon's SES with one of the
Internet's most powerful MTAs, Exim.
""",
url = 'https://github.com/jvantuyl/exim_ses_transport',
license = "http://www.gnu.org/copyleft/lesser.html",
classifiers = [
'Topic :: Communications :: Email :: Filters',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)'
],
packages = ['exim_ses_transport'],
install_requires = ['boto>=2.0', 'dkimpy>=0.5'],
dependency_links = ['https://launchpad.net/dkimpy/trunk/0.5.4/+download/dkimpy-0.5.4.tar.gz#egg=dkimpy-0.5.4'],
entry_points = { 'console_scripts': [ 'exim_ses_transport=exim_ses_transport.run:main' ] }
)
|
lgpl-3.0
| 7,470,567,041,169,824,000 | 5,551,055,376,821,038,000 | 39.744681 | 112 | 0.731593 | false |
SteerSuite/steersuite-rutgers
|
steerstats/tests/parse-test.py
|
8
|
2156
|
import psycopg2
from steersuite import LogParser
from steersuitedb import Composite1Benchmark
# import steersuitedb.Composite1Benchmark
logfile = open('ppr/test.log', 'r')
lparser = LogParser.LogParser()
out = lparser.parseLog(logfile)
con = psycopg2.connect(database='steersuitedb', user='steeruser', password='steersuite')
cur = con.cursor()
benchmark = Composite1Benchmark.Composite1Benchmark(1,
out[2][0],
out[2][1],
out[2][2],
out[2][3],
out[2][4],
out[2][5],
out[2][6],
out[2][7],
out[2][8],
out[2][9],
out[2][10],
out[2][11],
out[2][12],
out[2][13],
out[2][14],
out[2][15],
out[2][16],
out[2][17],
out[2][18],
out[2][19],
out[2][20],
out[2][21],
out[2][22]
)
print out
print ":"
print out[2][3]
print benchmark._total_change_in_speed
cur.execute("Select * from nextval('algorithm_seq')")
row = cur.fetchone()
print row
print row[0]
status = benchmark.insertTest2(cur)
logfile.close()
|
gpl-3.0
| -479,408,416,279,645,600 | 2,932,955,209,397,924,400 | 38.925926 | 89 | 0.285714 | false |
atsuyim/readthedocs.org
|
readthedocs/restapi/permissions.py
|
18
|
2215
|
from rest_framework import permissions
from readthedocs.privacy.backend import AdminPermission
class IsOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Write permissions are only allowed to the owner of the snippet
return request.user in obj.users.all()
class CommentModeratorOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, comment):
if request.method in permissions.SAFE_METHODS:
return True # TODO: Similar logic to #1084
else:
return AdminPermission.is_admin(request.user, comment.node.project)
class RelatedProjectIsOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Write permissions are only allowed to the owner of the snippet
return request.user in obj.project.users.all()
class APIPermission(permissions.IsAuthenticatedOrReadOnly):
'''
    This permission should allow authenticated users read-only access to the
    API, and allow admin users write access. This should be used on API
    resources that need to implement write operations but were based on the
    ReadOnlyViewSet.
'''
def has_object_permission(self, request, view, obj):
has_perm = super(APIPermission, self).has_object_permission(
request, view, obj)
return has_perm or (request.user and request.user.is_staff)
class APIRestrictedPermission(permissions.IsAdminUser):
"""Allow admin write, authenticated and anonymous read only
This differs from :py:cls:`APIPermission` by not allowing for authenticated
    POSTs. This permission is for endpoints like ``/api/v2/build/``, which are
    used by admin users to coordinate build instance creation, but should only
    be readable by end users.
"""
def has_object_permission(self, request, view, obj):
return (
request.method in permissions.SAFE_METHODS or
(request.user and request.user.is_staff)
)
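# Illustrative sketch (assumption, not part of the original file): these
# classes are attached to DRF views via permission_classes, e.g.:
#
#   class BuildViewSet(viewsets.ModelViewSet):
#       permission_classes = [APIRestrictedPermission]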
|
mit
| 4,834,955,639,133,474,000 | -5,090,731,357,313,800,000 | 35.311475 | 79 | 0.709707 | false |
phil-lopreiato/the-blue-alliance
|
tests/models_tests/notifications/test_awards.py
|
2
|
6181
|
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.award_type import AwardType
from consts.event_type import EventType
from consts.notification_type import NotificationType
from models.award import Award
from models.event import Event
from models.team import Team
from models.notifications.awards import AwardsNotification
class TestAwardsNotification(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache()
self.testbed.init_taskqueue_stub(root_path='.')
self.event = Event(
id='2020miket',
event_type_enum=EventType.DISTRICT,
short_name='Kettering University #1',
name='FIM District Kettering University Event #1',
event_short='miket',
year=2020
)
self.team = Team(
id='frc7332',
team_number=7332
)
self.award = Award(
id=Award.render_key_name(self.event.key_name, AwardType.INDUSTRIAL_DESIGN),
name_str='Industrial Design Award sponsored by General Motors',
award_type_enum=AwardType.INDUSTRIAL_DESIGN,
event=self.event.key,
event_type_enum=EventType.DISTRICT,
year=2020
)
self.winner_award = Award(
id=Award.render_key_name(self.event.key_name, AwardType.WINNER),
name_str='District Event Winner',
award_type_enum=AwardType.WINNER,
event=self.event.key,
event_type_enum=EventType.DISTRICT,
year=2020
)
def tearDown(self):
self.testbed.deactivate()
def test_type(self):
notification = AwardsNotification(self.event)
self.assertEqual(AwardsNotification._type(), NotificationType.AWARDS)
def test_fcm_notification_event(self):
notification = AwardsNotification(self.event)
self.assertIsNotNone(notification.fcm_notification)
self.assertEqual(notification.fcm_notification.title, 'MIKET Awards')
self.assertEqual(notification.fcm_notification.body, '2020 Kettering University #1 District awards have been posted.')
def test_fcm_notification_team(self):
self.award.team_list = [self.team.key]
self.award.put()
notification = AwardsNotification(self.event, self.team)
self.assertIsNotNone(notification.fcm_notification)
self.assertEqual(notification.fcm_notification.title, 'Team 7332 Awards')
self.assertEqual(notification.fcm_notification.body, 'Team 7332 won the Industrial Design Award sponsored by General Motors at the 2020 Kettering University #1 District.')
def test_fcm_notification_team_winner(self):
self.winner_award.team_list = [self.team.key]
self.winner_award.put()
notification = AwardsNotification(self.event, self.team)
self.assertIsNotNone(notification.fcm_notification)
self.assertEqual(notification.fcm_notification.title, 'Team 7332 Awards')
self.assertEqual(notification.fcm_notification.body, 'Team 7332 is the District Event Winner at the 2020 Kettering University #1 District.')
def test_fcm_notification_team_finalist(self):
        self.winner_award.award_type_enum = AwardType.FINALIST
        self.winner_award.name_str = 'District Event Finalist'
self.winner_award.team_list = [self.team.key]
self.winner_award.put()
notification = AwardsNotification(self.event, self.team)
self.assertIsNotNone(notification.fcm_notification)
self.assertEqual(notification.fcm_notification.title, 'Team 7332 Awards')
self.assertEqual(notification.fcm_notification.body, 'Team 7332 is the District Event Finalist at the 2020 Kettering University #1 District.')
def test_fcm_notification_team_multiple(self):
self.award.team_list = [self.team.key]
self.award.put()
self.winner_award.team_list = [self.team.key]
self.winner_award.put()
notification = AwardsNotification(self.event, self.team)
self.assertIsNotNone(notification.fcm_notification)
self.assertEqual(notification.fcm_notification.title, 'Team 7332 Awards')
self.assertEqual(notification.fcm_notification.body, 'Team 7332 won 2 awards at the 2020 Kettering University #1 District.')
def test_data_payload(self):
notification = AwardsNotification(self.event)
# No `event_name`
payload = notification.data_payload
self.assertEqual(len(payload), 1)
self.assertEqual(payload['event_key'], '2020miket')
def test_data_payload_team(self):
notification = AwardsNotification(self.event, self.team)
payload = notification.data_payload
self.assertEqual(len(payload), 2)
self.assertEqual(payload['event_key'], '2020miket')
self.assertEqual(payload['team_key'], 'frc7332')
def test_webhook_message_data(self):
self.award.put()
self.winner_award.put()
notification = AwardsNotification(self.event)
payload = notification.webhook_message_data
self.assertEqual(len(payload), 3)
self.assertEqual(payload['event_key'], '2020miket')
self.assertEqual(payload['event_name'], 'FIM District Kettering University Event #1')
self.assertIsNotNone(payload['awards'])
self.assertEqual(len(payload['awards']), 2)
def test_webhook_message_data_team(self):
self.award.team_list = [self.team.key]
self.award.put()
notification = AwardsNotification(self.event, self.team)
payload = notification.webhook_message_data
self.assertEqual(len(payload), 4)
self.assertEqual(payload['event_key'], '2020miket')
self.assertEqual(payload['team_key'], 'frc7332')
self.assertEqual(payload['event_name'], 'FIM District Kettering University Event #1')
self.assertIsNotNone(payload['awards'])
self.assertEqual(len(payload['awards']), 1)
|
mit
| -7,647,717,524,783,904,000 | -4,011,050,970,727,529,000 | 38.877419 | 179 | 0.678046 | false |
celibertojr/Kbsim
|
Bots/Renderbot.py
|
2
|
10513
|
"""
Renderbot
Renderbot is an advanced bot used in conjunction with the
designer module. It demonstrates arbitrary pattern formation.
Based on the Abot, see it for some details.
NOTE: The next position is defined by two existing reference
points in the pattern. Bots bumping into each other is an
issue and inaccurate positioning makes merry hell of the
whole structure, causing complete breakdowns at times,
as failing gracefully is not always an option.
"""
from Kilobot import *
def load(sim):
return Renderbot(sim)
# msgs, dodge lsb
GO = 0x8; SCATTER = 0xA; DONE = 0xC;
# magic consts
TURN = 19; TRIGGER = 5
FRINGE = 67
ANON = -1
RXBUFSIZE = 4
class Renderbot(Kilobot):
def __init__(self, sim):
Kilobot.__init__(self, sim)
self.sim = sim
self.id = ANON # identity
self.top = 0
self.mark = 0 # finish
self.rules = None
self.target = 0 # orbit
self.tcount = 0
self.tvalid = False
self.spiral = 0 # spiral
self.scount = 0
if (self.secretID == 0): # the leader, the first violin
self.program = [self.activateL,
self.hold
]
else: # others
self.program = [self.activate,
self.get,
self.doOrbit,
self.doSpiral,
self.doFinish,
self.hold
]
##
## Info print hack; Simulator function override
##
def drawinfo(self, screen): # draw position text
pos = self.pos.inttup()
text = self.font.render(
"%d:%s" % (self.secretID, str(self.id) if self.id >= 0 else "-"),
True, (0, 0, 0))
screen.blit(text, (pos[0] - 12, pos[1] - 2))
##
## Activation
##
def activateL(self):
self.id = 0
print "0 assumes position as 0"
self.debug = "S:0"
self.message_out(0,0,GO)
self.toggle_tx()
self.set_color(0,3,0)
return self.goto(self.hold)
def activate(self):
self.get_message()
if (self.msgrx[5] == 1): # scatter
self.message_out(42,0,SCATTER)
self.enable_tx()
if (self.rand() % 4 == 0):
self.op = self.fullCW if self.op == self.fullCCW else self.fullCCW
self.op()
elif (self.rand() % 100 == 0): # start
self.disable_tx()
print self.secretID, "begins target hunt"
self.set_color(3,0,0)
self.target = 1
self.debug = "T:%d" % (self.target)
self.getRules(self.target)
return self.goto(self.get)
else:
self.midFWRD()
self.PC -= 1
##
## Listen
##
def get(self):
isConnected = False
for i in range(0, RXBUFSIZE): # check all of the buffer for msgs
self.get_message()
if (self.msgrx[5] == 1):
isConnected = True
self.history_add(self.msgrx[0:4])
else:
break
self.reset_rx()
if (isConnected and self.id == ANON):
return self.goto(self.doOrbit)
elif (isConnected and self.id != ANON):
return self.goto(self.doFinish)
else:
return self.goto(self.doSpiral)
##
## Move
##
def doSpiral(self): # spiral towards the swarm; turn, forward or finish
self.scount += 1
if (self.scount < TURN):
self.fullCCW()
elif (self.scount <= TURN + self.spiral):
self.midFWRD()
else:
self.scount = 0
self.spiral += 3
# are we by the form yet?
self.get_message()
if (self.msgrx[5] == 1):
self.midFWRD()
self.fullCCW()
self.scount = 0
self.spiral = 0
self.history_reset()
return self.goto(self.doOrbit)
else:
self.PC -= 1
def doOrbit(self): # follow the form gradient
# ensure we have enough history
if (not self.history_full()):
self.fullCCW()
return self.goto(self.get)
# reduce and aggregate message history
top, dists, heard, hbins = self.history_reduce()
heardR1 = self.rules[1][0] in heard
# adjust target, if necessary
if (top >= self.target):
self.target = top + 1
self.getRules(self.target)
print self.secretID, "target adjust to", self.target
self.debug = "T:%d" % (self.target)
self.tvalid = False
self.tcount = 0
return self.goto(self.get)
# decide: either follow general orbit, or start closing in
if (heardR1): # TODO un-NEGATE
rule1 = self.rules[1][0]
rdists = map((lambda x: x[3]), hbins[heard.index(rule1)])
# average hits the FRINGE, note: smart array
isValid = len(rdists) > 2 and sum(rdists)/len(rdists) == FRINGE
# if (self.target > 6 and not isValid): # make a bit easier to start later on
# isValid = FRINGE in rdists
#
# print self.secretID, rule1, rdists, isValid
if (isValid): # assume the position
print self.secretID, "assumes position as", self.target
self.id = self.target
self.debug = "S:%d" % (self.id)
self.set_color(3,0,3)
return self.goto(self.doFinish)
# steering decision
self._steer(FRINGE, dists, 50)
return self.goto(self.get)
def _steer(self, goal, dists, repel):
multi = 8
d = dists[0]
diff = d - goal
repels = d < repel
goingUp = diff < 0
near = 0*multi <= abs(diff) < 1*multi
far1 = 1*multi <= abs(diff) < 2*multi
far2 = 2*multi <= abs(diff) < 3*multi
far3 = 3*multi <= abs(diff) < 4*multi
far4 = 4*multi <= abs(diff) < 5*multi # 32 <= d < 40
avg = sum(dists)/len(dists)
# we are near our goal
if (goal < d <= goal + multi):
self.fullCCW()
self.fullCCW()
elif (d == goal):
self.midFWRD()
elif (goal - multi <= d < goal):
self.fullCW()
self.fullCW()
# we are too close
elif (repels):
self.fullCW()
# we are !near
elif (far1):
self.midFWRD()
self.op = self.fullCW if goingUp else self.fullCCW
self.op()
elif (far2): # going down
self.midFWRD()
self.op = self.fullCW if goingUp else self.fullCCW
if (self.rand() % 5 == 0):
self.op()
elif (far3):
self.midFWRD()
self.op = self.fullCW if goingUp else self.fullCCW
if (self.rand() % 10 == 0 or avg == d):
self.op()
elif (far4):
self.midFWRD()
self.op = self.fullCW if goingUp else self.fullCCW
if (self.rand() % 20 == 0 or avg == d):
self.op()
# NOTE: custom pattern;
def getRules(self, target):
rules = self.sim.config['form']['rules'] # ask the simulation for the rules
if (target == 0): # never used, merely for human readability
print "ERROR: someone requested rules for target 0"
exit(-42)
self.rules = [0, (0, 00), (0, 00), 0]
else:
self.rules = rules[target - 1]
def _forfeit(self):
print self.secretID, "forfeits position as", self.id
self.mark = 0
self.target = 0
self.tcount = 0
self.id = ANON
self.set_color(3,0,0)
self.disable_tx()
# TODO: proper maintenance procedure
# the last mms are the hardest
def doFinish(self):
# reduce and aggregate message history
top, dists, heard, hbins = self.history_reduce()
# if denied, reset and restart
if (top >= self.id):
self._forfeit()
return self.goto(self.get)
heardR1 = self.rules[1][0] in heard
heardR2 = self.rules[2][0] in heard
r1bin = r2bin = None
heardR1bound = heardR2bound = False
if (heardR1):
r1bin = map((lambda x: x[3]), hbins[heard.index(self.rules[1][0])])
heardR1bound = self.rules[1][1] in r1bin
if (heardR2):
r2bin = map((lambda x: x[3]), hbins[heard.index(self.rules[2][0])])
heardR2bound = self.rules[2][1] in r2bin
# done when second is reached (or when we want to be the first violin)
if ((self.mark == 1 and heardR2bound) or (heardR1bound and self.id == 1)):
self.top = self.id
if (self.id == self.secretN - 1):
self.message_out(self.id, self.id, DONE)
self.set_color(0,3,0)
else:
self.message_out(self.id, self.id, GO)
self.set_color(0,0,3)
self.enable_tx()
return self.goto(self.hold)
elif (heardR1): # otherwise follow first
if (heardR1bound):
self.mark = 1
r1bin = map((lambda x: x[3]), hbins[heard.index(self.rules[1][0])])
# steering
self._steer(self.rules[1][1], r1bin, 0)
elif (self.rand() % 100 == 0):
self._forfeit()
return self.goto(self.get)
return self.goto(self.get)
def hold(self):
self.PC -= 1
self.get_message()
if (self.msgrx[5] == 1):
heard = self.msgrx[0]
top = self.msgrx[1]
mode = self.msgrx[2]
dist = self.msgrx[3]
if (top > self.top): # to help others, all of the swarm shouts the top
self.top = top
self.message_out(self.id, self.top, mode)
if (mode == DONE): # finish trigger
self.enable_tx()
self.message_out(self.id, self.top, DONE)
self.set_color(0,3,0)
self.reset_rx()
|
gpl-3.0
| -4,100,531,029,184,355,300 | 4,616,671,362,007,755,000 | 30.38209 | 88 | 0.496433 | false |
Oslandia/vizitown_plugin
|
twisted/lore/lint.py
|
32
|
8849
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Checker for common errors in Lore documents.
"""
from xml.dom import minidom as dom
import parser
import urlparse
import os.path
from twisted.lore import tree, process
from twisted.web import domhelpers
from twisted.python import reflect
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
parserErrors = (SyntaxError,)
deprecatedModuleAttribute(
Version("Twisted", 13, 1, 0),
"parserErrors is deprecated",
__name__,
"parserErrors")
class TagChecker:
def check(self, dom, filename):
self.hadErrors = 0
for method in reflect.prefixedMethods(self, 'check_'):
method(dom, filename)
if self.hadErrors:
raise process.ProcessingFailure("invalid format")
def _reportError(self, filename, element, error):
hlint = element.hasAttribute('hlint') and element.getAttribute('hlint')
if hlint != 'off':
self.hadErrors = 1
pos = getattr(element, '_markpos', None) or (0, 0)
print "%s:%s:%s: %s" % ((filename,)+pos+(error,))
class DefaultTagChecker(TagChecker):
def __init__(self, allowedTags, allowedClasses):
self.allowedTags = allowedTags
self.allowedClasses = allowedClasses
def check_disallowedElements(self, dom, filename):
def m(node, self=self):
return not self.allowedTags(node.tagName)
for element in domhelpers.findElements(dom, m):
self._reportError(filename, element,
'unrecommended tag %s' % element.tagName)
def check_disallowedClasses(self, dom, filename):
def matcher(element, self=self):
if not element.hasAttribute('class'):
return 0
checker = self.allowedClasses.get(element.tagName, lambda x:0)
return not checker(element.getAttribute('class'))
for element in domhelpers.findElements(dom, matcher):
self._reportError(filename, element,
'unknown class %s' %element.getAttribute('class'))
def check_quote(self, doc, filename):
def matcher(node):
return ('"' in getattr(node, 'data', '') and
not isinstance(node, dom.Comment) and
not [1 for n in domhelpers.getParents(node)[1:-1]
if n.tagName in ('pre', 'code')])
for node in domhelpers.findNodes(doc, matcher):
self._reportError(filename, node.parentNode, 'contains quote')
def check_styleattr(self, dom, filename):
for node in domhelpers.findElementsWithAttribute(dom, 'style'):
self._reportError(filename, node, 'explicit style')
def check_align(self, dom, filename):
for node in domhelpers.findElementsWithAttribute(dom, 'align'):
self._reportError(filename, node, 'explicit alignment')
def check_style(self, dom, filename):
for node in domhelpers.findNodesNamed(dom, 'style'):
if domhelpers.getNodeText(node) != '':
self._reportError(filename, node, 'hand hacked style')
def check_title(self, dom, filename):
doc = dom.documentElement
title = domhelpers.findNodesNamed(dom, 'title')
if len(title)!=1:
return self._reportError(filename, doc, 'not exactly one title')
h1 = domhelpers.findNodesNamed(dom, 'h1')
if len(h1)!=1:
return self._reportError(filename, doc, 'not exactly one h1')
if domhelpers.getNodeText(h1[0]) != domhelpers.getNodeText(title[0]):
self._reportError(filename, h1[0], 'title and h1 text differ')
def check_80_columns(self, dom, filename):
for node in domhelpers.findNodesNamed(dom, 'pre'):
# the ps/pdf output is in a font that cuts off at 80 characters,
# so this is enforced to make sure the interesting parts (which
# are likely to be on the right-hand edge) stay on the printed
# page.
for line in domhelpers.gatherTextNodes(node, 1).split('\n'):
if len(line.rstrip()) > 80:
self._reportError(filename, node,
'text wider than 80 columns in pre')
for node in domhelpers.findNodesNamed(dom, 'a'):
if node.getAttribute('class').endswith('listing'):
try:
fn = os.path.dirname(filename)
fn = os.path.join(fn, node.getAttribute('href'))
lines = open(fn,'r').readlines()
except:
self._reportError(filename, node,
'bad listing href: %r' %
node.getAttribute('href'))
continue
for line in lines:
if len(line.rstrip()) > 80:
self._reportError(filename, node,
'listing wider than 80 columns')
def check_pre_py_listing(self, dom, filename):
for node in domhelpers.findNodesNamed(dom, 'pre'):
if node.getAttribute('class') == 'python':
try:
text = domhelpers.getNodeText(node)
# Fix < and >
                    text = text.replace('&gt;', '>').replace('&lt;', '<')
# Strip blank lines
lines = filter(None,[l.rstrip() for l in text.split('\n')])
# Strip leading space
while not [1 for line in lines if line[:1] not in ('',' ')]:
lines = [line[1:] for line in lines]
text = '\n'.join(lines) + '\n'
try:
parser.suite(text)
except SyntaxError:
# Pretend the "..." idiom is syntactically valid
text = text.replace("...","'...'")
parser.suite(text)
except SyntaxError as e:
self._reportError(filename, node,
'invalid python code:' + str(e))
def check_anchor_in_heading(self, dom, filename):
headingNames = ['h%d' % n for n in range(1,7)]
for hname in headingNames:
for node in domhelpers.findNodesNamed(dom, hname):
if domhelpers.findNodesNamed(node, 'a'):
self._reportError(filename, node, 'anchor in heading')
def check_texturl_matches_href(self, dom, filename):
for node in domhelpers.findNodesNamed(dom, 'a'):
if not node.hasAttribute('href'):
continue
text = domhelpers.getNodeText(node)
proto = urlparse.urlparse(text)[0]
if proto and ' ' not in text:
if text != node.getAttribute('href'):
self._reportError(filename, node,
'link text does not match href')
def check_lists(self, dom, filename):
for node in (domhelpers.findNodesNamed(dom, 'ul')+
domhelpers.findNodesNamed(dom, 'ol')):
if not node.childNodes:
self._reportError(filename, node, 'empty list')
for child in node.childNodes:
if child.nodeName != 'li':
self._reportError(filename, node,
'only list items allowed in lists')
def list2dict(l):
d = {}
for el in l:
d[el] = None
return d
classes = list2dict(['shell', 'API', 'python', 'py-prototype', 'py-filename',
'py-src-string', 'py-signature', 'py-src-parameter',
'py-src-identifier', 'py-src-keyword'])
tags = list2dict(["html", "title", "head", "body", "h1", "h2", "h3", "ol", "ul",
"dl", "li", "dt", "dd", "p", "code", "img", "blockquote", "a",
"cite", "div", "span", "strong", "em", "pre", "q", "table",
"tr", "td", "th", "style", "sub", "sup", "link"])
span = list2dict(['footnote', 'manhole-output', 'index'])
div = list2dict(['note', 'boxed', 'doit'])
a = list2dict(['listing', 'py-listing', 'html-listing', 'absolute'])
pre = list2dict(['python', 'shell', 'python-interpreter', 'elisp'])
allowed = {'code': classes.has_key, 'span': span.has_key, 'div': div.has_key,
'a': a.has_key, 'pre': pre.has_key, 'ul': lambda x: x=='toc',
'ol': lambda x: x=='toc', 'li': lambda x: x=='ignoretoc'}
def getDefaultChecker():
return DefaultTagChecker(tags.__contains__, allowed)
def doFile(file, checker):
doc = tree.parseFileAndReport(file)
if doc:
checker.check(doc, file)
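# Illustrative usage sketch (not part of the original module):
#
#   checker = getDefaultChecker()
#   doFile('howto/index.xhtml', checker)  # hypothetical Lore document path
#
# doFile() parses the document and runs every check_* method against it;
# checker.check() raises process.ProcessingFailure if any check reported
# an error.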
|
gpl-2.0
| -2,604,033,220,322,685,000 | 7,705,096,335,745,209,000 | 40.544601 | 80 | 0.551588 | false |
fujunwei/chromium-crosswalk
|
tools/git/git-diff-ide.py
|
197
|
2668
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Invokes git diff [args...] and inserts file:line in front of each line of diff
output where possible.
This is useful from an IDE that allows you to double-click lines that begin
with file:line to open and jump to that point in the file.
Synopsis:
%prog [git diff args...]
Examples:
%prog
%prog HEAD
"""
import subprocess
import sys
def GitShell(args, ignore_return=False):
"""A shell invocation suitable for communicating with git. Returns
output as list of lines, raises exception on error.
"""
job = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(out, err) = job.communicate()
if job.returncode != 0 and not ignore_return:
print out
raise Exception("Error %d running command %s" % (
job.returncode, args))
return out.split('\n')
def PrintGitDiff(extra_args):
"""Outputs git diff extra_args with file:line inserted into relevant lines."""
current_file = '';
line_num = 0;
lines = GitShell('git diff %s' % ' '.join(extra_args))
for line in lines:
# Pass-through lines:
# diff --git a/file.c b/file.c
# index 0e38c2d..8cd69ae 100644
# --- a/file.c
if (line.startswith('diff ') or
line.startswith('index ') or
line.startswith('--- ')):
print line
continue
# Get the filename from the +++ line:
# +++ b/file.c
if line.startswith('+++ '):
# Filename might be /dev/null or a/file or b/file.
# Skip the first two characters unless it starts with /.
current_file = line[4:] if line[4] == '/' else line[6:]
print line
continue
# Update line number from the @@ lines:
# @@ -41,9 +41,9 @@ def MyFunc():
# ^^
if line.startswith('@@ '):
_, old_nr, new_nr, _ = line.split(' ', 3)
line_num = int(new_nr.split(',')[0])
print line
continue
print current_file + ':' + repr(line_num) + ':' + line
# Increment line number for lines that start with ' ' or '+':
# @@ -41,4 +41,4 @@ def MyFunc():
# file.c:41: // existing code
# file.c:42: // existing code
# file.c:43:-// deleted code
# file.c:43:-// deleted code
# file.c:43:+// inserted code
# file.c:44:+// inserted code
if line.startswith(' ') or line.startswith('+'):
line_num += 1
def main():
PrintGitDiff(sys.argv[1:])
if __name__ == '__main__':
main()
|
bsd-3-clause
| 105,199,084,595,271,970 | -2,062,391,673,609,346,600 | 27.688172 | 80 | 0.594453 | false |
apollo13/ansible
|
lib/ansible/galaxy/token.py
|
25
|
5736
|
########################################################################
#
# (C) 2015, Chris Houseknecht <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import os
import json
from stat import S_IRUSR, S_IWUSR
import yaml
from ansible import constants as C
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.urls import open_url
from ansible.utils.display import Display
display = Display()
class NoTokenSentinel(object):
""" Represents an ansible.cfg server with not token defined (will ignore cmdline and GALAXY_TOKEN_PATH. """
def __new__(cls, *args, **kwargs):
return cls
class KeycloakToken(object):
'''A token granted by a Keycloak server.
Like sso.redhat.com as used by cloud.redhat.com
ie Automation Hub'''
token_type = 'Bearer'
def __init__(self, access_token=None, auth_url=None, validate_certs=True):
self.access_token = access_token
self.auth_url = auth_url
self._token = None
self.validate_certs = validate_certs
def _form_payload(self):
return 'grant_type=refresh_token&client_id=cloud-services&refresh_token=%s' % self.access_token
def get(self):
if self._token:
return self._token
# - build a request to POST to auth_url
# - body is form encoded
        # - 'refresh_token' is the offline token stored in ansible.cfg
# - 'grant_type' is 'refresh_token'
# - 'client_id' is 'cloud-services'
# - should probably be based on the contents of the
# offline_ticket's JWT payload 'aud' (audience)
# or 'azp' (Authorized party - the party to which the ID Token was issued)
payload = self._form_payload()
resp = open_url(to_native(self.auth_url),
data=payload,
validate_certs=self.validate_certs,
method='POST',
http_agent=user_agent())
# TODO: handle auth errors
data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
# - extract 'access_token'
self._token = data.get('access_token')
return self._token
def headers(self):
headers = {}
headers['Authorization'] = '%s %s' % (self.token_type, self.get())
return headers
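# Illustrative sketch (assumption, not part of the original file): a
# KeycloakToken is built from the offline token configured for a Galaxy
# server and the SSO token endpoint, then asked for request headers:
#
#   token = KeycloakToken(access_token='<offline token>',
#                         auth_url='https://sso.example.com/token')
#   token.headers()  # -> {'Authorization': 'Bearer <access token>'}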
class GalaxyToken(object):
''' Class to storing and retrieving local galaxy token '''
token_type = 'Token'
def __init__(self, token=None):
self.b_file = to_bytes(C.GALAXY_TOKEN_PATH, errors='surrogate_or_strict')
# Done so the config file is only opened when set/get/save is called
self._config = None
self._token = token
@property
def config(self):
if self._config is None:
self._config = self._read()
# Prioritise the token passed into the constructor
if self._token:
self._config['token'] = None if self._token is NoTokenSentinel else self._token
return self._config
def _read(self):
action = 'Opened'
if not os.path.isfile(self.b_file):
            # token file not found, create it and chmod u+rw
open(self.b_file, 'w').close()
os.chmod(self.b_file, S_IRUSR | S_IWUSR) # owner has +rw
action = 'Created'
with open(self.b_file, 'r') as f:
config = yaml.safe_load(f)
display.vvv('%s %s' % (action, to_text(self.b_file)))
return config or {}
def set(self, token):
self._token = token
self.save()
def get(self):
return self.config.get('token', None)
def save(self):
with open(self.b_file, 'w') as f:
yaml.safe_dump(self.config, f, default_flow_style=False)
def headers(self):
headers = {}
token = self.get()
if token:
headers['Authorization'] = '%s %s' % (self.token_type, self.get())
return headers
class BasicAuthToken(object):
token_type = 'Basic'
def __init__(self, username, password=None):
self.username = username
self.password = password
self._token = None
@staticmethod
def _encode_token(username, password):
token = "%s:%s" % (to_text(username, errors='surrogate_or_strict'),
to_text(password, errors='surrogate_or_strict', nonstring='passthru') or '')
b64_val = base64.b64encode(to_bytes(token, encoding='utf-8', errors='surrogate_or_strict'))
return to_text(b64_val)
def get(self):
if self._token:
return self._token
self._token = self._encode_token(self.username, self.password)
return self._token
def headers(self):
headers = {}
headers['Authorization'] = '%s %s' % (self.token_type, self.get())
return headers
|
gpl-3.0
| 5,973,892,165,625,740,000 | -7,508,173,206,914,360,000 | 30.866667 | 111 | 0.598152 | false |
baylee/django
|
tests/model_package/tests.py
|
380
|
2668
|
from __future__ import unicode_literals
from django.db import connection, models
from django.db.backends.utils import truncate_name
from django.test import TestCase
from .models.article import Article, Site
from .models.publication import Publication
class Advertisement(models.Model):
customer = models.CharField(max_length=100)
publications = models.ManyToManyField("model_package.Publication", blank=True)
class ModelPackageTests(TestCase):
def test_m2m_tables_in_subpackage_models(self):
"""
Regression for #12168: models split into subpackages still get M2M
tables.
"""
p = Publication.objects.create(title="FooBar")
site = Site.objects.create(name="example.com")
a = Article.objects.create(headline="a foo headline")
a.publications.add(p)
a.sites.add(site)
a = Article.objects.get(id=a.pk)
self.assertEqual(a.id, a.pk)
self.assertEqual(a.sites.count(), 1)
def test_models_in_the_test_package(self):
"""
Regression for #12245 - Models can exist in the test package, too.
"""
p = Publication.objects.create(title="FooBar")
ad = Advertisement.objects.create(customer="Lawrence Journal-World")
ad.publications.add(p)
ad = Advertisement.objects.get(id=ad.pk)
self.assertEqual(ad.publications.count(), 1)
def test_automatic_m2m_column_names(self):
"""
Regression for #12386 - field names on the autogenerated intermediate
class that are specified as dotted strings don't retain any path
component for the field or column name.
"""
self.assertEqual(
Article.publications.through._meta.fields[1].name, 'article'
)
self.assertEqual(
Article.publications.through._meta.fields[1].get_attname_column(),
('article_id', 'article_id')
)
self.assertEqual(
Article.publications.through._meta.fields[2].name, 'publication'
)
self.assertEqual(
Article.publications.through._meta.fields[2].get_attname_column(),
('publication_id', 'publication_id')
)
self.assertEqual(
Article._meta.get_field('publications').m2m_db_table(),
truncate_name('model_package_article_publications', connection.ops.max_name_length()),
)
self.assertEqual(
Article._meta.get_field('publications').m2m_column_name(), 'article_id'
)
self.assertEqual(
Article._meta.get_field('publications').m2m_reverse_name(),
'publication_id'
)
|
bsd-3-clause
| -8,048,306,356,092,105,000 | 6,362,319,545,967,308 | 33.205128 | 98 | 0.634183 | false |
leture/sorl-thumbnail
|
sorl/thumbnail/engines/convert_engine.py
|
1
|
5931
|
from __future__ import unicode_literals, with_statement
import re
import os
import subprocess
from django.utils.encoding import smart_str
from django.core.files.temp import NamedTemporaryFile
from sorl.thumbnail.base import EXTENSIONS
from sorl.thumbnail.compat import b
from sorl.thumbnail.conf import settings
from sorl.thumbnail.engines.base import EngineBase
from sorl.thumbnail.compat import OrderedDict
size_re = re.compile(r'^(?:.+) (?:[A-Z]+) (?P<x>\d+)x(?P<y>\d+)')
class Engine(EngineBase):
"""
Image object is a dict with source path, options and size
"""
def write(self, image, options, thumbnail):
"""
Writes the thumbnail image
"""
if options['format'] == 'JPEG' and options.get(
'progressive', settings.THUMBNAIL_PROGRESSIVE):
image['options']['interlace'] = 'line'
image['options']['quality'] = options['quality']
args = settings.THUMBNAIL_CONVERT.split(' ')
args.append(image['source'] + '[0]')
for k in image['options']:
v = image['options'][k]
args.append('-%s' % k)
if v is not None:
args.append('%s' % v)
flatten = "on"
if 'flatten' in options:
flatten = options['flatten']
if settings.THUMBNAIL_FLATTEN and not flatten == "off":
args.append('-flatten')
suffix = '.%s' % EXTENSIONS[options['format']]
with NamedTemporaryFile(suffix=suffix, mode='rb') as fp:
args.append(fp.name)
args = map(smart_str, args)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
out, err = p.communicate()
if err:
raise Exception(err)
thumbnail.write(fp.read())
def cleanup(self, image):
os.remove(image['source']) # we should not need this now
def get_image(self, source):
"""
Returns the backend image objects from a ImageFile instance
"""
with NamedTemporaryFile(mode='wb', delete=False) as fp:
fp.write(source.read())
return {'source': fp.name, 'options': OrderedDict(), 'size': None}
def get_image_size(self, image):
"""
Returns the image width and height as a tuple
"""
if image['size'] is None:
args = settings.THUMBNAIL_IDENTIFY.split(' ')
args.append(image['source'])
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
m = size_re.match(str(p.stdout.read()))
image['size'] = int(m.group('x')), int(m.group('y'))
return image['size']
def is_valid_image(self, raw_data):
"""
This is not very good for imagemagick because it will say anything is
valid that it can use as input.
"""
with NamedTemporaryFile(mode='wb') as fp:
fp.write(raw_data)
fp.flush()
args = settings.THUMBNAIL_IDENTIFY.split(' ')
args.append(fp.name)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
retcode = p.wait()
return retcode == 0
def _orientation(self, image):
# return image
# XXX need to get the dimensions right after a transpose.
if settings.THUMBNAIL_CONVERT.endswith('gm convert'):
args = settings.THUMBNAIL_IDENTIFY.split()
args.extend(['-format', '%[exif:orientation]', image['source']])
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
result = p.stdout.read().strip()
if result and result != b('unknown'):
result = int(result)
options = image['options']
if result == 2:
options['flop'] = None
elif result == 3:
options['rotate'] = '180'
elif result == 4:
options['flip'] = None
elif result == 5:
options['rotate'] = '90'
options['flop'] = None
elif result == 6:
options['rotate'] = '90'
elif result == 7:
options['rotate'] = '-90'
options['flop'] = None
elif result == 8:
options['rotate'] = '-90'
else:
# ImageMagick also corrects the orientation exif data for
# destination
image['options']['auto-orient'] = None
return image
def _colorspace(self, image, colorspace):
"""
`Valid colorspaces
<http://www.graphicsmagick.org/GraphicsMagick.html#details-colorspace>`_.
Backends need to implement the following::
RGB, GRAY
"""
image['options']['colorspace'] = colorspace
return image
def _crop(self, image, width, height, x_offset, y_offset):
"""
Crops the image
"""
image['options']['crop'] = '%sx%s+%s+%s' % (width, height, x_offset, y_offset)
image['size'] = (width, height) # update image size
return image
def _scale(self, image, width, height):
"""
Does the resizing of the image
"""
image['options']['scale'] = '%sx%s!' % (width, height)
image['size'] = (width, height) # update image size
return image
def _padding(self, image, geometry, options):
"""
Pads the image
"""
# The order is important. The gravity option should come before extent.
image['options']['background'] = options.get('padding_color')
image['options']['gravity'] = 'center'
image['options']['extent'] = '%sx%s' % (geometry[0], geometry[1])
return image
|
bsd-3-clause
| -6,740,727,638,373,946,000 | -347,316,018,024,879,040 | 33.684211 | 86 | 0.544933 | false |
JosephCastro/selenium
|
py/test/selenium/common/utils.py
|
65
|
2074
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import socket
import time
import urllib
import subprocess
import signal
SERVER_ADDR = "localhost"
DEFAULT_PORT = 4444
SERVER_PATH = "build/java/server/src/org/openqa/grid/selenium/selenium-standalone.jar"
def start_server(module):
_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
url = "http://%s:%d/wd/hub" % (SERVER_ADDR, DEFAULT_PORT)
try:
_socket.connect((SERVER_ADDR, DEFAULT_PORT))
print("The remote driver server is already running or something else"
"is using port %d, continuing..." % DEFAULT_PORT)
except:
print("Starting the remote driver server")
module.server_proc = subprocess.Popen(
"java -jar %s" % SERVER_PATH,
shell=True)
assert wait_for_server(url, 10), "can't connect"
print("Server should be online")
def wait_for_server(url, timeout):
start = time.time()
while time.time() - start < timeout:
try:
urllib.urlopen(url)
return 1
except IOError:
time.sleep(0.2)
return 0
def stop_server(module):
# FIXME: This does not seem to work, the server process lingers
try:
os.kill(module.server_proc.pid, signal.SIGTERM)
time.sleep(5)
except:
pass
|
apache-2.0
| 1,989,897,670,481,163,500 | 437,114,706,082,645,300 | 31.40625 | 86 | 0.684185 | false |
twobraids/socorro
|
socorro/processor/general_transform_rules.py
|
9
|
2533
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from socorro.lib.transform_rules import Rule
#==============================================================================
class IdentifierRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.crash_id = raw_crash.uuid
processed_crash.uuid = raw_crash.uuid
return True
#==============================================================================
class CPUInfoRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.cpu_info = ''
processed_crash.cpu_name = ''
try:
processed_crash.cpu_info = (
'%s | %s' % (
processed_crash.json_dump['system_info']['cpu_info'],
processed_crash.json_dump['system_info']['cpu_count']
)
)
except KeyError:
# cpu_count is likely missing
processed_crash.cpu_info = (
processed_crash.json_dump['system_info']['cpu_info']
)
processed_crash.cpu_name = (
processed_crash.json_dump['system_info']['cpu_arch']
)
return True
#==============================================================================
class OSInfoRule(Rule):
#--------------------------------------------------------------------------
def version(self):
return '1.0'
#--------------------------------------------------------------------------
def _action(self, raw_crash, raw_dumps, processed_crash, processor_meta):
processed_crash.os_name = ''
processed_crash.os_version = ''
processed_crash.os_name = (
processed_crash.json_dump['system_info']['os'].strip()
)
processed_crash.os_version = (
processed_crash.json_dump['system_info']['os_ver'].strip()
)
return True
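# Illustrative note (assumption, not part of the original file): these
# rules are applied by socorro's transform_rules machinery, which calls
# each rule's _action(raw_crash, raw_dumps, processed_crash,
# processor_meta) in turn; returning True marks the rule as applied.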
|
mpl-2.0
| 6,778,351,597,740,707,000 | -103,146,061,436,193,820 | 34.180556 | 79 | 0.411765 | false |
boehlke/OpenSlides
|
openslides/core/migrations/0008_changed_logo_fields.py
|
9
|
1996
|
# -*- coding: utf-8 -*-
# Generated by Finn Stutzenstein on 2018-07-13 12:43 :)
from __future__ import unicode_literals
from django.db import migrations
from openslides.core.config import config
def logos_available_default_to_database(apps, schema_editor):
"""
Writes the new default value of the 'logos_available' into the database.
"""
ConfigStore = apps.get_model("core", "ConfigStore")
try:
logos_available = ConfigStore.objects.get(key="logos_available")
except ConfigStore.DoesNotExist:
return # The key is not in the database, nothing to change here
default_value = config.config_variables["logos_available"].default_value
logos_available.value = default_value
logos_available.save()
def move_old_logo_settings(apps, schema_editor):
"""
    Moves the value of 'logo_pdf_header' to 'logo_pdf_header_L' and the same
for the footer. The old ones are deleted.
"""
ConfigStore = apps.get_model("core", "ConfigStore")
for place in ("header", "footer"):
try:
logo_pdf = ConfigStore.objects.get(key=f"logo_pdf_{place}")
except ConfigStore.DoesNotExist:
continue # The old entry is not in the database, nothing to change here
# The key of the new entry
new_value_key = f"logo_pdf_{place}_L"
try:
logo_pdf_L = ConfigStore.objects.get(key=new_value_key)
except ConfigStore.DoesNotExist:
logo_pdf_L = ConfigStore(key=new_value_key)
logo_pdf_L.value = {}
# Move the path to the new configentry
logo_pdf_L.value["path"] = logo_pdf.value.get("path", "")
# Save the new one, delete the old
logo_pdf_L.save()
logo_pdf.delete()
class Migration(migrations.Migration):
dependencies = [("core", "0007_auto_20180130_1400")]
operations = [
migrations.RunPython(logos_available_default_to_database),
migrations.RunPython(move_old_logo_settings),
]
|
mit
| 3,018,432,253,301,822,000 | 3,056,189,802,643,734,500 | 31.721311 | 84 | 0.6498 | false |
nwchandler/ansible
|
lib/ansible/module_utils/facts/virtual/base.py
|
199
|
2298
|
# base classes for virtualization facts
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.collector import BaseFactCollector
class Virtual:
"""
    This is a generic Virtual subclass of Facts. It should be further
    subclassed per platform. If you subclass this,
    you should define:
- virtualization_type
- virtualization_role
- container (e.g. solaris zones, freebsd jails, linux containers)
All subclasses MUST define platform.
"""
platform = 'Generic'
# FIXME: remove load_on_init if we can
def __init__(self, module, load_on_init=False):
self.module = module
# FIXME: just here for existing tests cases till they are updated
def populate(self, collected_facts=None):
virtual_facts = self.get_virtual_facts()
return virtual_facts
def get_virtual_facts(self):
virtual_facts = {'virtualization_type': '',
'virtualization_role': ''}
return virtual_facts
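# Minimal sketch of a platform subclass (hypothetical; the real per-platform
# implementations live in the sibling modules of this package):
#
# class FreeBSDVirtual(Virtual):
#     platform = 'FreeBSD'
#
#     def get_virtual_facts(self):
#         return {'virtualization_type': 'jail',
#                 'virtualization_role': 'host'}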
class VirtualCollector(BaseFactCollector):
name = 'virtual'
_fact_class = Virtual
_fact_ids = set(['virtualization_type',
'virtualization_role'])
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
if not module:
return {}
# Network munges cached_facts by side effect, so give it a copy
facts_obj = self._fact_class(module)
facts_dict = facts_obj.populate(collected_facts=collected_facts)
return facts_dict
|
gpl-3.0
| 8,412,506,560,475,336,000 | -6,545,653,526,362,111,000 | 31.828571 | 72 | 0.682332 | false |
c0710204/edx-platform
|
lms/djangoapps/verify_student/ssencrypt.py
|
55
|
7001
|
"""
NOTE: Anytime a `key` is passed into a function here, we assume it's a raw byte
string. It should *not* be a string representation of a hex value. In other
words, passing the `str` value of
`"32fe72aaf2abb44de9e161131b5435c8d37cbdb6f5df242ae860b283115f2dae"` is bad.
You want to pass in the result of calling .decode('hex') on that, which gives this instead:
"'2\xfer\xaa\xf2\xab\xb4M\xe9\xe1a\x13\x1bT5\xc8\xd3|\xbd\xb6\xf5\xdf$*\xe8`\xb2\x83\x11_-\xae'"
The RSA functions take any key format that RSA.importKey() accepts, so...
An RSA public key can be in any of the following formats:
* X.509 subjectPublicKeyInfo DER SEQUENCE (binary or PEM encoding)
* PKCS#1 RSAPublicKey DER SEQUENCE (binary or PEM encoding)
* OpenSSH (textual public key only)
An RSA private key can be in any of the following formats:
* PKCS#1 RSAPrivateKey DER SEQUENCE (binary or PEM encoding)
* PKCS#8 PrivateKeyInfo DER SEQUENCE (binary or PEM encoding)
* OpenSSH (textual public key only)
In case of PEM encoding, the private key can be encrypted with DES or 3TDES
according to a certain pass phrase. Only OpenSSL-compatible pass phrases are
supported.
"""
from hashlib import md5, sha256
import base64
import binascii
import hmac
import logging
from Crypto import Random
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
log = logging.getLogger(__name__)
def encrypt_and_encode(data, key):
""" Encrypts and endcodes `data` using `key' """
return base64.urlsafe_b64encode(aes_encrypt(data, key))
def decode_and_decrypt(encoded_data, key):
""" Decrypts and decodes `data` using `key' """
return aes_decrypt(base64.urlsafe_b64decode(encoded_data), key)
def aes_encrypt(data, key):
"""
    Return a version of `data` that has been encrypted with `key`.
"""
cipher = aes_cipher_from_key(key)
padded_data = pad(data)
return cipher.encrypt(padded_data)
def aes_decrypt(encrypted_data, key):
"""
Decrypt `encrypted_data` using `key`
"""
cipher = aes_cipher_from_key(key)
padded_data = cipher.decrypt(encrypted_data)
return unpad(padded_data)
def aes_cipher_from_key(key):
"""
Given an AES key, return a Cipher object that has `encrypt()` and
`decrypt()` methods. It will create the cipher to use CBC mode, and create
the initialization vector as Software Secure expects it.
"""
return AES.new(key, AES.MODE_CBC, generate_aes_iv(key))
def generate_aes_iv(key):
"""
Return the initialization vector Software Secure expects for a given AES
key (they hash it a couple of times and take a substring).
"""
return md5(key + md5(key).hexdigest()).hexdigest()[:AES.block_size]
def random_aes_key():
return Random.new().read(32)
def pad(data):
""" Pad the given `data` such that it fits into the proper AES block size """
bytes_to_pad = AES.block_size - len(data) % AES.block_size
return data + (bytes_to_pad * chr(bytes_to_pad))
def unpad(padded_data):
""" remove all padding from `padded_data` """
num_padded_bytes = ord(padded_data[-1])
return padded_data[:-num_padded_bytes]
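# Round-trip sketch of the AES helpers above (assumes Python 2 with PyCrypto,
# as the rest of this module does):
#   >>> key = random_aes_key()
#   >>> token = encrypt_and_encode("some data", key)
#   >>> decode_and_decrypt(token, key)
#   'some data'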
def rsa_encrypt(data, rsa_pub_key_str):
"""
    `rsa_pub_key_str` is a string containing the public key.
"""
key = RSA.importKey(rsa_pub_key_str)
cipher = PKCS1_OAEP.new(key)
encrypted_data = cipher.encrypt(data)
return encrypted_data
def rsa_decrypt(data, rsa_priv_key_str):
"""
When given some `data` and an RSA private key, decrypt the data
"""
key = RSA.importKey(rsa_priv_key_str)
cipher = PKCS1_OAEP.new(key)
return cipher.decrypt(data)
def has_valid_signature(method, headers_dict, body_dict, access_key, secret_key):
"""
Given a message (either request or response), say whether it has a valid
signature or not.
"""
_, expected_signature, _ = generate_signed_message(
method, headers_dict, body_dict, access_key, secret_key
)
authorization = headers_dict["Authorization"]
auth_token, post_signature = authorization.split(":")
_, post_access_key = auth_token.split()
if post_access_key != access_key:
log.error("Posted access key does not match ours")
log.debug("Their access: %s; Our access: %s", post_access_key, access_key)
return False
if post_signature != expected_signature:
log.error("Posted signature does not match expected")
log.debug("Their sig: %s; Expected: %s", post_signature, expected_signature)
return False
return True
def generate_signed_message(method, headers_dict, body_dict, access_key, secret_key):
"""
Returns a (message, signature) pair.
"""
message = signing_format_message(method, headers_dict, body_dict)
    # hmac needs a byte string for its starting key; it can't be unicode.
hashed = hmac.new(secret_key.encode('utf-8'), message, sha256)
signature = binascii.b2a_base64(hashed.digest()).rstrip('\n')
authorization_header = "SSI {}:{}".format(access_key, signature)
message += '\n'
return message, signature, authorization_header
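# Header sketch: the Authorization header built above has the form
#   "SSI <access_key>:<base64-encoded HMAC-SHA256 signature>"
# which is exactly what has_valid_signature() splits apart and verifies.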
def signing_format_message(method, headers_dict, body_dict):
"""
Given a dictionary of headers and a dictionary of the JSON for the body,
    will return a str that represents the normalized version of this message
that will be used to generate a signature.
"""
headers_str = "{}\n\n{}".format(method, header_string(headers_dict))
body_str = body_string(body_dict)
message = headers_str + body_str
return message
def header_string(headers_dict):
"""Given a dictionary of headers, return a canonical string representation."""
header_list = []
if 'Content-Type' in headers_dict:
header_list.append(headers_dict['Content-Type'] + "\n")
if 'Date' in headers_dict:
header_list.append(headers_dict['Date'] + "\n")
if 'Content-MD5' in headers_dict:
header_list.append(headers_dict['Content-MD5'] + "\n")
return "".join(header_list) # Note that trailing \n's are important
def body_string(body_dict, prefix=""):
"""
Return a canonical string representation of the body of a JSON request or
response. This canonical representation will be used as an input to the
hashing used to generate a signature.
"""
body_list = []
for key, value in sorted(body_dict.items()):
if isinstance(value, (list, tuple)):
for i, arr in enumerate(value):
if isinstance(arr, dict):
body_list.append(body_string(arr, u"{}.{}.".format(key, i)))
else:
body_list.append(u"{}.{}:{}\n".format(key, i, arr).encode('utf-8'))
elif isinstance(value, dict):
body_list.append(body_string(value, key + ":"))
else:
if value is None:
value = "null"
body_list.append(u"{}{}:{}\n".format(prefix, key, value).encode('utf-8'))
return "".join(body_list) # Note that trailing \n's are important
|
agpl-3.0
| 3,872,414,003,810,332,700 | 6,307,432,763,571,555,000 | 32.658654 | 96 | 0.668762 | false |
michelp/pywt
|
util/refguide_check.py
|
2
|
27051
|
#!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a PyWavelets submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
import glob
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser
import numpy as np
# sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc',
# 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "pywt"
PUBLIC_SUBMODULES = []
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = []
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
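# For example, a refguide line like
#   "   dwt           -- single-level Discrete Wavelet Transform"
# (exactly three leading spaces inside the quotes; the name 'dwt' is only an
# illustration) matches the first pattern above and is registered for its
# module in names_dict.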
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg": None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq',
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) +
validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf, }
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(
self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim',
'set_xlim', '# reformatted'}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except:
if not self.parse_namedtuples:
return False
            # suppose that "want" is a tuple, and "got" is something like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = ('[\w\d_]+\(' +
', '.join(['[\w\d_]+=(.+)']*num) +
'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
            # heterogeneous tuple, e.g. (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
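    # Tolerance sketch: with the defaults atol=1e-8 and rtol=1e-2, a pair such
    # as want=0.333 and got=0.3334 still compares equal through np.allclose,
    # which is what lets doctest output survive small floating-point drift.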
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
return success, output
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
      pseudocode and is not doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
text = open(fname).read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,' # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*',
help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true",
help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-examples", action="store_true",
help="Skip running doctests in the examples.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_examples = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_examples:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_examples:
examples_path = os.path.join(
os.getcwd(), 'doc', 'source', 'regression', '*.rst')
print('\nChecking examples files at %s:' % examples_path)
for filename in sorted(glob.glob(examples_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
examples_results = check_doctests_testfile(
filename, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
def scratch(): pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, examples_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
|
mit
| -1,307,819,271,697,428,500 | -5,084,040,022,931,487,000 | 30.527972 | 101 | 0.555728 | false |
blaggacao/odoo
|
addons/product_visible_discount/product_visible_discount.py
|
12
|
4423
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product_pricelist(osv.osv):
_inherit = 'product.pricelist'
    _columns = {
'visible_discount': fields.boolean('Visible Discount'),
}
_defaults = {
'visible_discount': True,
}
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False,
fiscal_position=False, flag=False, context=None):
def get_real_price(res_dict, product_id, qty, uom, pricelist):
item_obj = self.pool.get('product.pricelist.item')
price_type_obj = self.pool.get('product.price.type')
product_obj = self.pool.get('product.product')
field_name = 'list_price'
product = product_obj.browse(cr, uid, product_id, context)
product_read = product_obj.read(cr, uid, [product_id], [field_name], context=context)[0]
factor = 1.0
if uom and uom != product.uom_id.id:
product_uom_obj = self.pool.get('product.uom')
uom_data = product_uom_obj.browse(cr, uid, product.uom_id.id)
factor = uom_data.factor
return product_read[field_name] * factor
res=super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty,
uom, qty_uos, uos, name, partner_id,
lang, update_tax, date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
context = {'lang': lang, 'partner_id': partner_id}
result=res['value']
pricelist_obj=self.pool.get('product.pricelist')
product_obj = self.pool.get('product.product')
if product and pricelist:
if result.get('price_unit',False):
price=result['price_unit']
else:
return res
product = product_obj.browse(cr, uid, product, context)
list_price = pricelist_obj.price_get(cr, uid, [pricelist],
product.id, qty or 1.0, partner_id, {'uom': uom,'date': date_order })
so_pricelist = pricelist_obj.browse(cr, uid, pricelist, context=context)
new_list_price = get_real_price(list_price, product.id, qty, uom, pricelist)
if so_pricelist.visible_discount and list_price[pricelist] != 0 and new_list_price != 0:
if product.company_id and so_pricelist.currency_id.id != product.company_id.currency_id.id:
# new_list_price is in company's currency while price in pricelist currency
new_list_price = self.pool['res.currency'].compute(cr, uid,
product.company_id.currency_id.id, so_pricelist.currency_id.id,
new_list_price, context=context)
discount = (new_list_price - price) / new_list_price * 100
if discount > 0:
result['price_unit'] = new_list_price
result['discount'] = discount
else:
result['discount'] = 0.0
else:
result['discount'] = 0.0
else:
result['discount'] = 0.0
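        # Arithmetic sketch: with new_list_price = 100.0 and a pricelist price
        # of 80.0, discount = (100.0 - 80.0) / 100.0 * 100 = 20.0, so the line
        # keeps the full list price and shows a visible 20% discount.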
return res
|
agpl-3.0
| 3,700,581,269,815,874,000 | 8,979,143,510,017,153,000 | 43.676768 | 123 | 0.58241 | false |
ArthurChiao/code-snippets
|
python/command-line-chess/Piece.py
|
1
|
1991
|
from Coordinate import Coordinate as C
from Move import Move
WHITE = True
BLACK = False
X = 0
Y = 1
class Piece:
def __init__(self, board, side, position, movesMade=0):
self.board = board
self.side = side
self.position = position
        self.movesMade = movesMade  # honor the caller-supplied count (used by copy())
def __str__(self):
sideString = 'White' if self.side == WHITE else 'Black'
return 'Type : ' + type(self).__name__ + \
' - Position : ' + str(self.position) + \
" - Side : " + sideString + \
' -- Value : ' + str(self.value) + \
" -- Moves made : " + str(self.movesMade)
def move_in_direction(self, pos, direction, side):
'''
Get next move in specified direction
@param pos - current position of the piece
@param direction - direction to move to, in format [x_off, y_off]
@param side - which side the piece is: WHITE or BLACK
@return a Move() object which describes the next move of current piece
'''
for dis in range(1, 8):
movement = C(dis * direction[X], dis * direction[Y])
new_pos = pos + movement
if self.board.is_valid_pos(new_pos):
piece_at_pos = self.board.get_piece_at_pos(new_pos)
if piece_at_pos is None:
yield Move(self, new_pos)
                else:
if piece_at_pos.side != side:
yield Move(self, new_pos, piece_to_capture=piece_at_pos)
return
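    # Usage sketch (hypothetical rook on a populated board, for illustration):
    #   for move in rook.move_in_direction(rook.position, [1, 0], rook.side):
    #       ...  # sliding moves to the right until blocked or capturing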
def __eq__(self, other):
if self.board == other.board and \
self.side == other.side and \
self.position == other.position and \
self.__class__ == other.__class__:
return True
return False
def copy(self):
cpy = self.__class__(self.board, self.side, self.position,
movesMade=self.movesMade)
return cpy
|
mit
| -2,536,512,351,276,904,000 | -5,316,291,122,815,607,000 | 32.745763 | 80 | 0.525866 | false |
bitcity/django
|
tests/many_to_many/tests.py
|
204
|
21896
|
from __future__ import unicode_literals
from django.db import transaction
from django.test import TestCase
from django.utils import six
from .models import Article, InheritedArticleA, InheritedArticleB, Publication
class ManyToManyTests(TestCase):
def setUp(self):
# Create a couple of Publications.
self.p1 = Publication.objects.create(id=None, title='The Python Journal')
self.p2 = Publication.objects.create(id=None, title='Science News')
self.p3 = Publication.objects.create(id=None, title='Science Weekly')
self.p4 = Publication.objects.create(title='Highlights for Children')
self.a1 = Article.objects.create(id=None, headline='Django lets you build Web apps easily')
self.a1.publications.add(self.p1)
self.a2 = Article.objects.create(id=None, headline='NASA uses Python')
self.a2.publications.add(self.p1, self.p2, self.p3, self.p4)
self.a3 = Article.objects.create(headline='NASA finds intelligent life on Earth')
self.a3.publications.add(self.p2)
self.a4 = Article.objects.create(headline='Oxygen-free diet works wonders')
self.a4.publications.add(self.p2)
def test_add(self):
# Create an Article.
        a5 = Article(id=None, headline='Django lets you create Web apps easily')
# You can't associate it with a Publication until it's been saved.
self.assertRaises(ValueError, getattr, a5, 'publications')
# Save it!
a5.save()
# Associate the Article with a Publication.
a5.publications.add(self.p1)
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: The Python Journal>'])
# Create another Article, and set it to appear in both Publications.
a6 = Article(id=None, headline='ESA uses Python')
a6.save()
a6.publications.add(self.p1, self.p2)
a6.publications.add(self.p3)
# Adding a second time is OK
a6.publications.add(self.p3)
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Adding an object of the wrong type raises TypeError
with six.assertRaisesRegex(self, TypeError, "'Publication' instance expected, got <Article.*"):
with transaction.atomic():
a6.publications.add(a5)
# Add a Publication directly via publications.add by using keyword arguments.
a6.publications.create(title='Highlights for Adults')
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Adults>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_reverse_add(self):
# Adding via the 'other' end of an m2m
a5 = Article(headline='NASA finds intelligent life on Mars')
a5.save()
self.p2.article_set.add(a5)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(a5.publications.all(),
['<Publication: Science News>'])
# Adding via the other end using keywords
self.p2.article_set.create(headline='Carbon-free diet works wonders')
self.assertQuerysetEqual(
self.p2.article_set.all(),
[
'<Article: Carbon-free diet works wonders>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA finds intelligent life on Mars>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
a6 = self.p2.article_set.all()[3]
self.assertQuerysetEqual(a6.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_related_sets(self):
# Article objects have access to their related Publication objects.
self.assertQuerysetEqual(self.a1.publications.all(),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
# Publication objects have access to their related Article objects.
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p1.article_set.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(Publication.objects.get(id=self.p4.id).article_set.all(),
['<Article: NASA uses Python>'])
def test_selects(self):
# We can perform kwarg queries across m2m relationships
self.assertQuerysetEqual(
Article.objects.filter(publications__id__exact=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__pk=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1.id),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications=self.p1),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science"),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__title__startswith="Science").distinct(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# The count() function respects distinct() as well.
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").count(), 4)
self.assertEqual(Article.objects.filter(publications__title__startswith="Science").distinct().count(), 3)
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id, self.p2.id]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1.id, self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(
Article.objects.filter(publications__in=[self.p1, self.p2]).distinct(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
# Excluding a related item works as you would expect, too (although the SQL
# involved is a little complex).
self.assertQuerysetEqual(Article.objects.exclude(publications=self.p2),
['<Article: Django lets you build Web apps easily>'])
def test_reverse_selects(self):
# Reverse m2m queries are supported (i.e., starting at the table that
# doesn't have a ManyToManyField).
self.assertQuerysetEqual(Publication.objects.filter(id__exact=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(pk=self.p1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__headline__startswith="NASA"),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Publication.objects.filter(article__id__exact=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article__pk=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1.id),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(Publication.objects.filter(article=self.a1),
['<Publication: The Python Journal>'])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2.id]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1.id, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(
Publication.objects.filter(article__in=[self.a1, self.a2]).distinct(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
'<Publication: The Python Journal>',
])
def test_delete(self):
# If we delete a Publication, its Articles won't be able to access it.
self.p1.delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: Science News>',
'<Publication: Science Weekly>',
])
self.assertQuerysetEqual(self.a1.publications.all(), [])
# If we delete an Article, its Publications won't be able to access it.
self.a2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
def test_bulk_delete(self):
# Bulk delete some Publications - references to deleted publications should go
Publication.objects.filter(title__startswith='Science').delete()
self.assertQuerysetEqual(Publication.objects.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(Article.objects.all(),
[
'<Article: Django lets you build Web apps easily>',
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a2.publications.all(),
[
'<Publication: Highlights for Children>',
'<Publication: The Python Journal>',
])
# Bulk delete some articles - references to deleted objects should go
q = Article.objects.filter(headline__startswith='Django')
self.assertQuerysetEqual(q, ['<Article: Django lets you build Web apps easily>'])
q.delete()
# After the delete, the QuerySet cache needs to be cleared,
# and the referenced objects should be gone
self.assertQuerysetEqual(q, [])
self.assertQuerysetEqual(self.p1.article_set.all(),
['<Article: NASA uses Python>'])
def test_remove(self):
# Removing publication from an article:
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
'<Article: Oxygen-free diet works wonders>',
])
self.a4.publications.remove(self.p2)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And from the other end
self.p2.article_set.remove(self.a3)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA uses Python>',
])
self.assertQuerysetEqual(self.a3.publications.all(), [])
def test_set(self):
self.p2.article_set.set([self.a4, self.a3])
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications.set([self.p3.id])
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
self.p2.article_set.set([])
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.a4.publications.set([])
self.assertQuerysetEqual(self.a4.publications.all(), [])
self.p2.article_set.set([self.a4, self.a3], clear=True)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications.set([self.p3.id], clear=True)
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
self.p2.article_set.set([], clear=True)
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.a4.publications.set([], clear=True)
self.assertQuerysetEqual(self.a4.publications.all(), [])
def test_assign(self):
# Relation sets can be assigned. Assignment clears any existing set members
self.p2.article_set = [self.a4, self.a3]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
# An alternate to calling clear() is to assign the empty set
self.p2.article_set = []
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.a4.publications = []
self.assertQuerysetEqual(self.a4.publications.all(), [])
def test_assign_ids(self):
# Relation sets can also be set using primary key values
self.p2.article_set = [self.a4.id, self.a3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science News>'])
self.a4.publications = [self.p3.id]
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
self.assertQuerysetEqual(self.a4.publications.all(),
['<Publication: Science Weekly>'])
def test_forward_assign_with_queryset(self):
# Ensure that querysets used in m2m assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ManyRelatedObjectsDescriptor.__set__. Refs #19816.
self.a1.publications = [self.p1, self.p2]
qs = self.a1.publications.filter(title='The Python Journal')
self.a1.publications = qs
self.assertEqual(1, self.a1.publications.count())
self.assertEqual(1, qs.count())
def test_reverse_assign_with_queryset(self):
# Ensure that querysets used in M2M assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ManyRelatedObjectsDescriptor.__set__. Refs #19816.
self.p1.article_set = [self.a1, self.a2]
qs = self.p1.article_set.filter(headline='Django lets you build Web apps easily')
self.p1.article_set = qs
self.assertEqual(1, self.p1.article_set.count())
self.assertEqual(1, qs.count())
def test_clear(self):
# Relation sets can be cleared:
self.p2.article_set.clear()
self.assertQuerysetEqual(self.p2.article_set.all(), [])
self.assertQuerysetEqual(self.a4.publications.all(), [])
# And you can clear from the other end
self.p2.article_set.add(self.a3, self.a4)
self.assertQuerysetEqual(self.p2.article_set.all(),
[
'<Article: NASA finds intelligent life on Earth>',
'<Article: Oxygen-free diet works wonders>',
])
self.assertQuerysetEqual(self.a4.publications.all(),
[
'<Publication: Science News>',
])
self.a4.publications.clear()
self.assertQuerysetEqual(self.a4.publications.all(), [])
self.assertQuerysetEqual(self.p2.article_set.all(),
['<Article: NASA finds intelligent life on Earth>'])
def test_inherited_models_selects(self):
"""
#24156 - Objects from child models where the parent's m2m field uses
related_name='+' should be retrieved correctly.
"""
a = InheritedArticleA.objects.create()
b = InheritedArticleB.objects.create()
a.publications.add(self.p1, self.p2)
self.assertQuerysetEqual(a.publications.all(),
[
'<Publication: Science News>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(b.publications.all(), [])
b.publications.add(self.p3)
self.assertQuerysetEqual(a.publications.all(),
[
'<Publication: Science News>',
'<Publication: The Python Journal>',
])
self.assertQuerysetEqual(b.publications.all(),
[
'<Publication: Science Weekly>',
])
|
bsd-3-clause
| -5,435,106,851,823,236,000 | 2,758,617,357,352,102,000 | 44.52183 | 113 | 0.571383 | false |
aerickson/ansible
|
lib/ansible/modules/files/blockinfile.py
|
49
|
10650
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, 2015 YAEGASHI Takeshi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: blockinfile
author:
- 'YAEGASHI Takeshi (@yaegashi)'
extends_documentation_fragment:
- files
- validate
short_description: Insert/update/remove a text block
surrounded by marker lines.
version_added: '2.0'
description:
- This module will insert/update/remove a block of multi-line text
surrounded by customizable marker lines.
options:
path:
aliases: [ dest, destfile, name ]
required: true
description:
- The file to modify.
- Before 2.3 this option was only usable as I(dest), I(destfile) and I(name).
state:
required: false
choices: [ present, absent ]
default: present
description:
- Whether the block should be there or not.
marker:
required: false
default: '# {mark} ANSIBLE MANAGED BLOCK'
description:
- The marker line template.
"{mark}" will be replaced with "BEGIN" or "END".
block:
aliases: [ content ]
required: false
default: ''
description:
- The text to insert inside the marker lines.
If it's missing or an empty string,
the block will be removed as if C(state) were specified to C(absent).
insertafter:
required: false
default: EOF
description:
- If specified, the block will be inserted after the last match of
specified regular expression. A special value is available; C(EOF) for
inserting the block at the end of the file. If specified regular
      expression has no matches, C(EOF) will be used instead.
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
default: None
description:
- If specified, the block will be inserted before the last match of
specified regular expression. A special value is available; C(BOF) for
inserting the block at the beginning of the file. If specified regular
expresion has no matches, the block will be inserted at the end of the
file.
choices: [ 'BOF', '*regex*' ]
create:
required: false
default: 'no'
choices: [ 'yes', 'no' ]
description:
- Create a new file if it doesn't exist.
backup:
required: false
default: 'no'
choices: [ 'yes', 'no' ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
follow:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- 'This flag indicates that filesystem links, if they exist, should be followed.'
version_added: "2.1"
notes:
- This module supports check mode.
- When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
"""
EXAMPLES = r"""
# Before 2.3, option 'dest' or 'name' was used instead of 'path'
- name: insert/update "Match User" configuration block in /etc/ssh/sshd_config
blockinfile:
path: /etc/ssh/sshd_config
block: |
Match User ansible-agent
PasswordAuthentication no
- name: insert/update eth0 configuration stanza in /etc/network/interfaces
(it might be better to copy files into /etc/network/interfaces.d/)
blockinfile:
path: /etc/network/interfaces
block: |
iface eth0 inet static
address 192.0.2.23
netmask 255.255.255.0
- name: insert/update configuration using a local file
blockinfile:
block: "{{ lookup('file', './local/ssh_config') }}"
dest: "/etc/ssh/ssh_config"
backup: yes
- name: insert/update HTML surrounded by custom markers after <body> line
blockinfile:
path: /var/www/html/index.html
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
insertafter: "<body>"
content: |
<h1>Welcome to {{ ansible_hostname }}</h1>
<p>Last updated on {{ ansible_date_time.iso8601 }}</p>
- name: remove HTML as well as surrounding markers
blockinfile:
path: /var/www/html/index.html
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
content: ""
- name: Add mappings to /etc/hosts
blockinfile:
path: /etc/hosts
block: |
{{ item.ip }} {{ item.name }}
marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
with_items:
- { name: host1, ip: 10.10.1.10 }
- { name: host2, ip: 10.10.1.11 }
- { name: host3, ip: 10.10.1.12 }
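# A sketch combining the block with the module's validate option; the
# '/usr/sbin/sshd -T -f %s' command is an assumption about the target host,
# not something this module ships.
- name: insert/update a Match block and validate the resulting sshd_config
  blockinfile:
    path: /etc/ssh/sshd_config
    validate: '/usr/sbin/sshd -T -f %s'
    block: |
      Match User backup-agent
        PasswordAuthentication no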
"""
import re
import os
import tempfile
from ansible.module_utils.six import b
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
def write_changes(module, contents, path):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd, 'wb')
f.write(contents)
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % tmpfile)
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_file_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, aliases=['dest', 'destfile', 'name'], type='path'),
state=dict(default='present', choices=['absent', 'present']),
marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),
block=dict(default='', type='str', aliases=['content']),
insertafter=dict(default=None),
insertbefore=dict(default=None),
create=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
validate=dict(default=None, type='str'),
),
mutually_exclusive=[['insertbefore', 'insertafter']],
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
path = params['path']
if module.boolean(params.get('follow', None)):
path = os.path.realpath(path)
if os.path.isdir(path):
module.fail_json(rc=256,
msg='Path %s is a directory !' % path)
path_exists = os.path.exists(path)
if not path_exists:
if not module.boolean(params['create']):
module.fail_json(rc=257,
msg='Path %s does not exist !' % path)
original = None
lines = []
else:
f = open(path, 'rb')
original = f.read()
f.close()
lines = original.splitlines()
insertbefore = params['insertbefore']
insertafter = params['insertafter']
block = to_bytes(params['block'])
marker = to_bytes(params['marker'])
present = params['state'] == 'present'
if not present and not path_exists:
module.exit_json(changed=False, msg="File %s not present" % path)
if insertbefore is None and insertafter is None:
insertafter = 'EOF'
if insertafter not in (None, 'EOF'):
insertre = re.compile(insertafter)
elif insertbefore not in (None, 'BOF'):
insertre = re.compile(insertbefore)
else:
insertre = None
marker0 = re.sub(b(r'{mark}'), b('BEGIN'), marker)
marker1 = re.sub(b(r'{mark}'), b('END'), marker)
if present and block:
        # Escape sequences like '\n' need to be handled in Ansible 1.x
if module.ansible_version.startswith('1.'):
block = re.sub('', block, '')
blocklines = [marker0] + block.splitlines() + [marker1]
else:
blocklines = []
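    # Scan for existing BEGIN/END marker lines (n0/n1). If either marker is
    # missing, the insertafter/insertbefore regex (or EOF/BOF) picks the
    # insertion point; otherwise the old block between the markers is replaced.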
n0 = n1 = None
for i, line in enumerate(lines):
if line == marker0:
n0 = i
if line == marker1:
n1 = i
if None in (n0, n1):
n0 = None
if insertre is not None:
for i, line in enumerate(lines):
if insertre.search(line):
n0 = i
if n0 is None:
n0 = len(lines)
elif insertafter is not None:
n0 += 1
elif insertbefore is not None:
n0 = 0 # insertbefore=BOF
else:
n0 = len(lines) # insertafter=EOF
elif n0 < n1:
lines[n0:n1+1] = []
else:
lines[n1:n0+1] = []
n0 = n1
lines[n0:n0] = blocklines
if lines:
result = b('\n').join(lines)
if original is None or original.endswith(b('\n')):
result += b('\n')
else:
        result = b('')
if original == result:
msg = ''
changed = False
elif original is None:
msg = 'File created'
changed = True
elif not blocklines:
msg = 'Block removed'
changed = True
else:
msg = 'Block inserted'
changed = True
if changed and not module.check_mode:
if module.boolean(params['backup']) and path_exists:
module.backup_local(path)
write_changes(module, result, path)
if module.check_mode and not path_exists:
module.exit_json(changed=changed, msg=msg)
msg, changed = check_file_attrs(module, changed, msg)
module.exit_json(changed=changed, msg=msg)
if __name__ == '__main__':
main()
|
gpl-3.0
| 1,659,350,615,054,827,500 | -6,831,079,015,043,718,000 | 30.886228 | 124 | 0.610516 | false |
pierreg/tensorflow
|
tensorflow/python/kernel_tests/bitcast_op_test.py
|
29
|
2317
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.bitcast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class BitcastTest(tf.test.TestCase):
def _testBitcast(self, x, datatype, shape):
with self.test_session():
tf_ans = tf.bitcast(x, datatype)
out = tf_ans.eval()
buff_after = memoryview(out).tobytes()
buff_before = memoryview(x).tobytes()
self.assertEqual(buff_before, buff_after)
self.assertEqual(tf_ans.get_shape(), shape)
self.assertEqual(tf_ans.dtype, datatype)
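  # The expected shapes below follow from dtype byte widths: bitcasting
  # float64 (8 bytes) to int8 appends a trailing dimension of size 8, while
  # int8 -> int32 consumes a trailing dimension of size 4.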
def testSmaller(self):
x = np.random.rand(3, 2)
datatype = tf.int8
shape = [3, 2, 8]
self._testBitcast(x, datatype, shape)
def testLarger(self):
x = np.arange(16, dtype=np.int8).reshape([4, 4])
datatype = tf.int32
shape = [4]
self._testBitcast(x, datatype, shape)
def testSameDtype(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, x.dtype, shape)
def testSameSize(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, tf.int64, shape)
def testErrors(self):
x = np.zeros([1, 1], np.int8)
datatype = tf.int32
with self.assertRaisesRegexp(ValueError, "Cannot bitcast due to shape"):
tf.bitcast(x, datatype, None)
def testEmpty(self):
x = np.ones([], np.int32)
datatype = tf.int8
shape = [4]
self._testBitcast(x, datatype, shape)
def testUnknown(self):
x = tf.placeholder(tf.float32)
datatype = tf.int8
tf.bitcast(x, datatype, None)
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
| -8,287,266,316,818,116,000 | -9,166,224,891,572,693,000 | 28.705128 | 80 | 0.646957 | false |
dsiddharth/access-keys
|
keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py
|
8
|
5811
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta = sql.MetaData()
meta.bind = migrate_engine
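    # Every create() below passes checkfirst=True, so re-running this
    # migration against a schema that already has the tables is a no-op.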
# catalog
service_table = sql.Table(
'service',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('type', sql.String(255)),
sql.Column('extra', sql.Text()))
service_table.create(migrate_engine, checkfirst=True)
endpoint_table = sql.Table(
'endpoint',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('region', sql.String(255)),
sql.Column('service_id',
sql.String(64),
sql.ForeignKey('service.id'),
nullable=False),
sql.Column('extra', sql.Text()))
endpoint_table.create(migrate_engine, checkfirst=True)
# identity
role_table = sql.Table(
'role',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(255), unique=True, nullable=False))
role_table.create(migrate_engine, checkfirst=True)
if migrate_engine.name == 'ibm_db_sa':
# NOTE(blk-u): SQLAlchemy for PostgreSQL picks the name tenant_name_key
# for the unique constraint, but for DB2 doesn't give the UC a name
# unless we tell it to and there is no DDL to alter a column to drop
# an unnamed unique constraint, so this code creates a named unique
# constraint on the name column rather than an unnamed one.
# (This is used in migration 16.)
tenant_table = sql.Table(
'tenant',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.UniqueConstraint('name', name='tenant_name_key'))
else:
tenant_table = sql.Table(
'tenant',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()))
tenant_table.create(migrate_engine, checkfirst=True)
metadata_table = sql.Table(
'metadata',
meta,
sql.Column('user_id', sql.String(64), primary_key=True),
sql.Column('tenant_id', sql.String(64), primary_key=True),
sql.Column('data', sql.Text()))
metadata_table.create(migrate_engine, checkfirst=True)
ec2_credential_table = sql.Table(
'ec2_credential',
meta,
sql.Column('access', sql.String(64), primary_key=True),
sql.Column('secret', sql.String(64)),
sql.Column('user_id', sql.String(64)),
sql.Column('tenant_id', sql.String(64)))
ec2_credential_table.create(migrate_engine, checkfirst=True)
if migrate_engine.name == 'ibm_db_sa':
# NOTE(blk-u): SQLAlchemy for PostgreSQL picks the name user_name_key
# for the unique constraint, but for DB2 doesn't give the UC a name
# unless we tell it to and there is no DDL to alter a column to drop
# an unnamed unique constraint, so this code creates a named unique
# constraint on the name column rather than an unnamed one.
# (This is used in migration 16.)
user_table = sql.Table(
'user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), nullable=False),
sql.Column('extra', sql.Text()),
sql.UniqueConstraint('name', name='user_name_key'))
else:
user_table = sql.Table(
'user',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('name', sql.String(64), unique=True, nullable=False),
sql.Column('extra', sql.Text()))
user_table.create(migrate_engine, checkfirst=True)
user_tenant_membership_table = sql.Table(
'user_tenant_membership',
meta,
sql.Column(
'user_id',
sql.String(64),
sql.ForeignKey('user.id'),
primary_key=True),
sql.Column(
'tenant_id',
sql.String(64),
sql.ForeignKey('tenant.id'),
primary_key=True))
user_tenant_membership_table.create(migrate_engine, checkfirst=True)
# token
token_table = sql.Table(
'token',
meta,
sql.Column('id', sql.String(64), primary_key=True),
sql.Column('expires', sql.DateTime()),
sql.Column('extra', sql.Text()))
token_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = sql.MetaData()
meta.bind = migrate_engine
tables = ['user_tenant_membership', 'token', 'user', 'tenant', 'role',
'metadata', 'ec2_credential', 'endpoint', 'service']
for t in tables:
table = sql.Table(t, meta, autoload=True)
table.drop(migrate_engine, checkfirst=True)
|
apache-2.0
| -7,608,882,020,831,566,000 | 7,291,690,385,856,622,000 | 36.012739 | 79 | 0.607641 | false |
jsirois/commons
|
tests/python/twitter/common/zookeeper/serverset/test_endpoint.py
|
8
|
2960
|
# ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from twitter.common.zookeeper.serverset.endpoint import Endpoint, ServiceInstance, Status
def _service_instance(vals):
json = '''{
"additionalEndpoints": {
"aurora": {
"host": "smfd-akb-%d-sr1.devel.twitter.com",
"port": 31181
},
"health": {
"host": "smfd-akb-%d-sr1.devel.twitter.com",
"port": 31181
}
},
"serviceEndpoint": {
"host": "smfd-akb-%d-sr1.devel.twitter.com",
"port": 31181
},
"shard": %d,
"status": "ALIVE"
}''' % vals
return ServiceInstance.unpack(json)
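# The JSON above mirrors a typical serverset znode payload: a primary
# serviceEndpoint plus named additionalEndpoints, a shard id and a status.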
def test_endpoint_equality():
assert Endpoint('host', 8340) == Endpoint('host', 8340)
def test_endpoint_hash_equality():
assert Endpoint('host', 8340).__hash__() == Endpoint('host', 8340).__hash__()
def test_endpoint_inequality():
assert Endpoint('host', 8340) != Endpoint('xhost', 8341)
def test_endpoint_hash_inequality():
assert Endpoint('host', 8340).__hash__() != Endpoint('xhost', 8341).__hash__()
def test_status_equality():
assert Status.from_string('DEAD') == Status.from_string('DEAD')
def test_status_hash_equality():
assert Status.from_string('DEAD').__hash__() == Status.from_string('DEAD').__hash__()
def test_status_inequality():
assert Status.from_string('DEAD') != Status.from_string('STARTING')
def test_status_hash_inequality():
assert Status.from_string('DEAD').__hash__() != Status.from_string('STARTING').__hash__()
def test_service_instance_equality():
vals = (1, 2, 3, 4)
assert _service_instance(vals) == _service_instance(vals)
def test_service_instance_hash_equality():
vals = (1, 2, 3, 4)
assert _service_instance(vals).__hash__() == _service_instance(vals).__hash__()
def test_service_instance_inequality():
vals = (1, 2, 3, 4)
vals2 = (5, 6, 7, 8)
assert _service_instance(vals) != _service_instance(vals2)
def test_service_instance_hash_inequality():
vals = (1, 2, 3, 4)
vals2 = (5, 6, 7, 8)
assert _service_instance(vals).__hash__() != _service_instance(vals2).__hash__()
|
apache-2.0
| 2,275,786,948,074,976,300 | 5,475,734,233,341,408,000 | 30.489362 | 100 | 0.584459 | false |
cnh/docker-registry
|
tests/mock_s3.py
|
35
|
1940
|
# -*- coding: utf-8 -*-
'''Monkeypatch the s3 Storage driver to prevent parallel key stream reads
during unit testing. It is used from lib/storage/s3.'''
import six
from docker_registry.core import exceptions
import docker_registry.drivers.s3 as s3
from docker_registry.testing import utils
@six.add_metaclass(utils.monkeypatch_class)
class Storage(s3.Storage):
# def stream_read(self, path, bytes_range=None):
# path = self._init_path(path)
# headers = None
# if bytes_range:
# headers = {'Range': 'bytes={0}-{1}'.format(*bytes_range)}
# key = self._boto_bucket.lookup(path, headers=headers)
# if not key:
# raise exceptions.FileNotFoundError('%s is not there' % path)
# while True:
# buf = key.read(self.buffer_size)
# if not buf:
# break
# yield buf
def stream_read(self, path, bytes_range=None):
path = self._init_path(path)
nb_bytes = 0
total_size = 0
key = self._boto_bucket.lookup(path)
if not key:
raise exceptions.FileNotFoundError('%s is not there' % path)
if bytes_range:
key._last_position = bytes_range[0]
total_size = bytes_range[1] - bytes_range[0] + 1
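            # e.g. bytes_range=(0, 1023) starts reading at offset 0 and
            # streams exactly 1024 bytes before yielding the empty buffer.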
while True:
if bytes_range:
# Bytes Range is enabled
buf_size = self.buffer_size
if nb_bytes + buf_size > total_size:
# We make sure we don't read out of the range
buf_size = total_size - nb_bytes
if buf_size > 0:
buf = key.read(buf_size)
nb_bytes += len(buf)
else:
# We're at the end of the range
buf = ''
else:
buf = key.read(self.buffer_size)
if not buf:
break
yield buf
|
apache-2.0
| 355,226,586,210,373,400 | -9,117,199,731,831,626,000 | 33.035088 | 77 | 0.526289 | false |
ritchyteam/odoo
|
addons/account/wizard/account_report_aged_partner_balance.py
|
378
|
4012
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_aged_trial_balance(osv.osv_memory):
_inherit = 'account.common.partner.report'
_name = 'account.aged.trial.balance'
_description = 'Account Aged Trial balance Report'
_columns = {
'period_length':fields.integer('Period Length (days)', required=True),
'direction_selection': fields.selection([('past','Past'),
('future','Future')],
'Analysis Direction', required=True),
'journal_ids': fields.many2many('account.journal', 'account_aged_trial_balance_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
'period_length': 30,
'date_from': lambda *a: time.strftime('%Y-%m-%d'),
'direction_selection': 'past',
}
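    # Bucket layout sketch: with the default period_length of 30 and the
    # 'past' direction, _print_report builds five ranges labelled
    # 0-30, 30-60, 60-90, 90-120 and +120 (days before the start date).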
def _print_report(self, cr, uid, ids, data, context=None):
res = {}
if context is None:
context = {}
data = self.pre_print_report(cr, uid, ids, data, context=context)
data['form'].update(self.read(cr, uid, ids, ['period_length', 'direction_selection'])[0])
period_length = data['form']['period_length']
        if period_length <= 0:
raise osv.except_osv(_('User Error!'), _('You must set a period length greater than 0.'))
if not data['form']['date_from']:
raise osv.except_osv(_('User Error!'), _('You must set a start date.'))
start = datetime.strptime(data['form']['date_from'], "%Y-%m-%d")
if data['form']['direction_selection'] == 'past':
for i in range(5)[::-1]:
stop = start - relativedelta(days=period_length)
res[str(i)] = {
'name': (i!=0 and (str((5-(i+1)) * period_length) + '-' + str((5-i) * period_length)) or ('+'+str(4 * period_length))),
'stop': start.strftime('%Y-%m-%d'),
'start': (i!=0 and stop.strftime('%Y-%m-%d') or False),
}
start = stop - relativedelta(days=1)
else:
for i in range(5):
stop = start + relativedelta(days=period_length)
res[str(5-(i+1))] = {
'name': (i!=4 and str((i) * period_length)+'-' + str((i+1) * period_length) or ('+'+str(4 * period_length))),
'start': start.strftime('%Y-%m-%d'),
'stop': (i!=4 and stop.strftime('%Y-%m-%d') or False),
}
start = stop + relativedelta(days=1)
data['form'].update(res)
if data.get('form',False):
data['ids']=[data['form'].get('chart_account_id',False)]
return self.pool['report'].get_action(cr, uid, [], 'account.report_agedpartnerbalance', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 3,649,345,679,335,204,400 | 2,238,514,000,521,612,300 | 45.651163 | 156 | 0.555085 | false |
DrMeers/django
|
django/contrib/gis/tests/layermap/models.py
|
75
|
2275
|
from django.contrib.gis.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=25)
objects = models.GeoManager()
class Meta:
abstract = True
app_label = 'layermap'
def __str__(self):
return self.name
class State(NamedModel):
pass
class County(NamedModel):
state = models.ForeignKey(State)
mpoly = models.MultiPolygonField(srid=4269) # Multipolygon in NAD83
class CountyFeat(NamedModel):
poly = models.PolygonField(srid=4269)
class City(NamedModel):
name_txt = models.TextField(default='')
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
dt = models.DateField()
point = models.PointField()
class Interstate(NamedModel):
length = models.DecimalField(max_digits=6, decimal_places=2)
path = models.LineStringField()
# Same as `City` above, but for testing model inheritance.
class CityBase(NamedModel):
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
point = models.PointField()
class ICity1(CityBase):
dt = models.DateField()
class Meta(CityBase.Meta):
pass
class ICity2(ICity1):
dt_time = models.DateTimeField(auto_now=True)
class Meta(ICity1.Meta):
pass
class Invalid(models.Model):
point = models.PointField()
class Meta:
app_label = 'layermap'
# Mapping dictionaries for the models above.
co_mapping = {'name': 'Name',
              'state': {'name': 'State'},  # ForeignKeys use another mapping dictionary for the _related_ Model (State in this case).
'mpoly': 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS.
}
cofeat_mapping = {'name': 'Name',
'poly': 'POLYGON',
}
city_mapping = {'name': 'Name',
'population': 'Population',
'density': 'Density',
'dt': 'Created',
'point': 'POINT',
}
inter_mapping = {'name': 'Name',
'length': 'Length',
'path': 'LINESTRING',
}
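# A minimal LayerMapping usage sketch (the 'cities.shp' path is illustrative,
# not part of this module; it assumes a shapefile whose fields match
# city_mapping):
#
# from django.contrib.gis.utils import LayerMapping
# lm = LayerMapping(City, 'cities.shp', city_mapping)
# lm.save(verbose=True)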
|
bsd-3-clause
| 4,307,315,715,352,421,400 | -2,210,660,783,458,633,000 | 23.462366 | 134 | 0.620659 | false |
jmchilton/lwr
|
lwr/managers/queued_cli.py
|
1
|
2771
|
"""
LWR job manager that uses a CLI interface to a job queue (e.g. Torque's qsub,
qstat, etc...).
"""
from .base.external import ExternalBaseManager
from .util.external import parse_external_id
from .util.cli import CliInterface, split_params
from .util.job_script import job_script
from logging import getLogger
log = getLogger(__name__)
class CliQueueManager(ExternalBaseManager):
manager_type = "queued_cli"
def __init__(self, name, app, **kwds):
super(CliQueueManager, self).__init__(name, app, **kwds)
self.cli_interface = CliInterface(code_dir='.')
self.shell_params, self.job_params = split_params(kwds)
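        # split_params partitions the manager kwds into shell plugin
        # parameters and job plugin parameters; __get_cli_plugins resolves
        # the concrete plugins from them on each call.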
def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[]):
self._check_execution_with_tool_file(job_id, command_line)
shell, job_interface = self.__get_cli_plugins()
stdout_path = self._stdout_path(job_id)
stderr_path = self._stderr_path(job_id)
job_name = self._job_name(job_id)
command_line = self._expand_command_line(command_line, dependencies_description)
job_script_kwargs = self._job_template_env(job_id, command_line=command_line, env=env)
extra_kwargs = job_interface.job_script_kwargs(stdout_path, stderr_path, job_name)
job_script_kwargs.update(extra_kwargs)
script = job_script(**job_script_kwargs)
script_path = self._write_job_script(job_id, script)
submission_command = job_interface.submit(script_path)
cmd_out = shell.execute(submission_command)
if cmd_out.returncode != 0:
log.warn("Failed to submit job - command was %s" % submission_command)
raise Exception("Failed to submit job")
external_id = parse_external_id(cmd_out.stdout.strip())
if not external_id:
            message_template = "Failed to obtain external id for job_id %s and submission_command %s"
message = message_template % (job_id, submission_command)
log.warn(message)
raise Exception("Failed to obtain external id")
self._register_external_id(job_id, external_id)
def __get_cli_plugins(self):
return self.cli_interface.get_plugins(self.shell_params, self.job_params)
def _kill_external(self, external_id):
shell, job_interface = self.__get_cli_plugins()
kill_command = job_interface.delete(external_id)
shell.execute(kill_command)
def _get_status_external(self, external_id):
shell, job_interface = self.__get_cli_plugins()
status_command = job_interface.get_single_status(external_id)
cmd_out = shell.execute(status_command)
state = job_interface.parse_single_status(cmd_out.stdout, external_id)
return state
|
apache-2.0
| 1,548,548,149,795,302,700 | -2,149,544,170,538,251,800 | 44.42623 | 100 | 0.666546 | false |
Sakaki/pyniconico
|
nico_tools/nicovideo_dl.py
|
1
|
8551
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from urllib import parse
import os
from nico_tools import nico_xml_parser
from nico_tools.nicowalker import NicoWalker
from nico_tools.mylist_items import GetMyListItems
from nico_tools.mylist import MyList
from progressbar import ProgressBar, Percentage, Bar, ETA
import subprocess
from nico_tools import mp3_tag
import re
character_replace = {
"\\": "\",
"/": "/",
":": ":",
"?": "?",
"\"": "”",
"<": "<",
">": ">",
"|": "|",
" ": "_",
"*": "*"
}
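# Example: a title such as 'a/b:c?' is saved as 'a/b:c?', i.e. characters
# that are reserved on common filesystems are swapped for full-width lookalikes.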
class DownloadVideo(NicoWalker):
command_name = 'download nicovideo flv'
def __init__(self, args):
super(DownloadVideo, self).__init__()
self.parser.add_argument('vid',
metavar='VID',
help='watch ID or mylist name')
self.parser.add_argument('-l', '--location',
dest='location',
default='./'.replace("/", os.sep),
help='video output folder')
self.parser.add_argument('-f', '--force',
dest='overwrite',
action='store_true',
help='allow overwrite')
self.parser.add_argument('--mp3',
dest='mp3conv',
action='store_true',
help='convert to mp3')
self.parser.add_argument('-b', '--bitrate',
dest='bitrate',
default=192,
help='mp3 bitrate')
self.parser.add_argument('-m', '--mylist',
dest='mylist',
action='store_true',
help='download mylist items')
self.set_parser(args)
def invoke(self):
if self.args.mylist:
mylist_array = MyList.get_mylist_names(self.session)
mylist_items = GetMyListItems.get_mylist_items(self.args.vid, self.session, mylist_array)
watch_ids = list(map(lambda mylist_item: mylist_item["item_data"]["watch_id"], mylist_items))
else:
watch_ids = [self.args.vid]
for watch_id in watch_ids:
            # If the watch ID was given as a URL, extract the video ID from it
if watch_id.startswith("http"):
searched = re.search("nicovideo.jp/watch/([a-z0-9]*)", watch_id)
                assert searched is not None, "No video ID found in the URL"
watch_id = searched.groups()[0]
            # Try up to 3 times before giving up
for _ in range(3):
success = self.download(self.session, watch_id, self.args.location, self.args.overwrite,
self.args.mp3conv, self.args.bitrate)
if success:
                    # Stop once the download succeeds
break
else:
                    # On failure, discard the session and log in again
print("Dispose old session and retry.")
self.login(force=True)
@staticmethod
def get_video_metadata(session, watch_id):
        # Fetch the video info from the API and visit the watch page
        # (without this, the FLV URL cannot be obtained)
api_url = "http://ext.nicovideo.jp/api/getthumbinfo/{0}".format(watch_id)
api_response_text = session.get(api_url).text
video_info = nico_xml_parser.parse_video_info(api_response_text)
session.get(video_info["watch_url"])
return video_info
@staticmethod
def get_download_url(session, video_info, watch_id):
        # Get the download URL from the getflv API's form-encoded response
print(video_info["title"], video_info["user_nickname"])
url = 'http://flapi.nicovideo.jp/api/getflv?v={0}'.format(watch_id)
text = session.get(url).text
flv_url = text.split('&')[2].replace('url=', '')
flv_url = parse.unquote(flv_url)
return flv_url
@staticmethod
def gen_video_path(save_directory, video_info):
        # Decide the file name for saving the FLV
if not save_directory.endswith('/') and not save_directory.endswith(os.sep):
save_directory += '/'
        # Replace characters that are not allowed in file names with full-width equivalents
video_title = video_info["title"]
for key, value in character_replace.items():
if key in video_title:
video_title = video_title.replace(key, value)
flv_path = "{0}{1}.{2}".format(save_directory, video_title, video_info["movie_type"])
flv_path = flv_path.replace("/", os.sep)
return flv_path
@staticmethod
def exec_download(session, flv_url, flv_path, progressbar):
        # Start the download-and-save process
with open(flv_path, 'wb') as f:
            # Open a streaming session for the FLV URL
res = session.get(flv_url, stream=True)
if res.status_code != 200:
print("Download failed. Status code is {0}.".format(res.status_code))
return False
            # Get the file size
content_length = res.headers.get("content-length")
content_length = int(content_length)
            # Prepare the progress bar
division_size = int(content_length / 100)
downloaded_size = 0
progressbar.start()
            # Fetch the data as a stream, 4096 bytes at a time
for data in res.iter_content(chunk_size=4096):
downloaded_size += len(data)
f.write(data)
                # Update the progress bar
try:
progressbar.update(int(downloaded_size / division_size))
except OSError:
pass
progressbar.finish()
print('Saved as {0}'.format(flv_path))
return True
@staticmethod
def convert_mp3(video_info, flv_path, mp3_bitrate, leave_flv=False):
mp3_path = flv_path[:flv_path.rfind(".")] + ".mp3"
print(mp3_path)
command = 'ffmpeg -y -i "{0}" -ab {1}k "{2}"'.format(flv_path, mp3_bitrate, mp3_path)
try:
subprocess.run(command, shell=True, check=True)
except subprocess.CalledProcessError:
print("ffmpegの実行に失敗しました。")
exit(-1)
mp3_tag.add_tag(
mp3_path,
video_info["thumbnail_url"],
video_info["title"],
video_info["user_nickname"],
"ニコニコ動画"
)
        # If the mp3 file exists, remove the original file
if leave_flv is False and os.path.exists(mp3_path):
os.remove(flv_path)
@staticmethod
def download(session, watch_id, save_directory, overwrite=False, convert_mp3=False, mp3_bitrate="192"):
if convert_mp3:
try:
subprocess.run("ffmpeg -version", shell=True, check=True, stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError:
print("ffmpegが実行できません。ffmpegがインストールされ、PATHに含まれているか確認してください。\n"
"インストールする場合、 https://www.ffmpeg.org/download.html を参照してください。")
exit(-1)
video_info = DownloadVideo.get_video_metadata(session, watch_id)
flv_url = DownloadVideo.get_download_url(session, video_info, watch_id)
flv_path = DownloadVideo.gen_video_path(save_directory, video_info)
        # Stop if the file already exists
if not overwrite and os.path.exists(flv_path):
print("ファイルが存在します。上書きする場合は --overwrite オプションを指定してください。")
return True
        # Run the download
widgets = ["Downloading: ", Percentage(), Bar(), ETA()]
progressbar = ProgressBar(maxval=100, widgets=widgets)
if not DownloadVideo.exec_download(session, flv_url, flv_path, progressbar):
return False
        # Convert to mp3
if convert_mp3:
DownloadVideo.convert_mp3(video_info, flv_path, mp3_bitrate)
return True
if __name__ == '__main__':
DownloadVideo(None).invoke()
|
lgpl-3.0
| -2,158,245,426,995,829,500 | 6,961,511,189,298,989,000 | 38.790816 | 107 | 0.537505 | false |
GehenHe/Recognize-Face-on-Android
|
tensorflow/contrib/session_bundle/constants.py
|
121
|
1311
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for export/import."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
VERSION_FORMAT_SPECIFIER = "%08d"
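# e.g. VERSION_FORMAT_SPECIFIER % 42 yields "00000042", giving lexically
# sortable names for versioned export directories.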
ASSETS_DIRECTORY = "assets"
EXPORT_BASE_NAME = "export"
EXPORT_SUFFIX_NAME = "meta"
META_GRAPH_DEF_FILENAME = "export.meta"
VARIABLES_FILENAME = "export"
VARIABLES_FILENAME_PATTERN = "export-?????-of-?????"
VARIABLES_FILENAME_PATTERN_V2 = "export.data-?????-of-?????"
VARIABLES_INDEX_FILENAME_V2 = "export.index"
INIT_OP_KEY = "serving_init_op"
SIGNATURES_KEY = "serving_signatures"
ASSETS_KEY = "serving_assets"
GRAPH_KEY = "serving_graph"
|
apache-2.0
| -2,572,334,972,042,716,000 | -8,693,045,878,896,272,000 | 37.558824 | 80 | 0.694889 | false |
manisandro/qgis-cloud-plugin
|
qgiscloud/resources_rc.py
|
1
|
33300
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt (Qt v4.8.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x1c\x5f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xc8\x00\x00\x00\x4b\x08\x06\x00\x00\x00\x35\x7c\xc8\xe8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x1c\x16\x49\x44\x41\x54\x78\x5e\xed\x9d\x09\x9c\x14\xc5\
\xf5\xc7\xbb\x7b\x66\x16\xe4\x74\xd9\x5d\x05\x54\x14\x34\x9a\x78\
\x82\xb7\x51\x44\x14\x0f\xbc\x4f\x3c\xfe\x1a\xc5\x3b\xde\xfa\xf7\
\xf8\x0b\x82\xa3\x1c\x1e\x89\xf1\x8a\x46\x63\x14\xf5\xef\x15\x35\
\x26\xf1\x56\xe4\x70\xc5\x78\xc5\x68\x8c\x8a\xf1\x88\x82\x88\x07\
\xec\x72\xb8\xa8\xec\x31\x9d\xdf\x77\x98\x5e\x7a\x7b\x7a\x66\x7a\
\x66\x7a\x11\x96\x7e\x9f\xcf\x6f\x67\xba\xba\xea\x55\xd5\xab\xf7\
\x5e\xbd\xae\xaa\x9e\x35\x8c\x88\x22\x09\x44\x12\x88\x24\x10\x49\
\x20\x92\x40\x24\x81\x48\x02\x91\x04\x22\x09\x44\x12\x88\x24\x10\
\x49\x20\x92\x40\x24\x81\x48\x02\x91\x04\x22\x09\x44\x12\x88\x24\
\x10\x49\x60\x15\x96\x80\xb9\x0a\xb7\x3d\x6a\x7a\x07\x92\x40\xf7\
\x4b\x5f\xa8\xea\xdc\x25\xb1\x83\x61\x59\x83\x0c\xd3\xee\x6d\xdb\
\x56\xdc\xe9\x9e\x69\xd8\x29\xc3\xb0\x17\xa5\x6c\x73\xa6\x65\xa7\
\x5e\x9d\xf7\xce\xd7\xff\x31\x1e\x19\xd1\xb2\x22\xba\x1f\x19\xc8\
\x8a\x90\x72\x54\x47\x6e\x09\x24\x1f\xae\xa8\x49\xf4\x3d\xd8\x30\
\x8d\xf3\x0c\xdb\x96\x71\x98\x9d\x73\x65\xb6\x6d\x23\x65\x9a\xc6\
\xec\x54\xca\x78\xe0\x87\xe6\xa6\x9b\x97\x24\x87\x7e\x95\x2b\x6f\
\x58\xe9\x91\x81\x84\x25\xc9\x88\x4f\xf1\x12\x38\xfb\xe9\x4e\x35\
\x7d\xba\x5f\x64\xd8\xc6\x25\x32\x8c\x6e\x41\x19\xd8\x9a\x4e\x64\
\x4c\xcf\x36\xa5\x52\x67\x2e\x1a\x33\xe4\xd3\xa0\xe5\x4a\xc9\x67\
\x95\x52\x28\x2a\x13\x49\x20\x0c\x09\x54\xaf\xdd\x6d\xa4\x61\x98\
\xa3\x8a\x31\x0e\xea\x95\x57\x37\x45\xc3\x2b\x62\xd6\x35\xbd\x92\
\x4f\xf7\x08\xa3\x2d\xb9\x78\xb4\xc6\x79\xb9\x32\xa4\xd3\x93\x49\
\x19\xd2\x90\x8a\xea\xef\x16\x27\x52\x5d\x12\x65\xcf\x3a\xf5\xc6\
\x1a\x8a\x29\xe7\x35\x1a\xc9\x11\x4d\xe2\x2e\x87\x10\x0a\xc5\xc4\
\xa5\x93\x10\xac\x4f\xa5\x55\xf9\x83\x8a\x35\xe6\x28\x8a\x5c\xd6\
\x10\xbc\x4e\x87\xfc\xb9\xca\xe4\x60\x95\xee\x03\x7d\xa1\x4f\xed\
\x45\xc4\xf0\xdf\x09\x61\xc9\xbf\xa8\x76\x56\x8f\x9b\xba\x89\x69\
\x59\xff\xab\x42\xc8\xac\x24\xb2\x0d\xf3\x60\x23\xd6\xfd\x19\x15\
\x9e\x54\x12\x83\x00\x85\xf2\x29\xbb\x59\x99\x9c\xb1\x5e\x2c\x91\
\xda\xcb\x34\xac\x9d\x35\xa7\xad\x27\xab\xed\xa2\xa9\x2d\x5f\x99\
\x00\x55\xa6\xb3\xb4\x68\x54\x16\x8b\xd3\x47\x66\x4b\x6a\x4a\x73\
\x4b\xaa\x76\x61\x72\xe8\xc2\xa0\x85\x5d\xf9\x50\xa0\x9f\x09\xfb\
\x08\xdb\x0a\xbd\x05\x14\xab\x3d\x88\x7e\x5f\x27\x3c\x92\x83\x79\
\xa5\xd2\x6f\x12\x36\x14\xe4\x00\xd2\x44\xfb\xee\x16\x6e\xcf\x5c\
\xe7\xfb\xa8\xd0\xcd\x41\x02\x7d\xd9\x4a\xa8\x16\x12\xf9\x0a\x94\
\x71\x0f\x23\xfe\xb7\x70\xb6\xb0\xa8\x0c\x3e\x25\x17\xad\x9a\x58\
\x7b\xb1\x65\x58\xd7\x94\xcc\x60\x79\xc1\xe7\xe7\xd5\x2f\x39\xd4\
\xf8\xf5\xde\x4b\x42\xe0\x95\xc5\xc2\xdf\xdb\x26\x9f\xe8\x52\x5d\
\x51\x79\x9c\x8c\xe1\x1c\x4d\x66\x9b\xa8\x54\xcc\x64\x62\x83\xf4\
\x94\x14\x06\xc1\x45\x18\xae\x55\x8b\x53\xe3\x96\xf5\x72\xd5\x84\
\xda\xab\xea\x9a\xa6\x4e\xd3\x6c\xe5\x28\x57\xa1\x6a\xd6\x52\x06\
\x06\x78\xa4\xd0\x77\x19\xbb\x42\x45\xca\xbe\x8f\x01\xe6\x22\x14\
\x7c\x6b\x61\x53\x4f\x86\x69\xb9\x0a\xb8\xd2\x31\xaa\x8b\x85\xc3\
\x84\xaa\x00\xf9\xc3\xc8\x82\x23\x69\x2f\x03\xcc\xdf\x3e\x3d\x7b\
\x48\x9f\x06\xe7\xcf\x14\xec\xae\x6d\xdb\x5b\x54\xf5\xec\xdc\xaf\
\xce\x30\x66\x06\x2b\x51\x5c\x2e\x6f\x38\x60\xac\x99\x9c\xb6\x66\
\x4d\xa2\xf2\x6a\x2d\xad\xdd\xa8\x19\x63\x53\x75\xa4\x3d\xa7\x79\
\x0c\xae\xb3\xea\xd9\xc3\x32\xad\x07\xaa\x13\xbb\x9f\x68\x1c\xf1\
\x70\x90\xfa\x06\xa8\x9b\x4c\xab\xa3\x84\x75\x84\x70\xac\xb6\x38\
\xd9\x85\x95\x7b\x7b\x31\x7a\x48\x38\x55\x58\x51\xc6\x11\x56\xdb\
\x4b\xe2\xd3\xa3\xca\xea\x6a\xda\xe6\xba\x25\x15\xf6\x14\x92\x7e\
\xf6\x54\x84\xb3\x76\x18\xbc\xfc\x78\xb4\x35\x10\x2d\xb9\xc5\x2b\
\x62\xa3\x6d\xd3\x3e\x43\x3a\xd7\x5e\xa1\x8a\x5f\x3b\x48\xd3\x8c\
\x60\x5d\x53\x33\xb0\x2f\x5e\x34\x1f\xd5\xe8\xe6\x8d\xc2\xbe\x42\
\x96\x81\xe7\x2b\xb8\x12\xde\x63\x76\xbe\x55\x20\x3c\x5c\x6d\x28\
\x65\x98\x71\xe9\x58\x38\xb3\x97\x69\xc7\x14\x72\xe4\x5c\x1a\x2e\
\x57\xa8\x6d\x14\xac\x2a\xde\x7b\xb8\x74\xee\xb4\x5c\xb3\x06\xcb\
\x6b\x61\x20\x57\xa3\x15\xbd\xf5\xd2\xbd\x31\x3d\xc7\xbd\xd8\x3f\
\x57\x1e\xa5\x9f\x26\xec\x97\xe7\x3e\x0f\x9d\xed\x81\x3c\x55\x96\
\x74\x8b\x90\x8c\xb0\x6a\x9b\x3c\xa5\xdb\xa3\x1f\xf0\x84\x1c\xde\
\x79\xaa\x6f\x9f\x5b\x76\xa3\x22\xf6\x55\x84\x96\x3f\x83\x28\x2e\
\xb4\x4c\xf3\x17\x6a\x77\x77\x6f\xdb\x15\xe7\xbd\x6f\xd8\xe6\xa3\
\xa6\x65\xcc\x96\x89\x38\x02\xf6\x66\x0b\x7c\x2d\x06\x71\x49\x68\
\x2b\x85\x57\x87\xab\x10\xcf\x12\xcb\xc9\x34\x36\x4f\x98\xb1\x83\
\x94\x70\x83\x0f\x43\xa6\xe5\xa3\x05\xaf\x80\x59\x91\x99\x21\x3c\
\x29\xd4\xfb\x94\x0b\x23\x89\x3a\x5f\x09\x83\x51\x86\xc7\x16\xfa\
\x3c\xc0\x87\xdf\x52\xa5\x3d\x27\x4c\x11\x1a\x7c\xee\x87\x91\x44\
\x5f\x14\xb6\x1b\xed\xf2\x60\x1b\x46\x03\x57\x16\x1e\xad\x06\xd2\
\xb3\xaa\xfb\x3a\x5a\x36\xdb\xc6\xab\x79\xb2\x87\xd7\x53\xa6\x3d\
\xb2\x7e\xf4\xe0\xf7\x43\x6d\xb4\x96\x8e\xab\x2a\x76\x9f\xac\x95\
\x0c\x56\x78\x3c\x46\x62\x0f\x31\x4e\xbd\xfd\x16\xe3\xf7\xa7\xb1\
\x0c\xec\xa6\xcd\x75\xc1\xf3\x87\x97\x88\xe1\xcf\x17\xe6\x79\x6f\
\xac\xc4\xd7\x3b\xa8\x6d\x84\x8b\x6e\xc2\xd0\xaf\x17\xc6\x0b\x91\
\xf2\x7a\x84\xf3\x63\x5c\xb6\x86\x58\x71\xab\x85\x07\x44\x96\x2a\
\xdb\x92\x6d\x4f\xaa\x1f\xb5\x6b\xb8\xc6\x41\x0d\x5a\xad\xaa\x7b\
\xeb\xab\x27\x34\x9b\x4c\xf6\x56\xa9\x99\x65\x9d\xca\xca\x01\x5d\
\xb2\xd2\x0d\x83\x19\xc4\xfb\x6c\xc4\x32\x25\x71\xfc\xaa\x64\x1c\
\x74\x6d\x7d\x9f\xfe\xcd\x52\x1a\x0e\xa3\x43\x1b\x47\xe7\x2e\x26\
\x8e\x39\x94\x67\x10\x39\x75\x05\x3e\xb6\x57\x27\x7c\x44\x5b\x5a\
\x52\xab\x81\xa4\xe2\x56\x85\x56\xae\xda\x3c\x93\x68\xf6\x68\x56\
\x03\xbe\x2e\x8d\x75\x80\x52\xe9\x03\x67\x76\x16\x7f\xd3\x36\x12\
\xcd\x56\x5a\x88\x5e\x42\x10\xde\x49\xee\x3b\xa5\x11\x2e\xac\x6a\
\xe4\x37\xa8\x8b\xd5\x89\x1f\x65\x5f\x62\x45\x0a\xaf\x25\x55\x51\
\xad\x31\xce\x76\xc6\x25\x34\x42\xca\x20\x43\xb3\xd6\x2b\xa1\x68\
\xa0\x22\x6d\x57\x81\x5a\x37\x3b\x02\x95\x8d\x32\x95\x27\x01\xbf\
\xfd\x1e\xc6\x63\x55\x5f\x99\x2b\x28\x95\x78\xcc\xde\x4e\x8e\x37\
\xb4\x25\x6d\x3d\x1b\xef\x62\x68\x05\xb6\x60\xc5\x25\x64\xe8\xf0\
\x83\x51\x82\x4c\x56\x54\x11\xbf\x99\x82\x67\xb1\x50\xf6\x07\x56\
\x54\x27\x8a\xad\x87\x7d\x36\xf9\xe1\x11\x5a\xb1\x0c\x4d\xf7\x4c\
\xdb\xde\xbd\x57\x7c\xed\x76\x59\x2a\xf7\x0b\x63\x8a\xed\xf3\xca\
\x94\x1f\xa1\xf3\xec\x52\xae\xf0\x79\x58\xe6\xfc\x94\x77\x91\x20\
\xcc\xbe\xb2\xf3\xdb\x2c\xb8\xc7\x80\x9d\xfa\xcb\x85\x6b\x85\xcf\
\x33\xf7\xbd\x75\xe6\x5a\x9e\xcd\x95\x4e\x5f\x38\x43\xc6\xea\x58\
\xd9\x2b\x90\xde\xc6\x14\x79\x6d\x26\x12\xf1\xe3\x35\x7b\x0c\xf1\
\xc6\xc9\x45\xf2\x69\x9b\xdd\x34\x6b\x2c\x33\x76\x49\x8f\xe4\xb3\
\x23\x17\x27\xf7\x09\x75\x15\xb3\xa3\x19\x08\xe7\x97\x6e\x11\xfa\
\x09\x7e\x21\x4c\xd0\x71\x40\x71\xe7\x0b\x6f\x0b\xcf\x0b\xff\x10\
\x50\xb0\x30\xe9\x35\x31\xfb\x44\x60\xb3\xd0\x4d\x87\xe8\x62\xa8\
\xc0\xa2\x83\xdf\x21\xc7\x5c\x86\x40\x7f\xfd\x0c\x80\xbe\x70\xce\
\xed\x5d\x61\xaa\x50\x2b\xf8\xcd\x5e\x4a\x6e\x47\xd2\x09\x89\xea\
\x81\x7d\x8f\xd4\x13\xe4\x28\x19\x47\x3b\x84\x43\xf6\x7e\x15\x89\
\x6e\x13\x64\x24\xa3\xc3\x34\x92\x8e\x66\x20\xce\x81\x3f\xce\x36\
\x85\x41\x07\x8b\xc9\xb9\xc2\x63\xc2\xd5\xc2\xc7\x61\x30\xcd\xf0\
\x60\xc5\xea\x0f\xc2\x55\x82\x77\x1c\xd6\x54\x1a\x08\x93\x86\x89\
\xd9\xe9\xc2\x74\x61\xbc\xf0\x72\x98\xcc\xf3\xf0\x72\x0e\xbd\x9e\
\xac\x3c\x67\x0a\x6c\x06\x87\x4e\x6c\x6e\x6b\x77\xfe\x94\x8a\x8a\
\xae\xeb\x56\x4e\xac\xbd\x7a\x41\xe3\x57\x6f\xe8\xb4\xb8\x9f\x83\
\x29\xaa\x6e\xef\xc0\x14\x55\x78\x35\xc9\xcc\x6a\xcb\x49\x02\x27\
\x6c\x19\xe0\xd7\x43\xec\xf7\x1d\xe2\xb5\x99\xc0\x06\x6d\xb9\x61\
\x61\x90\x66\x71\x24\x83\xd3\xc2\x9c\x80\x3e\x47\x78\x3c\x48\xa1\
\x9c\x79\x92\xd3\xe2\x6b\xc6\x53\xeb\xc4\xed\x8a\xfe\x76\xcc\x96\
\x41\x5b\xad\x7d\xd0\xcb\x7f\x5c\x54\xea\xcc\xd5\x56\x9a\xd6\xf6\
\xd0\xbd\x8d\xf3\x3d\x77\xf0\xb6\x20\x2b\x9a\x9a\x5d\x3e\xb1\x4d\
\xb3\x5e\xcf\x15\xcc\x7c\x69\x52\x79\x4b\x33\x0f\x2f\x54\xf5\xd3\
\xfd\xf5\x73\x1d\x83\xca\x9c\x00\xd9\x3f\x6e\x1b\x3b\xd6\x24\xfa\
\xd4\xda\x13\x6a\x67\x88\xcf\xdc\x94\x15\x5b\x1e\x2a\xa7\xb4\x36\
\x6b\x1b\xdf\xdb\x56\xf3\x9c\x05\x0b\x1a\x3f\x2d\x74\x0a\x38\x32\
\x90\x9c\xa3\x9f\x75\x83\x87\x40\xf6\x5b\x8e\x11\x3e\xcc\xba\x5b\
\x5a\x02\xa1\xce\x85\x02\x71\x33\x46\xd8\xb3\x34\x36\x45\x97\x92\
\x92\xa5\x37\x24\xe7\x0a\x7f\x2f\xba\xb4\x34\xb4\xfa\xca\x97\xb6\
\x96\xda\x9e\xa6\x83\xa6\xbb\x4b\x79\xfb\x28\x29\xe1\x7e\x15\x22\
\xbd\x20\x6a\xea\xa0\xab\xbe\x14\x7a\xde\x90\xc6\x7e\x24\x33\xb8\
\x23\xd5\x62\x3d\x5d\x9f\x6a\x9a\x65\xcc\xfd\x70\xa9\xd1\x77\xe3\
\xe5\xe1\xe2\xdc\x0f\x4d\x5d\xc7\x34\xf5\x54\x1b\x15\xb1\x1d\x63\
\xb6\x71\xb2\x8c\x68\x77\xf1\xf5\xdf\x4b\x31\x4d\x42\xed\x43\x55\
\xf5\xa1\x9a\x55\x5a\x64\x24\xcb\x79\xa9\x90\x2c\x57\xcf\x65\x89\
\x45\x35\xbd\x12\x6f\xdb\xe3\x5f\xba\x7b\x7e\xf3\xc2\xbf\x1a\xc9\
\x03\xd8\x2e\xc8\xa2\x8e\x68\x20\x8e\x30\xfc\xe2\xf1\x2c\x01\xe4\
\x48\xc8\x35\xa6\x9c\x9b\xba\x40\xe0\x98\x7d\x58\x0f\xf0\xec\xe1\
\x5c\x2a\x3c\x2b\x30\x93\xec\x24\xb0\x9a\xe5\xb7\xe7\xa3\xe4\x34\
\xe5\x6a\x9f\x73\xdf\xfd\x89\x57\xf7\x9b\x9d\x06\x28\xfd\x3c\xe1\
\x44\x21\x78\x28\xa2\x67\x89\xaa\x81\xbd\x8f\x53\x99\x2b\x65\x1c\
\xad\xfb\x0f\xe9\x06\x15\x34\x05\x77\xb3\x9c\xef\xf6\xb3\x2d\x46\
\xf3\x45\x0b\x46\x0f\xe5\x19\x29\x1f\x35\xc9\x8b\xcc\x51\x86\x47\
\xb5\x12\xf6\x42\x3c\x11\x3f\x4f\x52\x90\x73\x31\xbb\xe6\x2b\x94\
\x9e\x55\xb2\xa5\x85\xde\x23\xe3\xbd\xd4\xe4\x5d\xab\x13\x6b\xde\
\xfb\xc3\xc4\xda\xcb\x1a\x46\xed\x9a\xb5\xd9\xdc\xd1\x0c\x64\x81\
\x3a\x8d\xb2\xf1\x1a\x66\x39\x06\xc2\x74\x3e\x4c\xd8\x5b\xf0\x6e\
\xe8\x1d\xa8\x34\x16\x02\xfe\x25\x84\x45\x28\x28\x27\x0a\xa6\x0b\
\xf2\xc6\xe9\x23\xfc\xf4\xc1\xcf\x43\xe6\x52\x78\xd2\xb3\x55\x61\
\xd9\xaa\xde\x20\xdd\x63\xe6\xf3\x1e\x0b\x57\xe8\x93\x5e\x24\x08\
\xdc\x97\xea\xad\xd6\x3e\x4a\xd5\x5c\x2f\xe3\x28\xfb\x19\x49\x8e\
\xfd\xc5\x66\xbb\xf1\xf4\x85\x97\xed\x31\x4b\x6d\x08\x4c\xe9\x97\
\xeb\x92\x0f\x4f\xac\x4e\xf4\x26\xf4\xd2\x43\x7f\x19\xaf\x64\x2c\
\xfb\x91\x88\x53\x3a\xa7\x8c\x58\xc3\x85\xcf\x9d\xeb\x0d\xb9\xc2\
\x32\x10\x53\xef\x71\x64\x7b\xa9\x47\x46\xe4\x5a\x59\x09\x2c\x8c\
\x22\x33\x72\x44\xe3\xd1\x22\xcb\xe4\xca\x7e\x97\x6e\x60\x6c\x97\
\x08\x6e\x39\xb1\x14\xcb\x4c\x12\x58\xa9\x72\x55\xe0\x93\xce\xac\
\x34\x3b\x03\x9f\xdb\x25\x27\xfd\xbf\x4a\xfe\x5b\xb8\x59\x70\x1b\
\x1d\x67\xc1\x38\xdf\x16\xa8\x2f\x55\x13\xa7\xfd\x54\x93\xd1\x58\
\x29\x64\xd9\xc6\xa1\x70\x6c\xbe\x7e\xde\xe7\xf2\x85\xa3\x8b\x33\
\x8e\x56\x09\xe8\x01\x7c\xe9\xa5\x2f\xdc\xd8\xa9\x7b\xe7\x9d\x95\
\xb6\x7b\xc9\x92\x51\x41\x79\x15\x76\x65\x8e\xed\xd5\xb3\xcb\xcb\
\x9a\xa5\x26\xb9\x79\x65\x2b\x75\x09\x35\xf5\x1c\x37\x65\x40\xcd\
\xa0\xde\x77\xd6\x0c\xea\xf3\x60\xf5\xc0\x3e\x0f\xa4\x31\xa8\xef\
\xfd\xbd\xae\x9c\xbe\x63\x09\xec\x56\x96\x22\x18\x1b\x0f\xd1\x5e\
\xef\x86\x97\x6e\xb7\xa3\x0d\xed\xd8\xf9\xe9\xe2\xcd\xd2\xb5\x9b\
\x78\x39\xcd\x3b\xab\xe4\x6c\x82\x69\xc7\x8f\x92\x71\x6c\x9c\x33\
\x43\x11\x37\xf4\xba\xf5\x33\xf3\x3f\x9b\xf9\xb7\x22\x8a\x64\x65\
\xfd\xf6\xaa\x61\x75\x76\xca\xbe\x9b\x23\x51\x59\x37\x8b\x4e\x30\
\x3b\x59\x96\x39\x92\x8d\x4c\x77\xd1\x50\x66\x90\xb8\x99\xa8\xd2\
\x0f\x7d\x1d\xa2\x78\xae\x87\x13\x86\xb2\x2a\x11\x8b\xc5\x59\x1e\
\x7d\xa5\xe8\xb6\xae\x3c\x05\xbe\x55\x53\x38\x1f\xe5\x25\x6f\xd8\
\xe5\xbd\xbf\x32\x5e\xb3\x8f\xe3\xf7\xac\xe1\x17\xc6\x65\xb5\x9f\
\x5f\x0f\x91\x67\xd8\x33\xeb\x46\x09\x09\xfa\x7d\x83\x16\x23\x95\
\x7a\xce\xe7\xb4\x76\xd1\xdc\x14\xa2\xfd\x2d\x61\x76\xfa\x42\x05\
\x59\x78\x28\x8f\x4c\x63\x4b\x19\x09\x2b\x7c\xad\x3a\xdb\x3a\x83\
\x98\xcd\xfa\x39\x2e\xb5\xbc\x4d\x0d\x72\x17\xb6\x65\xe3\x65\xf2\
\x53\x2a\x55\x72\xbc\xaf\x82\x59\x46\xaa\x66\xa4\xcc\x4e\x3a\x27\
\x99\x4d\x7e\x9b\x7f\xf4\xa1\x70\x1b\xb3\x79\x05\x49\x61\xb6\xf0\
\xe3\xed\xd7\x8e\x20\xfc\x7e\xcc\x3c\xc8\x39\x4b\xd6\x4a\x0b\xd4\
\x97\x96\x58\xa7\x1a\x0d\x48\x48\x33\xa7\xd9\xa0\xdd\x74\xad\x5c\
\x95\x4f\x8b\x5a\x2c\x2d\x0d\xdb\x18\x48\xf9\x64\x1b\xdd\xcd\x98\
\xb5\xa1\x9b\x51\xab\x81\xa8\xc1\x0d\xba\xc1\x91\x84\x56\xe2\xe1\
\xc7\xb2\x4d\x62\xd4\xf6\xa1\xe4\xb4\x6e\x96\x9d\xb5\x93\x4c\x5d\
\xdf\x2e\x36\x9a\xda\xb4\x25\xd3\x80\x85\xfa\xf4\x0e\x28\x4b\xa3\
\x3c\x68\xb6\x07\x6d\x20\xa6\x3c\x73\x78\x89\x76\xac\x6a\xf4\x73\
\x35\x98\xe5\x4f\x37\xe1\x84\x58\x62\x2e\x48\x96\x1d\xef\x22\x6f\
\xb1\x46\xc1\x8c\x01\x32\x88\xcf\x52\x3b\x65\x85\x75\xa4\xbf\x59\
\xab\x67\xe8\x6e\xd9\x94\xde\xa7\x31\xdb\xbe\x30\xd8\xea\x51\x34\
\x81\x7c\x19\x8f\xc5\xe6\xa8\x16\x8f\x10\xcd\x93\xab\x27\xce\xf8\
\xd2\x36\x9a\x5e\x6c\x49\xa5\xbe\xf7\xb6\xc2\x6a\xb6\x5a\xb4\xa2\
\xd1\x67\xd9\x51\x79\x1c\x6e\x86\x64\x5d\xda\x93\xa9\xa9\x1a\x3f\
\x65\x1d\x2d\x3a\xb7\xf5\x5c\x3a\xcb\x9e\xb0\xac\x4a\x3b\x6e\x1e\
\xa3\x39\x6b\x88\x97\xa7\x9e\x99\x66\x6a\x5d\x3a\xab\x2e\xe5\xc3\
\xeb\xa0\x9c\xee\xdd\x58\x36\xbf\x46\x0b\x18\x0e\x4b\x85\x21\xc4\
\xa3\xe9\x59\xa3\xbf\x70\x91\xc0\x72\xa0\x9b\x08\x55\x78\xe0\x2d\
\x97\x88\x75\x43\x51\xb8\x02\x0d\x41\x3e\xac\x56\x21\x23\x6f\x68\
\xc8\x3e\x0c\x67\xc2\x82\x91\x06\x3a\x58\xc6\x02\xb9\xbc\x91\x4a\
\x81\xec\x79\x6f\xd7\x7d\x6f\x1b\x7d\xe2\x1a\xfb\x70\x9a\xa6\x37\
\x67\xdb\x3c\x97\xb7\x2a\xee\xb7\xc9\xa1\xf3\x3b\x4d\x98\x31\x59\
\xd5\x0c\x74\x37\x48\x22\xd1\x9b\x86\xc6\xcd\x96\x9d\xa8\xb7\x4c\
\x3b\x7b\x33\x45\x11\xac\x36\x6d\x3a\xe9\x99\x03\x0f\xd3\x4a\xfa\
\x2e\x71\x9a\x63\x0d\xb3\xe2\x02\xcd\x12\x6d\x5b\x5f\xa1\x57\x5c\
\x6c\x2d\x63\xda\x3a\x4e\x91\x7d\xaa\xf3\x7b\xdb\x4e\xb1\x27\xe0\
\x17\x62\x7d\xa0\xf4\x37\x04\x96\x5f\xdd\xc4\xaa\xd2\x83\x02\xde\
\xd0\x2f\xce\xf6\x64\x2f\x78\x89\x5c\x30\x42\xbf\x35\x76\x14\xea\
\x1f\x05\x39\xe4\xcf\xc0\x20\x8c\x12\x38\x77\x15\x86\x41\xe7\xab\
\x0d\x23\x64\xe9\xd8\xef\xfc\xd3\xcb\x4a\x2f\xb4\xff\x90\x8f\x77\
\x87\xbf\xd7\xc6\xb3\xdb\xa9\x96\x49\x86\x15\x3b\x50\x46\xd1\x26\
\x64\x91\x76\x27\xa4\xc8\x5a\xed\xf0\xb7\xd2\x74\xaa\xff\x2d\x79\
\x5f\x73\x2d\xbf\x7b\x39\x7d\x91\x6d\x3f\x55\xd7\xdc\x32\x25\x87\
\xe4\x99\x4a\xd9\xcd\x66\x75\xcc\xbb\xeb\x8c\x22\xb0\x7f\xd0\x9e\
\x84\xf1\xb1\xb2\xf5\x65\x08\x95\xf4\x15\x8f\x8d\x42\xe0\x53\x2a\
\x0b\x5e\x54\x63\x37\x3d\x94\xf0\xa4\xa8\x46\x28\xba\x28\x2a\xff\
\x8f\x98\xb9\xcd\x74\x52\x37\x66\xc8\x4c\x2d\xbf\xc9\xb3\xd9\x5f\
\xfd\x28\x6d\xb2\xed\x57\x53\x66\xf3\x18\x23\x39\x34\xdf\xa0\x3d\
\xad\xb6\x5d\x23\xf8\x85\x60\xed\xd9\x6c\x45\x8a\xe9\xc3\x85\xf7\
\xb4\x67\x25\x2b\x88\x37\xbb\xf7\x84\x5c\x53\x83\xd6\x67\x1a\xfa\
\x75\x1d\x4d\xed\x41\xf3\xe7\xcb\xa7\x65\xd9\x16\x33\xde\x84\x3c\
\xcb\xa7\xaa\x06\xb1\x33\xc3\xe1\xa5\xd6\x28\xb4\x69\xc3\x2b\x6b\
\x1f\xa4\x6e\xd4\x2e\x7f\x56\xcc\x74\xa2\xe2\x9b\xb7\x04\xbf\x30\
\xa7\xfc\x4e\x65\x71\xb0\x97\x4a\xf8\x8f\xc9\x38\x46\xd6\x8d\x1a\
\x4a\x18\x95\x8f\x08\x49\xf0\x7c\x17\x0a\xb3\xf3\x65\x0c\xf1\x1e\
\xfb\x07\xe3\x05\xc2\xa2\xb0\x1e\x2e\x43\x6c\x5e\x60\x56\x8c\x27\
\x21\xd5\x29\xc2\xdd\x42\xe0\xf1\x95\x69\x68\xc9\xdb\xf4\x5b\xf2\
\x0e\x5c\xb9\x2b\x63\x83\x96\x79\x59\x42\x2f\x9f\xf4\xfb\xce\x9a\
\x8e\xb2\x8e\x88\x94\xc2\x98\xfd\x14\x79\x80\x36\xbc\xda\x3e\x3c\
\x2f\xe3\x6a\xcf\x1b\xbd\xcb\x33\xfa\x5d\xde\xf7\xac\x0a\xe3\x18\
\x59\xd4\x01\x7a\x9a\xd8\x48\x0a\xdc\x83\x17\xe4\x7d\x2b\xd6\x94\
\x99\x0e\xc3\x3c\xb4\x6c\x03\xc7\xf4\xf5\x3a\xca\xaf\x75\x5c\x5b\
\x9e\xcc\x7c\x47\x9f\x0f\xd9\x4d\x0d\x8f\xd7\x27\xf7\x0d\x3a\x00\
\xac\x70\xfd\x4e\x20\x86\xfe\x85\x30\x4c\x60\x09\x92\x97\xa5\xc2\
\x98\xbe\x51\x1c\xea\xe0\x30\xdf\x0c\xe1\x5e\xe1\x55\x21\xdf\xf3\
\x02\x65\x9a\x04\x3c\x90\xe3\x85\x90\x6f\x2e\xef\x06\x2f\x64\x93\
\x8f\xa7\x6e\x97\x4d\xb4\x0b\xa3\xfe\x54\x78\x42\x60\x57\xfd\x3f\
\xc5\x72\x5d\x60\x34\x7d\x53\x6d\xc6\x67\x6a\xa8\xdb\x2c\x83\x16\
\xcb\x67\x59\x7e\xf3\xfd\xba\xaf\x97\x7a\x37\x2d\x4b\x63\x25\x23\
\xd7\x26\xc3\x9b\x96\x69\x9c\x50\x2a\x03\x57\xb9\x79\x3a\x84\xdc\
\xc6\x41\x17\x54\xa6\xca\x4b\x26\xf7\x34\xba\x56\xf4\xb3\x62\xe6\
\x5a\xda\x9d\xc8\x32\x02\x8d\x71\x2a\x66\x59\xda\x5c\x31\x27\x4a\
\x35\x5b\x7f\x89\x84\xd9\x47\x5e\xe7\x2a\x85\x6c\xd3\x8d\x54\xf6\
\x5e\x4a\xcc\x4c\x7d\xb7\xb4\x39\x35\x57\x16\x31\x47\x21\x15\xca\
\x58\x2a\x61\xb4\x55\x42\x3f\x81\x07\x6b\xbf\x7d\x8b\x62\x79\xa3\
\xb8\xac\x96\x7d\x2e\x7c\x23\xe4\x52\x72\x37\x5f\x56\x88\x78\x36\
\xea\x2e\x38\x9e\x19\xf9\xb2\xf2\xe6\x5d\xf5\x22\x7d\x0b\x81\x67\
\xa6\xc0\x5e\x5c\x79\x4b\x21\xda\x4e\x48\x35\x4b\x60\x11\xa3\xe4\
\xfa\x7a\x8d\x7b\x69\xa4\xf4\xe0\x76\x3f\x67\x18\xb4\x61\x69\x2f\
\x9d\x32\x4e\xad\x1f\x33\x78\x52\xd0\x32\x85\xf2\xa5\x7f\x29\x3e\
\x96\x50\xe8\x6d\x0e\x28\x94\x37\xdf\x7d\x39\xea\x07\xe7\x37\x7e\
\x79\x82\xfb\x3d\x92\x82\x06\x92\x8f\xa1\x73\xaf\x6a\x7c\xed\xf6\
\xa6\x69\x4d\xd6\x34\xc2\x01\xbb\x34\xb1\x93\x9e\x32\x53\x47\xea\
\x27\x83\xc2\x3a\x1b\x15\xa4\x29\x51\x9e\x76\x94\x80\xde\xd6\xeb\
\x55\x91\xe8\x7a\x9f\x56\x27\x87\x97\x51\xcd\xf3\x4b\x1b\x1b\x8e\
\x0e\xf3\xad\x3f\xda\xc2\xaf\xc5\xeb\xdd\x93\x09\x6a\x9b\x5f\x54\
\x54\xb0\xb9\x32\xdc\x2f\x5b\x6c\x73\xc4\x82\xcb\x76\x21\x62\x68\
\x25\xff\x90\xa9\x20\xbb\x60\x19\x62\xfc\xc3\xac\x88\x3a\x8c\x04\
\x50\xea\xe6\x16\x1d\xe0\xb4\xd3\xaf\x22\x17\x4f\xb6\xf1\xae\x7e\
\x48\x6a\x54\xd8\xc6\x41\x43\xec\xc6\x86\xdb\xd4\xae\x7b\xd2\xc7\
\x58\x8a\x24\x39\xf3\xc5\x7a\x48\xb8\x42\xc6\x41\xc8\xde\x86\xda\
\xd5\x40\xbc\x95\x45\xd7\xab\xbe\x04\x16\x8e\x1d\xfc\xcf\x66\x23\
\x75\xbc\x94\xea\x29\x21\xd0\x9e\x93\x62\x3a\x9e\xcd\x9e\x57\x50\
\x71\xfc\xfc\xb1\x83\xdf\x6c\x0f\x29\xf0\xfc\xda\xd4\x60\x5c\xa4\
\x27\xde\x5f\xa9\x5d\x81\x4e\x07\xd0\x0e\xcd\x1c\x1f\x09\x67\xcd\
\x6f\x6c\xba\x93\x4b\x6f\xdb\x4a\x9a\x8e\xbc\x4c\x14\x4c\xd9\xfa\
\x0f\x22\xfa\xa3\x9f\x03\x6b\xdd\x25\xd5\xae\x8a\xa5\xa6\x46\xd4\
\xe1\x24\xb0\x60\xf4\xae\xef\xe8\xd4\xeb\xb1\xb1\x44\x6c\x7f\x85\
\x35\x07\x69\x90\x37\x55\x27\xbb\xeb\x19\x74\x79\xc4\xb0\x4c\x0f\
\x1a\x94\x30\x53\xcb\xb0\x8f\x37\x36\x37\x3c\xd1\x1e\x33\x87\x5b\
\xb8\x8b\xae\x1e\xbc\x40\x3f\x59\x3b\xb6\xa6\xdf\x4f\x9f\xd1\xb1\
\xfc\x11\x7a\xfe\xdd\x51\x2b\xb2\xd5\x52\xcb\x36\xcf\xa5\x6a\xf3\
\x52\xbd\x69\xf8\xb9\xee\x4d\x95\xe9\xfe\xb1\x2e\x39\x98\xe7\x44\
\x5f\x5d\x0d\x25\x04\xea\x9e\x9c\x56\xdd\x29\x11\x1b\x26\x41\x2c\
\x7f\x88\x67\x41\xd9\x6e\x7a\xa9\xd8\x97\x61\x3a\x9c\x36\x75\xf4\
\x0e\xe9\x07\xdb\xba\x26\x7a\x55\xc6\x53\x89\x2e\x46\x8b\xd6\x92\
\x1c\x8a\xa5\xec\x66\xab\xe9\xbb\x25\x4d\xf5\x0b\xc2\xf8\xf1\x84\
\xa2\xc5\xa8\xdf\x7e\xee\xf9\xc3\x1e\x3d\xcd\x6e\x8d\x3d\xec\xe6\
\x8a\x36\x06\x12\x33\x9a\x1b\xeb\x1b\x1a\x17\x78\x5f\x8e\x2a\xba\
\x8e\xa8\x40\x24\x81\x48\x02\x91\x04\x22\x09\x44\x12\x88\x24\x10\
\x49\x20\x92\x40\x24\x81\x48\x02\x91\x04\x22\x09\x44\x12\x88\x24\
\x10\x49\x60\x55\x96\x00\x4b\xe7\x3e\x47\x72\x56\xe5\x2e\xad\xbe\
\x6d\xcf\x77\x6e\x89\x7b\x1b\x09\x3b\x09\xeb\x0b\xbc\x2c\xe5\x77\
\x0c\x9d\xf3\x4f\xdb\x0b\xfc\xf6\x12\x67\xb1\xf8\x6d\x2a\xf7\x01\
\x3c\xbd\x47\x92\x7e\x61\x87\xb3\x4d\xce\x5a\x33\x2f\xef\x70\x16\
\x89\xb3\x4b\xdc\xe3\x64\x67\xbe\xe3\xeb\x2c\x1f\xae\x2b\xd0\x16\
\x9d\xfb\x4a\x9f\x8d\xe2\x6d\x38\x87\x9f\x7e\x8e\xc6\xe0\xd7\x36\
\xbe\x70\xa5\xe9\x6b\x68\x84\xd2\x0f\x16\xd8\x58\x2d\xf4\xba\xed\
\x31\xca\xb3\xad\x50\xee\x4b\x55\xde\xc6\x23\x03\xe4\xc5\x99\xb3\
\x39\xde\x9b\x45\x5e\x33\x4e\xbb\x09\xc8\x3d\xfb\x25\xb8\x22\x99\
\xe5\xc8\xde\x5f\xe9\x5b\x0a\x8c\x89\xef\x81\xd5\x1c\xe5\x56\x89\
\x64\x5e\x3e\xe2\x68\x37\x87\xec\x38\x1e\xcd\xe7\xdf\x85\x83\x3c\
\xad\xdf\x45\xd7\x2f\x66\xee\x73\xfc\xe0\x63\x81\x1d\x49\x94\xde\
\x21\x5e\x5b\x7d\x52\x70\x0e\x32\xf2\x5b\xb7\x37\x08\x9c\x9a\x84\
\xe7\x7b\x42\xad\x00\x2f\x3f\x42\x29\x39\xb1\xfb\x4f\x81\xbc\xb4\
\xe7\x7d\xe1\x1c\xc1\xf1\xd4\xb4\xf5\x71\xc1\xef\xad\x39\x25\x97\
\x4d\xdd\xc4\xe1\x25\xe1\xe4\x00\x9c\x68\xcb\x84\x00\xf9\x8a\xcd\
\x82\x1c\x26\x09\xf7\x0b\xf9\x1c\x5b\x10\xbe\x1b\x28\xd3\xbf\x84\
\xdd\x82\x64\x2e\x31\xcf\xa9\x2a\x37\x5d\x40\x76\xab\x2c\xe5\x3a\
\x6a\x72\xac\x7a\x74\xba\xf0\x2b\x81\xd7\x5b\xf7\x17\x50\x90\x5f\
\x0b\x5b\x67\x7a\xcb\xac\x72\xa3\x80\x37\xe3\xd5\xd1\xbd\x04\xca\
\x30\x9b\x5c\x26\x38\x83\x88\xb1\x71\x88\xd1\xd9\x44\x42\x70\xe4\
\x3d\x4b\xd8\x53\xc0\xe8\x3e\x17\xae\x13\x98\x6d\xbc\xb4\x9b\x12\
\xc6\x0b\x7f\x11\xf6\x15\x68\xcf\x6d\xc2\xc5\xc2\x3e\x02\x04\xef\
\x5c\x7d\xc9\x64\x29\xfb\x03\xfe\x41\x36\x56\x99\xd5\x7c\x77\x65\
\x4b\x6c\x01\x8e\xa5\x3a\xc3\x33\xa9\x4f\x5e\x74\x0a\xc3\x23\x97\
\x6b\x64\x85\xba\xe3\x8c\x49\x10\x99\x15\xe2\xf5\xa3\xdd\xf7\x3b\
\x6a\xc2\x71\x6d\x0c\x84\x53\xb8\xee\xf3\x29\xe3\x74\x8d\xf2\x1f\
\x21\x10\x3e\xa0\xac\x0c\x1e\xc6\xf0\xa9\x00\xbd\x20\xa0\xcc\x28\
\x31\x7c\xbc\xe1\x08\x1e\x7f\x88\x30\x35\x93\x57\x1f\xe9\x90\x8c\
\x37\x04\xaf\x14\x30\x10\x5e\x05\x75\x08\xe1\x1e\x2d\x30\x73\x5c\
\x2b\x38\x2f\x2b\xdd\xae\xef\xdb\x09\x3b\x0b\xbc\xe3\xe0\x47\x1c\
\x25\xa7\x2e\x8c\x93\xf6\x82\xe6\x4c\xc6\x81\xfa\x24\xa4\x63\x66\
\x84\x08\x13\x37\x15\x38\x27\xe4\x84\x7a\x38\x00\xca\xd3\xe6\xd2\
\x0e\xe7\xc1\xb9\x2d\xd1\x1f\x5e\x67\xfe\xb9\xc0\x11\xff\x97\x05\
\x94\x1f\xf9\xd5\x67\xb2\xae\xa5\xcf\xa1\x42\x4d\x26\xfd\x40\x7d\
\x12\x4e\x32\x33\xe1\x6c\x30\x54\xc7\x00\x91\x3f\xa1\x1f\xe1\xe5\
\x5c\xa1\x56\x70\xbf\xf0\x43\x38\xb6\x9b\xc0\xbb\xf5\xbc\xcb\xff\
\x96\x90\xeb\x30\x9f\x73\x5c\x7f\x33\xe5\xf9\x46\x98\x2e\xcc\x17\
\x20\xda\x48\xbb\x91\x0f\xed\xa6\x1f\x38\x4a\xc6\xf7\x13\x01\xa2\
\x6d\x8c\x07\xf9\x88\x0e\x68\x5b\x3e\x47\x01\x0f\xea\x42\x16\xb4\
\x09\x07\xfc\xa1\xe0\x10\xaf\x54\xef\x2a\xd0\x07\x8e\x82\xcc\x10\
\x9c\x70\xb0\xaf\xbe\xa3\x2b\x8c\x15\x79\x78\x8f\x68\xb2\x80\x6c\
\x70\xba\xe8\x1e\xb2\x98\x29\x94\x45\x7e\x06\xc2\xc0\xac\x2b\xdc\
\x24\xb8\x3b\xc8\x00\xa2\x64\x5b\x0a\x28\x0d\x4a\x86\x82\xe1\xfd\
\xdd\xf4\xb0\x2e\xfe\x24\xf8\x0d\x04\x87\xd6\xbe\x10\x50\xee\x6d\
\x84\x8f\x05\x94\x9e\xe9\x1e\x43\x68\x14\xdc\x84\x90\x79\xe6\x98\
\x22\x38\xc6\xc1\xfd\xa5\xc2\x05\x42\xae\x59\x03\xc1\xff\x56\xa0\
\xbe\x3a\xe1\x97\xc2\x1d\xc2\x2d\x02\x74\x91\xf0\x99\x30\x3a\x73\
\xbd\x85\x3e\x99\x0d\x0f\xce\xa4\x6f\xae\x4f\x8c\x10\x65\xa0\xbd\
\x84\x78\x3f\x11\xca\xf5\xdc\x28\xfe\x6f\x32\x3c\x1b\xf4\x39\x52\
\x40\x01\x4e\x13\xa6\x0b\xc8\xfd\x77\x02\xe3\x32\x4b\x38\x43\x40\
\xe1\x4f\x10\x30\xee\xb3\x05\x14\x99\x50\xaf\x42\x48\x0a\x3b\x09\
\x84\x9c\x87\x0b\xc7\x09\xcc\xd0\x28\x38\xcf\x84\xc8\x00\x25\xc2\
\x09\x9d\x29\x5c\x2b\xdc\x2b\x78\x09\x5e\x18\x20\xc6\xf8\x9e\xb0\
\x9e\x40\x9f\x29\xc3\xf8\x32\xd6\x94\x75\x66\x7b\xda\x37\x46\xe0\
\x25\xb2\xab\x05\xf4\x01\x59\x12\x49\xa0\x23\xcc\xec\x1b\x0a\xb4\
\x3d\x17\xc1\x0b\xa7\x88\x73\x60\x36\x63\x8c\xce\x13\x30\x04\xde\
\xef\x61\x3c\xe0\x81\x8e\x20\xa7\x57\x84\xff\x13\xd0\x03\x1c\xd7\
\x58\x61\xb6\xc0\x18\x6d\x24\xd0\x7f\xf4\xa7\x52\xe8\x21\x20\xd3\
\x63\x05\x64\x53\x32\xf9\x29\x58\x57\x71\x43\x00\x28\x96\x97\x48\
\xc3\x3a\x19\xa4\x35\x05\x8c\x86\x81\x83\xcf\x51\x02\xb3\x4c\x52\
\x38\x47\xc0\xeb\xf8\xd1\xcd\x4a\xa4\x53\x7f\x16\xb0\x7a\x66\x2a\
\x14\x16\xa1\x78\x8d\x8a\x81\xc3\x33\xf9\xb5\x05\xe1\xbb\x67\x1b\
\x5d\xa6\x0d\x9a\xb6\x9c\x2f\xa0\x14\x47\x08\x18\x1e\x83\xcb\x60\
\xf3\x30\x4f\x1e\xfa\x07\x1c\xc2\x9b\x31\xc8\x7c\x42\x0c\x16\x7d\
\xa3\x3c\x8a\xc2\x60\x75\x16\xf2\x79\x44\xca\xe5\x23\xfa\x81\x51\
\x33\xd0\x0c\xe6\x31\xc2\xb3\x42\x6f\xc1\xa9\x77\x3f\x7d\x67\x80\
\x4f\x10\x08\x57\x2f\x17\xf0\xa4\xce\xcc\xe7\x6e\xf7\x00\xa5\x0f\
\x17\x50\x1a\x94\xe1\x78\x01\x87\x80\x21\xc3\x8f\xba\x50\xee\xc3\
\x84\x23\x85\x3b\x85\x53\x04\xf8\x7b\xfb\xb1\x83\xd2\x68\x0f\x61\
\x2b\x4a\x85\xcc\x18\xdf\x93\x05\x08\x99\xba\xe5\x43\x1a\xd7\x28\
\x36\xc4\x6c\x42\x1d\x97\x08\x18\xe9\x49\x02\x8e\x85\x3c\xde\xba\
\x94\x94\x1e\x6b\xea\x7a\x4c\x40\x16\xc8\xf9\x4d\x81\xf6\x41\x23\
\x84\xcd\x04\x64\x0f\x3f\x64\x31\x4c\xd8\x9b\x9b\x22\xfa\x87\x33\
\xb9\x5b\xc0\xd0\x30\xac\x3d\x05\x66\xbc\x43\x05\x78\x22\x8b\xbd\
\x84\xb2\xc8\xcf\x40\xe8\x90\xa3\x68\x5e\xe6\xe4\x77\xee\xe3\x4d\
\xdd\xe5\x69\x30\xde\x1e\x61\xa3\xa0\x6b\x7b\x0b\x67\xae\xdf\xd1\
\x27\x1d\x40\x09\x99\x6d\x50\x74\xbc\xde\x5d\x02\xca\xe2\x26\x47\
\xb8\x7e\xed\xf4\x64\x6d\xbd\x44\xa1\xb6\x12\x9e\x16\x16\x65\x52\
\xa7\xea\x13\x81\x6d\x99\xab\x50\x26\x9d\xfa\x70\x00\x0c\x38\xe5\
\x31\x12\x88\xf2\x33\x05\x47\x21\x32\xc9\x45\x7d\xf4\x51\x6e\x3c\
\xe2\x5f\x04\xbc\x3a\xce\xe0\xaf\xc2\x97\x82\xd3\x3f\x8c\xe8\x5b\
\x61\xb1\x00\x31\xe0\x18\x05\x8a\xe6\xa5\x06\x25\x30\x93\xe2\x5d\
\x51\x4e\xc2\x0e\xe4\x8e\x57\xaf\x11\x98\x05\x9f\x10\x90\x01\x63\
\x35\x49\x40\x91\x08\x53\x1c\x83\xd4\xd7\x34\x6d\x27\x7c\x25\x60\
\xbc\xd0\x5c\x61\x8a\xb0\xa3\x40\x9f\xfd\x94\xdc\x9d\xb6\xad\xf2\
\x30\x6b\xd5\x52\x38\xf3\xfd\x71\x7d\xd2\x47\x6f\x5d\xdc\xef\x2f\
\xd0\x46\xf2\x30\x2e\xc8\xe3\x0a\xe1\x3a\x81\xfe\xfe\x5c\x78\x4d\
\xf8\x48\x80\xde\x16\x3e\x10\x98\x2d\x21\xe4\xf5\x99\x30\x7d\xd9\
\x65\xfa\x15\x62\x9c\xe5\x73\x02\xbc\xd0\xa9\x39\x02\x75\x94\x45\
\x6e\x2f\xea\x30\x62\x80\x68\xb4\xa3\xe0\xcc\x28\xdb\x08\x6f\x64\
\xd2\xf0\xcc\xcc\x00\x78\x75\x06\x1c\x2f\xcf\xd4\x46\xe7\x10\x1a\
\xc2\xbe\x4f\xf0\x12\xf7\xf0\x4a\xdc\xc7\x5b\x3c\x95\x01\xf9\x10\
\xc8\x1f\x85\x3d\x84\xfb\x49\xc8\x10\x0a\x40\x7b\xfc\x8c\x6d\x90\
\xd2\xa9\x1b\x41\x3a\x44\x1d\xa4\x31\xc3\x39\xc6\xc1\x3d\xda\x0b\
\xe8\x8b\x1f\x21\x70\x67\x20\x73\x95\xa7\x1d\xe5\x10\xca\x4f\x3d\
\xee\x76\xa1\xe4\x28\xac\x43\x18\x22\xce\x02\x65\x79\x57\xc0\x83\
\xce\x10\x18\x6c\x2f\x91\x76\xa1\x70\xa2\x80\x87\xc5\x2b\xcf\x16\
\xce\x16\x90\x1b\x46\xe5\x18\x9a\xbe\xa6\x9f\x17\x90\xbb\x1f\x75\
\x53\xe2\x12\x81\x71\x74\x88\xfc\x84\xb8\x7e\x4e\x01\x59\xb9\xd3\
\x71\x2a\xf4\x83\x7a\x1d\xa2\xbc\x37\x22\x70\xee\x21\x0b\x8c\x96\
\x3a\x1d\xc2\x28\x01\x7c\x69\x0f\xdf\x1d\x82\x0f\xf2\x77\xc6\x8f\
\xfa\x29\xcb\x98\x3a\x84\xce\x22\x4f\x08\x3d\x80\xbf\x9f\x71\x66\
\xb2\x04\xfb\xf0\xf3\xcc\x78\x02\xe2\xbe\x61\x82\xd3\xd8\x4b\xf5\
\x9d\xc1\xc0\x53\xbc\x2e\xd0\x60\x0c\x66\x13\x81\xb0\x05\x72\x3c\
\xca\x60\x7d\xc7\x10\xbc\x5e\x87\xeb\x6a\xe1\x56\xe1\xc0\x74\x89\
\xe5\x34\x4b\x5f\xe9\x9c\x57\x81\xf1\x06\x6f\x09\xbb\x08\xbd\x5c\
\xf9\x11\xe0\x35\x02\x9e\xd3\x21\xf8\x23\x10\x06\x0a\xc5\xe8\xe7\
\xba\x57\xa9\xef\x0c\x22\x7d\x83\x10\x1e\x86\xe0\x10\xb3\x1f\x46\
\x05\x21\x78\x94\x78\xfd\xcc\x35\x1f\x94\x67\x76\xf3\xf6\xc9\x95\
\xa5\xe0\x57\x14\x06\x05\xa4\x2e\x87\x30\x7c\x78\x3b\x7c\xfb\xeb\
\x3b\x6d\x44\x0e\xc8\x91\x19\xe0\x32\xc1\xad\xb8\xba\x4c\x13\xce\
\x89\xb0\x8f\xb0\x64\x5f\xe1\x70\x01\x19\x1d\x2a\xa0\x4c\xf4\xc3\
\x5d\xd7\x06\xba\x3e\x43\xf0\x1b\x1b\x66\x0f\xc6\x86\xd8\xdd\xa1\
\x0d\xf4\x85\x19\x8c\xba\x91\x17\xba\xe2\x18\x05\xb2\xa4\xed\x4e\
\xbb\x99\x05\x31\x50\x66\x6f\x87\xe8\x8b\x7b\xe6\xc3\x19\x3b\x32\
\x67\x66\x86\x9f\x3b\x62\xd8\x5e\xd7\x27\x64\xd2\x99\x0d\xd6\x13\
\x9c\xfa\x30\x28\x66\x48\xda\xb9\x42\xc9\xcf\x40\x50\xca\xdf\x0b\
\xc4\x74\xe3\x04\x84\x8c\x51\x8c\x16\xe8\x30\x9e\x1e\x7a\x56\xc0\
\x63\xdd\x20\xec\x25\x10\xbe\x9c\x29\xe0\xcd\xb0\x66\xaf\xf5\x52\
\xd7\x67\x02\xe5\x30\xb8\x63\x85\x2d\x84\x1d\x84\x31\x02\x83\x80\
\xb7\xf4\xd2\xbd\x4a\x58\x4b\xb8\x5e\xd8\x59\x60\x06\x1a\x2f\xa0\
\x20\x3c\xbf\x40\x78\x12\x84\xfd\x33\x01\x43\xa3\x8e\xa3\x84\x1d\
\x05\x04\xfd\x4b\xc1\xed\x41\x3f\xd1\x35\x46\x07\x2f\xea\x3f\x5e\
\x60\x00\x21\x78\x51\x7e\x84\x40\x7d\xeb\x08\xa7\x09\x1b\x09\xb4\
\xb1\x54\x72\x42\x18\x14\x7a\x53\x01\x03\x44\x5e\x35\x82\xa3\x68\
\x84\x10\xc8\xff\x4f\xc2\x7d\x02\xf2\x40\x8e\x5e\x59\x2a\x29\xbd\
\xd7\x74\xa3\x40\x0c\x8e\x41\x75\x11\xe8\x03\xc6\x01\x5e\x10\x98\
\x81\xe8\x1f\xca\x3a\x4a\x38\x4c\xc0\xb9\x35\x0b\x10\x63\x46\xd9\
\x5a\x01\x63\x3b\x49\x40\x11\x19\x7b\xc6\xf4\x49\x01\xc2\x9b\xe3\
\x40\xa8\x0b\xb9\x9f\x2c\xfc\x44\x70\xe4\xf1\xb2\xbe\xa3\x1b\xa7\
\x0a\x94\x1f\x22\x20\x7f\xb7\x43\x39\x57\xd7\x8c\x25\xe3\x84\xfc\
\xdf\x11\xce\x12\x70\xb0\x84\x83\x13\x04\xda\x8a\xee\x3c\x25\xe0\
\x8c\x0f\x11\xe8\xe7\xff\x08\x8c\xe3\x64\xc1\x21\x3f\x99\xb8\x6e\
\xfb\xca\xcc\x7d\x3f\xd0\x77\x47\x29\xbc\x99\xf1\x5c\x08\x9c\xe9\
\x1a\xa1\x22\x88\x0f\x04\x94\xc7\x11\x0a\x56\x4e\x07\xc7\x0a\x37\
\x09\x08\x03\xcf\x70\x9d\x80\xe2\x39\x5e\x6f\x9e\xbe\xcf\x12\x28\
\xc7\xc0\x5c\x29\xc0\xe7\x62\xc1\x31\xd0\xcf\xf5\x9d\xba\xde\x17\
\xbc\xc4\x0c\x82\x82\x63\x54\x77\x09\xd4\x83\xc7\x42\xe0\xaf\x64\
\x32\x4f\xd7\x27\x0a\x0d\x4f\x94\xf9\x56\x01\x8f\x46\xbb\x50\x08\
\x66\x94\x4b\x04\xca\x41\xf7\x08\x03\x85\x49\x02\x6d\x46\x11\xf9\
\x74\x14\xe7\x0f\xfa\x8e\x02\xff\x56\x40\xd9\x3e\x16\x1e\x17\xdc\
\xe1\x91\x2e\x8b\x22\xda\xc1\xac\x07\x50\x14\xf8\x62\x34\xcc\x18\
\xce\x60\x4f\xd5\xf7\xa3\x85\x2b\x04\x0c\x05\x8f\x8b\x8c\xae\x16\
\x9e\x16\x68\xbf\xe3\x85\x5f\xd3\xf7\x5b\x84\xf3\x05\xe4\x83\x02\
\x93\x86\x71\x41\x18\x0f\xb3\x05\xb2\x40\xf6\xf4\x0f\x23\xa1\x5e\
\x42\x21\x9c\x00\x8a\xce\xb8\x3e\x2f\xe0\x00\x2f\x10\x30\x02\xf4\
\xe2\x61\xe1\x21\x01\xfa\x48\xb8\x4d\x20\x3f\x8e\x0d\xe5\x7e\x44\
\xa8\xe3\xa6\x08\xf9\x24\x85\x8b\x04\x66\x33\xe4\x34\x45\x58\x43\
\xa0\xdf\x10\x8e\xa0\x9f\x80\x21\x31\xfe\x97\x09\x38\xe0\xfb\x04\
\xfa\x4f\x3b\x90\x0d\xf4\x9c\x30\x40\xb8\x50\xa0\x4d\x8c\x39\x06\
\x44\xff\x20\xf8\x7f\x2a\x38\xbc\xd1\x35\x8c\xee\xbb\x65\xb7\xd3\
\x7f\x67\x0b\x65\xcf\x38\x85\xac\xb0\x97\x2a\x59\x57\xa0\x01\x0b\
\x04\x3c\x0c\x5e\x05\x01\x39\x84\xe7\xc1\xba\x31\xa8\x2f\x04\x84\
\xc6\x54\xbd\x44\x40\xe1\xf0\x50\x0c\x2a\x1e\xdc\xf1\x28\x4c\x9d\
\x6b\x0b\x4c\xeb\x08\x8b\x72\xe4\xcf\x47\xdd\x75\x93\x7a\x28\x4b\
\xe7\xdd\xca\x4a\x3f\xe0\xc5\x3d\x14\x0e\x85\x60\x20\x18\x10\x06\
\x69\x8e\x40\xfd\x6e\xa2\x8d\xf4\x0d\xe3\xf9\x5a\x80\x3f\x3c\x1d\
\xa1\xa3\x70\x18\x09\xca\xf2\x99\x00\x6f\xbc\x1b\xed\xcd\x47\x28\
\x21\x7d\x46\xe1\xfc\x88\xf0\xb0\xbf\x80\x92\x42\x28\xf4\x79\x02\
\x0a\x75\x93\x80\x9c\x7f\x93\xbe\xb3\xac\x4e\x14\x04\xcf\x8a\xe2\
\xd2\x06\xfa\x4a\x9b\x21\x8c\xa7\xb7\xb0\x96\x80\xe2\xe3\x68\x1c\
\xc7\xc4\x7d\xe4\xee\xc8\xc0\x2b\x33\xe4\x82\xcc\x18\x2f\x47\xb1\
\xb8\xc6\x63\xc3\x1f\x99\x39\xb2\xd0\xd7\xb4\x1c\x90\x17\xe5\x66\
\x09\xb4\x05\x39\x3b\xe3\x46\xbb\x68\x07\xed\x41\x9e\xf4\x03\xdd\
\x80\x17\xe3\x8e\xbc\x91\x29\x86\xea\xf0\x45\x4e\xb4\x0f\x82\xa7\
\x5b\xc1\xe9\x1b\x6d\xc1\xd1\xc1\x8f\x71\x75\xf4\x07\xbe\xe8\x1b\
\x75\x90\x46\x5b\x70\x06\xc8\xc0\xe9\x3f\xf5\x51\x4f\x21\xbd\x52\
\x96\xf0\xc8\x19\xa0\xf0\x38\x76\x3c\x4e\xc3\xd5\x25\x94\x39\x08\
\x6d\xa0\x4c\xef\x0a\x7b\x08\x28\xd8\x63\x02\xb3\x05\x83\x8b\x12\
\xf4\x14\xae\x12\xf0\xa8\x28\x66\x44\x2b\x58\x02\x78\x85\x62\xc8\
\xed\x51\x8a\x29\xb7\x3a\xe5\x7d\xa6\x88\xce\x32\x23\x11\xdf\x13\
\x4e\xe1\x09\x09\x63\x2e\x17\x9e\x10\xf0\xbc\xcc\x36\xcc\x84\xe3\
\x04\xf2\x44\xb4\x82\x25\x50\x28\xc4\x5a\xc1\xcd\x59\xed\xaa\xc3\
\x41\x11\x6a\x12\xe6\x10\xba\x31\x1e\x84\x15\x9b\x0a\x18\x07\xe1\
\xc8\x87\x42\xd9\xb1\xb4\x78\x44\x14\x49\x20\x92\x40\x24\x81\x48\
\x02\x91\x04\x22\x09\x44\x12\x88\x24\x10\x49\x20\x92\x40\x24\x81\
\x48\x02\x91\x04\x22\x09\x44\x12\x88\x24\x10\x49\x20\x92\x40\x24\
\x81\x48\x02\x91\x04\x56\x37\x09\xfc\x17\x56\x4e\x4f\x7c\x3f\x83\
\x3c\x91\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xfd\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdd\x04\x17\x06\x0f\x24\xe5\x2c\xe8\x1b\x00\x00\x01\x8a\x49\x44\
\x41\x54\x58\xc3\xed\x96\x31\x52\xc2\x50\x10\x86\xff\xff\x11\xc5\
\x46\x4e\xc0\x05\xf4\x02\xd8\xc9\x0c\xc6\x19\xea\x5c\x80\xd6\x26\
\x36\x10\x1b\x4a\x1a\x90\xc6\x54\x5e\x82\xca\x82\x99\xa0\x33\xb6\
\x5c\x00\x2f\x10\x2f\x10\x1b\x1c\x34\x6b\x11\x64\x44\x60\xf2\x1e\
\x43\x26\x4d\xb6\x4c\x36\xbb\xdf\xdb\xfc\xff\xce\xa3\x88\x20\xcf\
\x50\xc8\x39\x0a\x80\xdc\x01\x2c\xd3\x0f\xd8\x0e\x6c\x90\x5d\x90\
\xd5\xb5\x17\x22\x21\x48\x5f\x06\x57\x23\xa3\x7a\x26\x2e\x60\x3b\
\xb0\xa1\xd4\x63\x4a\x9a\x67\x02\x61\xf6\x0b\xc8\xae\x46\x56\x37\
\x3b\x0d\xfc\x1f\xfb\xf6\xa8\xb0\xfd\x72\x9e\xaf\x08\xf9\x7d\x7a\
\x70\x00\xba\xe3\x4a\xbe\x36\x2c\x97\xce\xf2\x05\x20\x2f\x0c\xca\
\x3a\x07\x05\x48\x44\x45\x57\x1f\x16\x0e\xbd\x67\x67\xaf\x3d\xc0\
\x4e\x50\xdb\x38\x0d\xe1\xec\x35\x5f\xc1\x08\x12\x4f\x40\x44\xab\
\x67\x9f\xdf\x6f\xe2\x37\xa3\x0d\x00\x7a\x93\x5b\x80\x2d\x00\x69\
\x62\x8b\x00\xf4\x30\x5f\x4c\x7e\x0b\xd1\x1d\x57\x70\x72\x64\x43\
\xc4\xd5\xb3\xaa\xf8\x32\xb0\x1f\x56\x00\xcb\xe6\x3a\x23\x8e\x30\
\x5f\x5c\xfe\x3d\xc1\x86\x53\xca\xd6\x93\x09\xc4\x52\x03\x6c\x69\
\x0e\xb5\xb7\xab\x79\x52\xb2\x19\x81\xf4\x75\x8d\x0d\x00\x6a\xb9\
\xb5\xf4\x3c\x2e\x71\x98\x5e\xd7\x9a\x6a\x6b\xb5\x13\xd4\x94\xc9\
\xd6\xd2\x62\xec\xd7\xc3\x6c\xf6\x40\x66\x17\x12\x75\xfc\xae\x7d\
\xba\xfb\xeb\xe9\x41\xbb\x4b\xe9\x43\x49\xbf\x1e\x42\x44\x67\x6c\
\x26\xcd\xd3\x73\x45\x42\x19\x36\x66\xc9\x2f\x10\x75\xb3\xf4\xf7\
\x6e\xfb\xc5\xec\x69\xb7\x4f\x72\xa3\x14\x02\x6f\x7d\x11\xdd\xbd\
\x56\x11\x7f\xb9\x80\xd4\x56\x3e\x16\x09\x01\x4e\xa1\x2c\xdf\x54\
\x5c\x5b\xeb\x01\x11\xc0\x29\x62\xf8\x32\x6c\xcc\x8c\xaf\x64\xc5\
\xb5\xbc\x00\x28\x00\xb2\x88\x1f\x66\x62\x9a\xe2\x52\x1b\xa9\xf7\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x09\
\x00\x9a\xd9\x74\
\x00\x71\
\x00\x67\x00\x69\x00\x73\x00\x63\x00\x6c\x00\x6f\x00\x75\x00\x64\
\x00\x08\
\x05\xe2\x59\x27\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x2c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x42\x00\x00\x00\x00\x00\x01\x00\x00\x1c\x63\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
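# Note (not part of the generated file): once qInitResources() has run, the
# embedded images are reachable through Qt's resource system, e.g. (assuming
# QtGui is imported alongside the QtCore import this module already relies on):
#     icon = QtGui.QIcon(':/plugins/qgiscloud/icon.png')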
|
gpl-2.0
| -8,903,654,001,400,234,000 | -1,262,675,634,783,502,300 | 61.011173 | 96 | 0.726486 | false |
public-ink/public-ink
|
server/appengine/lib/matplotlib/_color_data.py
|
12
|
34896
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import OrderedDict
import six
BASE_COLORS = {
'b': (0, 0, 1),
'g': (0, 0.5, 0),
'r': (1, 0, 0),
'c': (0, 0.75, 0.75),
'm': (0.75, 0, 0.75),
'y': (0.75, 0.75, 0),
'k': (0, 0, 0),
'w': (1, 1, 1)}
# These colors are from Tableau
TABLEAU_COLORS = (
('blue', '#1f77b4'),
('orange', '#ff7f0e'),
('green', '#2ca02c'),
('red', '#d62728'),
('purple', '#9467bd'),
('brown', '#8c564b'),
('pink', '#e377c2'),
('gray', '#7f7f7f'),
('olive', '#bcbd22'),
('cyan', '#17becf'),
)
# Normalize name to "tab:<name>" to avoid name collisions.
TABLEAU_COLORS = OrderedDict(
('tab:' + name, value) for name, value in TABLEAU_COLORS)
# This mapping of color names -> hex values is taken from
# a survey run by Randall Munroe; see:
# http://blog.xkcd.com/2010/05/03/color-survey-results/
# for more details. The results are hosted at
# https://xkcd.com/color/rgb.txt
#
# License: http://creativecommons.org/publicdomain/zero/1.0/
XKCD_COLORS = {
'cloudy blue': '#acc2d9',
'dark pastel green': '#56ae57',
'dust': '#b2996e',
'electric lime': '#a8ff04',
'fresh green': '#69d84f',
'light eggplant': '#894585',
'nasty green': '#70b23f',
'really light blue': '#d4ffff',
'tea': '#65ab7c',
'warm purple': '#952e8f',
'yellowish tan': '#fcfc81',
'cement': '#a5a391',
'dark grass green': '#388004',
'dusty teal': '#4c9085',
'grey teal': '#5e9b8a',
'macaroni and cheese': '#efb435',
'pinkish tan': '#d99b82',
'spruce': '#0a5f38',
'strong blue': '#0c06f7',
'toxic green': '#61de2a',
'windows blue': '#3778bf',
'blue blue': '#2242c7',
'blue with a hint of purple': '#533cc6',
'booger': '#9bb53c',
'bright sea green': '#05ffa6',
'dark green blue': '#1f6357',
'deep turquoise': '#017374',
'green teal': '#0cb577',
'strong pink': '#ff0789',
'bland': '#afa88b',
'deep aqua': '#08787f',
'lavender pink': '#dd85d7',
'light moss green': '#a6c875',
'light seafoam green': '#a7ffb5',
'olive yellow': '#c2b709',
'pig pink': '#e78ea5',
'deep lilac': '#966ebd',
'desert': '#ccad60',
'dusty lavender': '#ac86a8',
'purpley grey': '#947e94',
'purply': '#983fb2',
'candy pink': '#ff63e9',
'light pastel green': '#b2fba5',
'boring green': '#63b365',
'kiwi green': '#8ee53f',
'light grey green': '#b7e1a1',
'orange pink': '#ff6f52',
'tea green': '#bdf8a3',
'very light brown': '#d3b683',
'egg shell': '#fffcc4',
'eggplant purple': '#430541',
'powder pink': '#ffb2d0',
'reddish grey': '#997570',
'baby shit brown': '#ad900d',
'liliac': '#c48efd',
'stormy blue': '#507b9c',
'ugly brown': '#7d7103',
'custard': '#fffd78',
'darkish pink': '#da467d',
'deep brown': '#410200',
'greenish beige': '#c9d179',
'manilla': '#fffa86',
'off blue': '#5684ae',
'battleship grey': '#6b7c85',
'browny green': '#6f6c0a',
'bruise': '#7e4071',
'kelley green': '#009337',
'sickly yellow': '#d0e429',
'sunny yellow': '#fff917',
'azul': '#1d5dec',
'darkgreen': '#054907',
'green/yellow': '#b5ce08',
'lichen': '#8fb67b',
'light light green': '#c8ffb0',
'pale gold': '#fdde6c',
'sun yellow': '#ffdf22',
'tan green': '#a9be70',
'burple': '#6832e3',
'butterscotch': '#fdb147',
'toupe': '#c7ac7d',
'dark cream': '#fff39a',
'indian red': '#850e04',
'light lavendar': '#efc0fe',
'poison green': '#40fd14',
'baby puke green': '#b6c406',
'bright yellow green': '#9dff00',
'charcoal grey': '#3c4142',
'squash': '#f2ab15',
'cinnamon': '#ac4f06',
'light pea green': '#c4fe82',
'radioactive green': '#2cfa1f',
'raw sienna': '#9a6200',
'baby purple': '#ca9bf7',
'cocoa': '#875f42',
'light royal blue': '#3a2efe',
'orangeish': '#fd8d49',
'rust brown': '#8b3103',
'sand brown': '#cba560',
'swamp': '#698339',
'tealish green': '#0cdc73',
'burnt siena': '#b75203',
'camo': '#7f8f4e',
'dusk blue': '#26538d',
'fern': '#63a950',
'old rose': '#c87f89',
'pale light green': '#b1fc99',
'peachy pink': '#ff9a8a',
'rosy pink': '#f6688e',
'light bluish green': '#76fda8',
'light bright green': '#53fe5c',
'light neon green': '#4efd54',
'light seafoam': '#a0febf',
'tiffany blue': '#7bf2da',
'washed out green': '#bcf5a6',
'browny orange': '#ca6b02',
'nice blue': '#107ab0',
'sapphire': '#2138ab',
'greyish teal': '#719f91',
'orangey yellow': '#fdb915',
'parchment': '#fefcaf',
'straw': '#fcf679',
'very dark brown': '#1d0200',
'terracota': '#cb6843',
'ugly blue': '#31668a',
'clear blue': '#247afd',
'creme': '#ffffb6',
'foam green': '#90fda9',
'grey/green': '#86a17d',
'light gold': '#fddc5c',
'seafoam blue': '#78d1b6',
'topaz': '#13bbaf',
'violet pink': '#fb5ffc',
'wintergreen': '#20f986',
'yellow tan': '#ffe36e',
'dark fuchsia': '#9d0759',
'indigo blue': '#3a18b1',
'light yellowish green': '#c2ff89',
'pale magenta': '#d767ad',
'rich purple': '#720058',
'sunflower yellow': '#ffda03',
'green/blue': '#01c08d',
'leather': '#ac7434',
'racing green': '#014600',
'vivid purple': '#9900fa',
'dark royal blue': '#02066f',
'hazel': '#8e7618',
'muted pink': '#d1768f',
'booger green': '#96b403',
'canary': '#fdff63',
'cool grey': '#95a3a6',
'dark taupe': '#7f684e',
'darkish purple': '#751973',
'true green': '#089404',
'coral pink': '#ff6163',
'dark sage': '#598556',
'dark slate blue': '#214761',
'flat blue': '#3c73a8',
'mushroom': '#ba9e88',
'rich blue': '#021bf9',
'dirty purple': '#734a65',
'greenblue': '#23c48b',
'icky green': '#8fae22',
'light khaki': '#e6f2a2',
'warm blue': '#4b57db',
'dark hot pink': '#d90166',
'deep sea blue': '#015482',
'carmine': '#9d0216',
'dark yellow green': '#728f02',
'pale peach': '#ffe5ad',
'plum purple': '#4e0550',
'golden rod': '#f9bc08',
'neon red': '#ff073a',
'old pink': '#c77986',
'very pale blue': '#d6fffe',
'blood orange': '#fe4b03',
'grapefruit': '#fd5956',
'sand yellow': '#fce166',
'clay brown': '#b2713d',
'dark blue grey': '#1f3b4d',
'flat green': '#699d4c',
'light green blue': '#56fca2',
'warm pink': '#fb5581',
'dodger blue': '#3e82fc',
'gross green': '#a0bf16',
'ice': '#d6fffa',
'metallic blue': '#4f738e',
'pale salmon': '#ffb19a',
'sap green': '#5c8b15',
'algae': '#54ac68',
'bluey grey': '#89a0b0',
'greeny grey': '#7ea07a',
'highlighter green': '#1bfc06',
'light light blue': '#cafffb',
'light mint': '#b6ffbb',
'raw umber': '#a75e09',
'vivid blue': '#152eff',
'deep lavender': '#8d5eb7',
'dull teal': '#5f9e8f',
'light greenish blue': '#63f7b4',
'mud green': '#606602',
'pinky': '#fc86aa',
'red wine': '#8c0034',
'shit green': '#758000',
'tan brown': '#ab7e4c',
'darkblue': '#030764',
'rosa': '#fe86a4',
'lipstick': '#d5174e',
'pale mauve': '#fed0fc',
'claret': '#680018',
'dandelion': '#fedf08',
'orangered': '#fe420f',
'poop green': '#6f7c00',
'ruby': '#ca0147',
'dark': '#1b2431',
'greenish turquoise': '#00fbb0',
'pastel red': '#db5856',
'piss yellow': '#ddd618',
'bright cyan': '#41fdfe',
'dark coral': '#cf524e',
'algae green': '#21c36f',
'darkish red': '#a90308',
'reddy brown': '#6e1005',
'blush pink': '#fe828c',
'camouflage green': '#4b6113',
'lawn green': '#4da409',
'putty': '#beae8a',
'vibrant blue': '#0339f8',
'dark sand': '#a88f59',
'purple/blue': '#5d21d0',
'saffron': '#feb209',
'twilight': '#4e518b',
'warm brown': '#964e02',
'bluegrey': '#85a3b2',
'bubble gum pink': '#ff69af',
'duck egg blue': '#c3fbf4',
'greenish cyan': '#2afeb7',
'petrol': '#005f6a',
'royal': '#0c1793',
'butter': '#ffff81',
'dusty orange': '#f0833a',
'off yellow': '#f1f33f',
'pale olive green': '#b1d27b',
'orangish': '#fc824a',
'leaf': '#71aa34',
'light blue grey': '#b7c9e2',
'dried blood': '#4b0101',
'lightish purple': '#a552e6',
'rusty red': '#af2f0d',
'lavender blue': '#8b88f8',
'light grass green': '#9af764',
'light mint green': '#a6fbb2',
'sunflower': '#ffc512',
'velvet': '#750851',
'brick orange': '#c14a09',
'lightish red': '#fe2f4a',
'pure blue': '#0203e2',
'twilight blue': '#0a437a',
'violet red': '#a50055',
'yellowy brown': '#ae8b0c',
'carnation': '#fd798f',
'muddy yellow': '#bfac05',
'dark seafoam green': '#3eaf76',
'deep rose': '#c74767',
'dusty red': '#b9484e',
'grey/blue': '#647d8e',
'lemon lime': '#bffe28',
'purple/pink': '#d725de',
'brown yellow': '#b29705',
'purple brown': '#673a3f',
'wisteria': '#a87dc2',
'banana yellow': '#fafe4b',
'lipstick red': '#c0022f',
'water blue': '#0e87cc',
'brown grey': '#8d8468',
'vibrant purple': '#ad03de',
'baby green': '#8cff9e',
'barf green': '#94ac02',
'eggshell blue': '#c4fff7',
'sandy yellow': '#fdee73',
'cool green': '#33b864',
'pale': '#fff9d0',
'blue/grey': '#758da3',
'hot magenta': '#f504c9',
'greyblue': '#77a1b5',
'purpley': '#8756e4',
'baby shit green': '#889717',
'brownish pink': '#c27e79',
'dark aquamarine': '#017371',
'diarrhea': '#9f8303',
'light mustard': '#f7d560',
'pale sky blue': '#bdf6fe',
'turtle green': '#75b84f',
'bright olive': '#9cbb04',
'dark grey blue': '#29465b',
'greeny brown': '#696006',
'lemon green': '#adf802',
'light periwinkle': '#c1c6fc',
'seaweed green': '#35ad6b',
'sunshine yellow': '#fffd37',
'ugly purple': '#a442a0',
'medium pink': '#f36196',
'puke brown': '#947706',
'very light pink': '#fff4f2',
'viridian': '#1e9167',
'bile': '#b5c306',
'faded yellow': '#feff7f',
'very pale green': '#cffdbc',
'vibrant green': '#0add08',
'bright lime': '#87fd05',
'spearmint': '#1ef876',
'light aquamarine': '#7bfdc7',
'light sage': '#bcecac',
'yellowgreen': '#bbf90f',
'baby poo': '#ab9004',
'dark seafoam': '#1fb57a',
'deep teal': '#00555a',
'heather': '#a484ac',
'rust orange': '#c45508',
'dirty blue': '#3f829d',
'fern green': '#548d44',
'bright lilac': '#c95efb',
'weird green': '#3ae57f',
'peacock blue': '#016795',
'avocado green': '#87a922',
'faded orange': '#f0944d',
'grape purple': '#5d1451',
'hot green': '#25ff29',
'lime yellow': '#d0fe1d',
'mango': '#ffa62b',
'shamrock': '#01b44c',
'bubblegum': '#ff6cb5',
'purplish brown': '#6b4247',
'vomit yellow': '#c7c10c',
'pale cyan': '#b7fffa',
'key lime': '#aeff6e',
'tomato red': '#ec2d01',
'lightgreen': '#76ff7b',
'merlot': '#730039',
'night blue': '#040348',
'purpleish pink': '#df4ec8',
'apple': '#6ecb3c',
'baby poop green': '#8f9805',
'green apple': '#5edc1f',
'heliotrope': '#d94ff5',
'yellow/green': '#c8fd3d',
'almost black': '#070d0d',
'cool blue': '#4984b8',
'leafy green': '#51b73b',
'mustard brown': '#ac7e04',
'dusk': '#4e5481',
'dull brown': '#876e4b',
'frog green': '#58bc08',
'vivid green': '#2fef10',
'bright light green': '#2dfe54',
'fluro green': '#0aff02',
'kiwi': '#9cef43',
'seaweed': '#18d17b',
'navy green': '#35530a',
'ultramarine blue': '#1805db',
'iris': '#6258c4',
'pastel orange': '#ff964f',
'yellowish orange': '#ffab0f',
'perrywinkle': '#8f8ce7',
'tealish': '#24bca8',
'dark plum': '#3f012c',
'pear': '#cbf85f',
'pinkish orange': '#ff724c',
'midnight purple': '#280137',
'light urple': '#b36ff6',
'dark mint': '#48c072',
'greenish tan': '#bccb7a',
'light burgundy': '#a8415b',
'turquoise blue': '#06b1c4',
'ugly pink': '#cd7584',
'sandy': '#f1da7a',
'electric pink': '#ff0490',
'muted purple': '#805b87',
'mid green': '#50a747',
'greyish': '#a8a495',
'neon yellow': '#cfff04',
'banana': '#ffff7e',
'carnation pink': '#ff7fa7',
'tomato': '#ef4026',
'sea': '#3c9992',
'muddy brown': '#886806',
'turquoise green': '#04f489',
'buff': '#fef69e',
'fawn': '#cfaf7b',
'muted blue': '#3b719f',
'pale rose': '#fdc1c5',
'dark mint green': '#20c073',
'amethyst': '#9b5fc0',
'blue/green': '#0f9b8e',
'chestnut': '#742802',
'sick green': '#9db92c',
'pea': '#a4bf20',
'rusty orange': '#cd5909',
'stone': '#ada587',
'rose red': '#be013c',
'pale aqua': '#b8ffeb',
'deep orange': '#dc4d01',
'earth': '#a2653e',
'mossy green': '#638b27',
'grassy green': '#419c03',
'pale lime green': '#b1ff65',
'light grey blue': '#9dbcd4',
'pale grey': '#fdfdfe',
'asparagus': '#77ab56',
'blueberry': '#464196',
'purple red': '#990147',
'pale lime': '#befd73',
'greenish teal': '#32bf84',
'caramel': '#af6f09',
'deep magenta': '#a0025c',
'light peach': '#ffd8b1',
'milk chocolate': '#7f4e1e',
'ocher': '#bf9b0c',
'off green': '#6ba353',
'purply pink': '#f075e6',
'lightblue': '#7bc8f6',
'dusky blue': '#475f94',
'golden': '#f5bf03',
'light beige': '#fffeb6',
'butter yellow': '#fffd74',
'dusky purple': '#895b7b',
'french blue': '#436bad',
'ugly yellow': '#d0c101',
'greeny yellow': '#c6f808',
'orangish red': '#f43605',
'shamrock green': '#02c14d',
'orangish brown': '#b25f03',
'tree green': '#2a7e19',
'deep violet': '#490648',
'gunmetal': '#536267',
'blue/purple': '#5a06ef',
'cherry': '#cf0234',
'sandy brown': '#c4a661',
'warm grey': '#978a84',
'dark indigo': '#1f0954',
'midnight': '#03012d',
'bluey green': '#2bb179',
'grey pink': '#c3909b',
'soft purple': '#a66fb5',
'blood': '#770001',
'brown red': '#922b05',
'medium grey': '#7d7f7c',
'berry': '#990f4b',
'poo': '#8f7303',
'purpley pink': '#c83cb9',
'light salmon': '#fea993',
'snot': '#acbb0d',
'easter purple': '#c071fe',
'light yellow green': '#ccfd7f',
'dark navy blue': '#00022e',
'drab': '#828344',
'light rose': '#ffc5cb',
'rouge': '#ab1239',
'purplish red': '#b0054b',
'slime green': '#99cc04',
'baby poop': '#937c00',
'irish green': '#019529',
'pink/purple': '#ef1de7',
'dark navy': '#000435',
'greeny blue': '#42b395',
'light plum': '#9d5783',
'pinkish grey': '#c8aca9',
'dirty orange': '#c87606',
'rust red': '#aa2704',
'pale lilac': '#e4cbff',
'orangey red': '#fa4224',
'primary blue': '#0804f9',
'kermit green': '#5cb200',
'brownish purple': '#76424e',
'murky green': '#6c7a0e',
'wheat': '#fbdd7e',
'very dark purple': '#2a0134',
'bottle green': '#044a05',
'watermelon': '#fd4659',
'deep sky blue': '#0d75f8',
'fire engine red': '#fe0002',
'yellow ochre': '#cb9d06',
'pumpkin orange': '#fb7d07',
'pale olive': '#b9cc81',
'light lilac': '#edc8ff',
'lightish green': '#61e160',
'carolina blue': '#8ab8fe',
'mulberry': '#920a4e',
'shocking pink': '#fe02a2',
'auburn': '#9a3001',
'bright lime green': '#65fe08',
'celadon': '#befdb7',
'pinkish brown': '#b17261',
'poo brown': '#885f01',
'bright sky blue': '#02ccfe',
'celery': '#c1fd95',
'dirt brown': '#836539',
'strawberry': '#fb2943',
'dark lime': '#84b701',
'copper': '#b66325',
'medium brown': '#7f5112',
'muted green': '#5fa052',
"robin's egg": '#6dedfd',
'bright aqua': '#0bf9ea',
'bright lavender': '#c760ff',
'ivory': '#ffffcb',
'very light purple': '#f6cefc',
'light navy': '#155084',
'pink red': '#f5054f',
'olive brown': '#645403',
'poop brown': '#7a5901',
'mustard green': '#a8b504',
'ocean green': '#3d9973',
'very dark blue': '#000133',
'dusty green': '#76a973',
'light navy blue': '#2e5a88',
'minty green': '#0bf77d',
'adobe': '#bd6c48',
'barney': '#ac1db8',
'jade green': '#2baf6a',
'bright light blue': '#26f7fd',
'light lime': '#aefd6c',
'dark khaki': '#9b8f55',
'orange yellow': '#ffad01',
'ocre': '#c69c04',
'maize': '#f4d054',
'faded pink': '#de9dac',
'british racing green': '#05480d',
'sandstone': '#c9ae74',
'mud brown': '#60460f',
'light sea green': '#98f6b0',
'robin egg blue': '#8af1fe',
'aqua marine': '#2ee8bb',
'dark sea green': '#11875d',
'soft pink': '#fdb0c0',
'orangey brown': '#b16002',
'cherry red': '#f7022a',
'burnt yellow': '#d5ab09',
'brownish grey': '#86775f',
'camel': '#c69f59',
'purplish grey': '#7a687f',
'marine': '#042e60',
'greyish pink': '#c88d94',
'pale turquoise': '#a5fbd5',
'pastel yellow': '#fffe71',
'bluey purple': '#6241c7',
'canary yellow': '#fffe40',
'faded red': '#d3494e',
'sepia': '#985e2b',
'coffee': '#a6814c',
'bright magenta': '#ff08e8',
'mocha': '#9d7651',
'ecru': '#feffca',
'purpleish': '#98568d',
'cranberry': '#9e003a',
'darkish green': '#287c37',
'brown orange': '#b96902',
'dusky rose': '#ba6873',
'melon': '#ff7855',
'sickly green': '#94b21c',
'silver': '#c5c9c7',
'purply blue': '#661aee',
'purpleish blue': '#6140ef',
'hospital green': '#9be5aa',
'shit brown': '#7b5804',
'mid blue': '#276ab3',
'amber': '#feb308',
'easter green': '#8cfd7e',
'soft blue': '#6488ea',
'cerulean blue': '#056eee',
'golden brown': '#b27a01',
'bright turquoise': '#0ffef9',
'red pink': '#fa2a55',
'red purple': '#820747',
'greyish brown': '#7a6a4f',
'vermillion': '#f4320c',
'russet': '#a13905',
'steel grey': '#6f828a',
'lighter purple': '#a55af4',
'bright violet': '#ad0afd',
'prussian blue': '#004577',
'slate green': '#658d6d',
'dirty pink': '#ca7b80',
'dark blue green': '#005249',
'pine': '#2b5d34',
'yellowy green': '#bff128',
'dark gold': '#b59410',
'bluish': '#2976bb',
'darkish blue': '#014182',
'dull red': '#bb3f3f',
'pinky red': '#fc2647',
'bronze': '#a87900',
'pale teal': '#82cbb2',
'military green': '#667c3e',
'barbie pink': '#fe46a5',
'bubblegum pink': '#fe83cc',
'pea soup green': '#94a617',
'dark mustard': '#a88905',
'shit': '#7f5f00',
'medium purple': '#9e43a2',
'very dark green': '#062e03',
'dirt': '#8a6e45',
'dusky pink': '#cc7a8b',
'red violet': '#9e0168',
'lemon yellow': '#fdff38',
'pistachio': '#c0fa8b',
'dull yellow': '#eedc5b',
'dark lime green': '#7ebd01',
'denim blue': '#3b5b92',
'teal blue': '#01889f',
'lightish blue': '#3d7afd',
'purpley blue': '#5f34e7',
'light indigo': '#6d5acf',
'swamp green': '#748500',
'brown green': '#706c11',
'dark maroon': '#3c0008',
'hot purple': '#cb00f5',
'dark forest green': '#002d04',
'faded blue': '#658cbb',
'drab green': '#749551',
'light lime green': '#b9ff66',
'snot green': '#9dc100',
'yellowish': '#faee66',
'light blue green': '#7efbb3',
'bordeaux': '#7b002c',
'light mauve': '#c292a1',
'ocean': '#017b92',
'marigold': '#fcc006',
'muddy green': '#657432',
'dull orange': '#d8863b',
'steel': '#738595',
'electric purple': '#aa23ff',
'fluorescent green': '#08ff08',
'yellowish brown': '#9b7a01',
'blush': '#f29e8e',
'soft green': '#6fc276',
'bright orange': '#ff5b00',
'lemon': '#fdff52',
'purple grey': '#866f85',
'acid green': '#8ffe09',
'pale lavender': '#eecffe',
'violet blue': '#510ac9',
'light forest green': '#4f9153',
'burnt red': '#9f2305',
'khaki green': '#728639',
'cerise': '#de0c62',
'faded purple': '#916e99',
'apricot': '#ffb16d',
'dark olive green': '#3c4d03',
'grey brown': '#7f7053',
'green grey': '#77926f',
'true blue': '#010fcc',
'pale violet': '#ceaefa',
'periwinkle blue': '#8f99fb',
'light sky blue': '#c6fcff',
'blurple': '#5539cc',
'green brown': '#544e03',
'bluegreen': '#017a79',
'bright teal': '#01f9c6',
'brownish yellow': '#c9b003',
'pea soup': '#929901',
'forest': '#0b5509',
'barney purple': '#a00498',
'ultramarine': '#2000b1',
'purplish': '#94568c',
'puke yellow': '#c2be0e',
'bluish grey': '#748b97',
'dark periwinkle': '#665fd1',
'dark lilac': '#9c6da5',
'reddish': '#c44240',
'light maroon': '#a24857',
'dusty purple': '#825f87',
'terra cotta': '#c9643b',
'avocado': '#90b134',
'marine blue': '#01386a',
'teal green': '#25a36f',
'slate grey': '#59656d',
'lighter green': '#75fd63',
'electric green': '#21fc0d',
'dusty blue': '#5a86ad',
'golden yellow': '#fec615',
'bright yellow': '#fffd01',
'light lavender': '#dfc5fe',
'umber': '#b26400',
'poop': '#7f5e00',
'dark peach': '#de7e5d',
'jungle green': '#048243',
'eggshell': '#ffffd4',
'denim': '#3b638c',
'yellow brown': '#b79400',
'dull purple': '#84597e',
'chocolate brown': '#411900',
'wine red': '#7b0323',
'neon blue': '#04d9ff',
'dirty green': '#667e2c',
'light tan': '#fbeeac',
'ice blue': '#d7fffe',
'cadet blue': '#4e7496',
'dark mauve': '#874c62',
'very light blue': '#d5ffff',
'grey purple': '#826d8c',
'pastel pink': '#ffbacd',
'very light green': '#d1ffbd',
'dark sky blue': '#448ee4',
'evergreen': '#05472a',
'dull pink': '#d5869d',
'aubergine': '#3d0734',
'mahogany': '#4a0100',
'reddish orange': '#f8481c',
'deep green': '#02590f',
'vomit green': '#89a203',
'purple pink': '#e03fd8',
'dusty pink': '#d58a94',
'faded green': '#7bb274',
'camo green': '#526525',
'pinky purple': '#c94cbe',
'pink purple': '#db4bda',
'brownish red': '#9e3623',
'dark rose': '#b5485d',
'mud': '#735c12',
'brownish': '#9c6d57',
'emerald green': '#028f1e',
'pale brown': '#b1916e',
'dull blue': '#49759c',
'burnt umber': '#a0450e',
'medium green': '#39ad48',
'clay': '#b66a50',
'light aqua': '#8cffdb',
'light olive green': '#a4be5c',
'brownish orange': '#cb7723',
'dark aqua': '#05696b',
'purplish pink': '#ce5dae',
'dark salmon': '#c85a53',
'greenish grey': '#96ae8d',
'jade': '#1fa774',
'ugly green': '#7a9703',
'dark beige': '#ac9362',
'emerald': '#01a049',
'pale red': '#d9544d',
'light magenta': '#fa5ff7',
'sky': '#82cafc',
'light cyan': '#acfffc',
'yellow orange': '#fcb001',
'reddish purple': '#910951',
'reddish pink': '#fe2c54',
'orchid': '#c875c4',
'dirty yellow': '#cdc50a',
'orange red': '#fd411e',
'deep red': '#9a0200',
'orange brown': '#be6400',
'cobalt blue': '#030aa7',
'neon pink': '#fe019a',
'rose pink': '#f7879a',
'greyish purple': '#887191',
'raspberry': '#b00149',
'aqua green': '#12e193',
'salmon pink': '#fe7b7c',
'tangerine': '#ff9408',
'brownish green': '#6a6e09',
'red brown': '#8b2e16',
'greenish brown': '#696112',
'pumpkin': '#e17701',
'pine green': '#0a481e',
'charcoal': '#343837',
'baby pink': '#ffb7ce',
'cornflower': '#6a79f7',
'blue violet': '#5d06e9',
'chocolate': '#3d1c02',
'greyish green': '#82a67d',
'scarlet': '#be0119',
'green yellow': '#c9ff27',
'dark olive': '#373e02',
'sienna': '#a9561e',
'pastel purple': '#caa0ff',
'terracotta': '#ca6641',
'aqua blue': '#02d8e9',
'sage green': '#88b378',
'blood red': '#980002',
'deep pink': '#cb0162',
'grass': '#5cac2d',
'moss': '#769958',
'pastel blue': '#a2bffe',
'bluish green': '#10a674',
'green blue': '#06b48b',
'dark tan': '#af884a',
'greenish blue': '#0b8b87',
'pale orange': '#ffa756',
'vomit': '#a2a415',
'forrest green': '#154406',
'dark lavender': '#856798',
'dark violet': '#34013f',
'purple blue': '#632de9',
'dark cyan': '#0a888a',
'olive drab': '#6f7632',
'pinkish': '#d46a7e',
'cobalt': '#1e488f',
'neon purple': '#bc13fe',
'light turquoise': '#7ef4cc',
'apple green': '#76cd26',
'dull green': '#74a662',
'wine': '#80013f',
'powder blue': '#b1d1fc',
'off white': '#ffffe4',
'electric blue': '#0652ff',
'dark turquoise': '#045c5a',
'blue purple': '#5729ce',
'azure': '#069af3',
'bright red': '#ff000d',
'pinkish red': '#f10c45',
'cornflower blue': '#5170d7',
'light olive': '#acbf69',
'grape': '#6c3461',
'greyish blue': '#5e819d',
'purplish blue': '#601ef9',
'yellowish green': '#b0dd16',
'greenish yellow': '#cdfd02',
'medium blue': '#2c6fbb',
'dusty rose': '#c0737a',
'light violet': '#d6b4fc',
'midnight blue': '#020035',
'bluish purple': '#703be7',
'red orange': '#fd3c06',
'dark magenta': '#960056',
'greenish': '#40a368',
'ocean blue': '#03719c',
'coral': '#fc5a50',
'cream': '#ffffc2',
'reddish brown': '#7f2b0a',
'burnt sienna': '#b04e0f',
'brick': '#a03623',
'sage': '#87ae73',
'grey green': '#789b73',
'white': '#ffffff',
"robin's egg blue": '#98eff9',
'moss green': '#658b38',
'steel blue': '#5a7d9a',
'eggplant': '#380835',
'light yellow': '#fffe7a',
'leaf green': '#5ca904',
'light grey': '#d8dcd6',
'puke': '#a5a502',
'pinkish purple': '#d648d7',
'sea blue': '#047495',
'pale purple': '#b790d4',
'slate blue': '#5b7c99',
'blue grey': '#607c8e',
'hunter green': '#0b4008',
'fuchsia': '#ed0dd9',
'crimson': '#8c000f',
'pale yellow': '#ffff84',
'ochre': '#bf9005',
'mustard yellow': '#d2bd0a',
'light red': '#ff474c',
'cerulean': '#0485d1',
'pale pink': '#ffcfdc',
'deep blue': '#040273',
'rust': '#a83c09',
'light teal': '#90e4c1',
'slate': '#516572',
'goldenrod': '#fac205',
'dark yellow': '#d5b60a',
'dark grey': '#363737',
'army green': '#4b5d16',
'grey blue': '#6b8ba4',
'seafoam': '#80f9ad',
'puce': '#a57e52',
'spring green': '#a9f971',
'dark orange': '#c65102',
'sand': '#e2ca76',
'pastel green': '#b0ff9d',
'mint': '#9ffeb0',
'light orange': '#fdaa48',
'bright pink': '#fe01b1',
'chartreuse': '#c1f80a',
'deep purple': '#36013f',
'dark brown': '#341c02',
'taupe': '#b9a281',
'pea green': '#8eab12',
'puke green': '#9aae07',
'kelly green': '#02ab2e',
'seafoam green': '#7af9ab',
'blue green': '#137e6d',
'khaki': '#aaa662',
'burgundy': '#610023',
'dark teal': '#014d4e',
'brick red': '#8f1402',
'royal purple': '#4b006e',
'plum': '#580f41',
'mint green': '#8fff9f',
'gold': '#dbb40c',
'baby blue': '#a2cffe',
'yellow green': '#c0fb2d',
'bright purple': '#be03fd',
'dark red': '#840000',
'pale blue': '#d0fefe',
'grass green': '#3f9b0b',
'navy': '#01153e',
'aquamarine': '#04d8b2',
'burnt orange': '#c04e01',
'neon green': '#0cff0c',
'bright blue': '#0165fc',
'rose': '#cf6275',
'light pink': '#ffd1df',
'mustard': '#ceb301',
'indigo': '#380282',
'lime': '#aaff32',
'sea green': '#53fca1',
'periwinkle': '#8e82fe',
'dark pink': '#cb416b',
'olive green': '#677a04',
'peach': '#ffb07c',
'pale green': '#c7fdb5',
'light brown': '#ad8150',
'hot pink': '#ff028d',
'black': '#000000',
'lilac': '#cea2fd',
'navy blue': '#001146',
'royal blue': '#0504aa',
'beige': '#e6daa6',
'salmon': '#ff796c',
'olive': '#6e750e',
'maroon': '#650021',
'bright green': '#01ff07',
'dark purple': '#35063e',
'mauve': '#ae7181',
'forest green': '#06470c',
'aqua': '#13eac9',
'cyan': '#00ffff',
'tan': '#d1b26f',
'dark blue': '#00035b',
'lavender': '#c79fef',
'turquoise': '#06c2ac',
'dark green': '#033500',
'violet': '#9a0eea',
'light purple': '#bf77f6',
'lime green': '#89fe05',
'grey': '#929591',
'sky blue': '#75bbfd',
'yellow': '#ffff14',
'magenta': '#c20078',
'light green': '#96f97b',
'orange': '#f97306',
'teal': '#029386',
'light blue': '#95d0fc',
'red': '#e50000',
'brown': '#653700',
'pink': '#ff81c0',
'blue': '#0343df',
'green': '#15b01a',
'purple': '#7e1e9c'}
# Normalize name to "xkcd:<name>" to avoid name collisions.
XKCD_COLORS = {'xkcd:' + name: value for name, value in XKCD_COLORS.items()}
# https://drafts.csswg.org/css-color-4/#named-colors
CSS4_COLORS = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkgrey': '#A9A9A9',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkslategrey': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'grey': '#808080',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgray': '#D3D3D3',
'lightgreen': '#90EE90',
'lightgrey': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'rebeccapurple': '#663399',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#F4A460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'}
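# Illustrative sanity check (not part of the original module): the prefixing
# above keeps the three palettes from colliding on shared names.
assert TABLEAU_COLORS['tab:blue'] == '#1f77b4'
assert XKCD_COLORS['xkcd:blue'] == '#0343df'
assert CSS4_COLORS['blue'] == '#0000FF'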
|
gpl-3.0
| 554,228,655,366,989,200 | -3,427,199,862,643,688,000 | 29.423714 | 76 | 0.505989 | false |
jonathanverner/brython
|
www/speed/benchmarks/util.py
|
17
|
1778
|
"""Utility code for benchmark scripts."""
__author__ = "[email protected] (Collin Winter)"
import math
import operator
from functools import reduce  # reduce is not a builtin on Python 3
def run_benchmark(options, num_runs, bench_func, *args):
"""Run the given benchmark, print results to stdout.
Args:
options: optparse.Values instance.
num_runs: number of times to run the benchmark
bench_func: benchmark function. `num_runs, *args` will be passed to this
function. This should return a list of floats (benchmark execution
times).
"""
if options.profile:
import cProfile
prof = cProfile.Profile()
prof.runcall(bench_func, num_runs, *args)
prof.print_stats(sort=options.profile_sort)
else:
data = bench_func(num_runs, *args)
if options.take_geo_mean:
product = reduce(operator.mul, data, 1)
print(math.pow(product, 1.0 / len(data)))
else:
for x in data:
print(x)
def add_standard_options_to(parser):
"""Add a bunch of common command-line flags to an existing OptionParser.
This function operates on `parser` in-place.
Args:
parser: optparse.OptionParser instance.
"""
parser.add_option("-n", action="store", type="int", default=100,
dest="num_runs", help="Number of times to run the test.")
parser.add_option("--profile", action="store_true",
help="Run the benchmark through cProfile.")
parser.add_option("--profile_sort", action="store", type="str",
default="time", help="Column to sort cProfile output by.")
parser.add_option("--take_geo_mean", action="store_true",
help="Return the geo mean, rather than individual data.")
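if __name__ == '__main__':
    # Illustrative self-test (not part of the original module): wires the two
    # helpers together with a hypothetical no-op workload.
    import optparse
    import time
    def bench_noop(num_runs):
        times = []
        for _ in range(num_runs):
            t0 = time.time()
            sum(range(100000))  # stand-in workload
            times.append(time.time() - t0)
        return times  # a list of floats, as run_benchmark expects
    parser = optparse.OptionParser()
    add_standard_options_to(parser)
    options, _args = parser.parse_args()
    run_benchmark(options, options.num_runs, bench_noop)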
|
bsd-3-clause
| 4,585,883,806,021,926,400 | -6,575,127,504,463,634,000 | 35.285714 | 80 | 0.611361 | false |
darribas/pysal
|
pysal/__init__.py
|
5
|
3399
|
"""
Python Spatial Analysis Library
===============================
Documentation
-------------
PySAL documentation is available in two forms: python docstrings and an html \
webpage at http://pysal.org/
Available sub-packages
----------------------
cg
Basic data structures and tools for Computational Geometry
core
Basic functions used by several sub-packages
esda
Tools for Exploratory Spatial Data Analysis
examples
Example data sets used by several sub-packages for examples and testing
network
Spatial analysis on networks
region
Regionalization algorithms and spatially constrained clustering
spatial_dynamics
Space-time exploratory methods and clustering
spreg
Spatial regression and econometrics
weights
Tools for creating and manipulating weights
contrib
Package for interfacing with third-party libraries
Utilities
---------
`fileio`_
Tool for file input and output, supports many well known file formats
"""
import pysal.cg
import pysal.core
from pysal.version import version
# toplevel imports to be explicit
from pysal.esda.moran import Moran, Moran_BV, Moran_BV_matrix, Moran_Local
from pysal.esda.geary import Geary
from pysal.esda.join_counts import Join_Counts
from pysal.esda.gamma import Gamma
from pysal.esda.getisord import G, G_Local
from pysal.esda.mapclassify import quantile, binC, bin, bin1d, Equal_Interval, \
Percentiles
from pysal.esda.mapclassify import Box_Plot, Quantiles, Std_Mean, Maximum_Breaks
from pysal.esda.mapclassify import Natural_Breaks, Fisher_Jenks, Jenks_Caspall
from pysal.esda.mapclassify import Jenks_Caspall_Sampled, Jenks_Caspall_Forced
from pysal.esda.mapclassify import User_Defined, Max_P_Classifier, gadf
from pysal.esda.mapclassify import K_classifiers
from pysal.inequality.theil import Theil, TheilD, TheilDSim
from pysal.region.maxp import Maxp, Maxp_LISA
from pysal.spatial_dynamics import Markov, Spatial_Markov, LISA_Markov, \
SpatialTau, Theta, Tau
from pysal.spatial_dynamics import ergodic
from pysal.spatial_dynamics import directional
from pysal.weights import W, lat2W, block_weights, comb, full, shimbel, \
order, higher_order, higher_order_sp, remap_ids, hexLat2W, WSP, regime_weights
from pysal.weights.Distance import knnW, Kernel, DistanceBand
from pysal.weights.Contiguity import buildContiguity
from pysal.weights.spatial_lag import lag_spatial
from pysal.weights.Wsets import w_union, w_intersection, w_difference
from pysal.weights.Wsets import w_symmetric_difference, w_subset
from pysal.weights.user import queen_from_shapefile, rook_from_shapefile, \
knnW_from_array, knnW_from_shapefile, threshold_binaryW_from_array,\
threshold_binaryW_from_shapefile, threshold_continuousW_from_array,\
threshold_continuousW_from_shapefile, kernelW, kernelW_from_shapefile,\
adaptive_kernelW, adaptive_kernelW_from_shapefile,\
min_threshold_dist_from_shapefile, build_lattice_shapefile
from pysal.core.util.weight_converter import weight_convert
import pysal.spreg
import pysal.examples
from pysal.network.network import Network, NetworkG, NetworkK, NetworkF
# Load the IOHandlers
from pysal.core import IOHandlers
# Assign pysal.open to dispatcher
open = pysal.core.FileIO.FileIO
#__all__=[]
#import esda,weights
#__all__+=esda.__all__
#__all__+=weights.__all__
# Constants
MISSINGVALUE = None # used by fileIO to flag missing values.
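# Illustrative usage (hypothetical paths; 'columbus.shp' ships with the
# pysal.examples datasets, but any polygon shapefile works):
#     w = pysal.rook_from_shapefile(pysal.examples.get_path('columbus.shp'))
#     f = pysal.open(pysal.examples.get_path('columbus.dbf'))
#     mi = pysal.Moran(f.by_col['CRIME'], w)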
|
bsd-3-clause
| -6,872,224,937,129,561,000 | 7,722,746,020,309,647,000 | 35.159574 | 82 | 0.775228 | false |
Pulgama/supriya
|
etc/pending_ugens/TGrains.py
|
1
|
8796
|
import collections
from supriya.enums import CalculationRate
from supriya.ugens.MultiOutUGen import MultiOutUGen
class TGrains(MultiOutUGen):
"""
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
        ...     channel_count=2,
... duration=0.1,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
>>> tgrains
TGrains.ar()
"""
### CLASS VARIABLES ###
    # Note: the generated stub passed bare strings to collections.OrderedDict,
    # which raises TypeError; the lookups below use .index(), so a plain tuple
    # is what the rest of the class actually expects.
    _ordered_input_names = (
        'channel_count',
        'trigger',
        'buffer_id',
        'rate',
        'center_pos',
        'duration',
        'pan',
        'amp',
        'interpolate',
    )
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
amp=0.1,
buffer_id=0,
center_pos=0,
channel_count=None,
duration=0.1,
interpolate=4,
pan=0,
rate=1,
trigger=0,
):
MultiOutUGen.__init__(
self,
calculation_rate=calculation_rate,
amp=amp,
buffer_id=buffer_id,
center_pos=center_pos,
channel_count=channel_count,
duration=duration,
interpolate=interpolate,
pan=pan,
rate=rate,
trigger=trigger,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
amp=0.1,
buffer_id=0,
center_pos=0,
channel_count=None,
duration=0.1,
interpolate=4,
pan=0,
rate=1,
trigger=0,
):
"""
Constructs an audio-rate TGrains.
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
            ...     channel_count=2,
... duration=0.1,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
>>> tgrains
TGrains.ar()
Returns ugen graph.
"""
import supriya.synthdefs
        calculation_rate = CalculationRate.AUDIO  # enum imported at module top
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
amp=amp,
buffer_id=buffer_id,
center_pos=center_pos,
channel_count=channel_count,
duration=duration,
interpolate=interpolate,
pan=pan,
rate=rate,
trigger=trigger,
)
return ugen
# def newFromDesc(): ...
### PUBLIC PROPERTIES ###
@property
def amp(self):
"""
Gets `amp` input of TGrains.
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
            ...     channel_count=2,
... duration=0.1,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
>>> tgrains.amp
0.1
Returns ugen input.
"""
index = self._ordered_input_names.index('amp')
return self._inputs[index]
@property
def buffer_id(self):
"""
Gets `buffer_id` input of TGrains.
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
            ...     channel_count=2,
... duration=0.1,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
>>> tgrains.buffer_id
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('buffer_id')
return self._inputs[index]
@property
def center_pos(self):
"""
Gets `center_pos` input of TGrains.
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
            ...     channel_count=2,
... duration=0.1,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
>>> tgrains.center_pos
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('center_pos')
return self._inputs[index]
@property
def channel_count(self):
"""
Gets `channel_count` input of TGrains.
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
            ...     channel_count=2,
... duration=0.1,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
            >>> tgrains.channel_count
            2.0
Returns ugen input.
"""
index = self._ordered_input_names.index('channel_count')
return self._inputs[index]
@property
def duration(self):
"""
Gets `duration` input of TGrains.
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
            ...     channel_count=2,
... duration=0.1,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
>>> tgrains.duration
0.1
Returns ugen input.
"""
index = self._ordered_input_names.index('duration')
return self._inputs[index]
@property
def interpolate(self):
"""
Gets `interpolate` input of TGrains.
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
... channel_count=channel_count,
            ...     channel_count=2,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
>>> tgrains.interpolate
4.0
Returns ugen input.
"""
index = self._ordered_input_names.index('interpolate')
return self._inputs[index]
@property
def pan(self):
"""
Gets `pan` input of TGrains.
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
            ...     channel_count=2,
... duration=0.1,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
>>> tgrains.pan
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('pan')
return self._inputs[index]
@property
def rate(self):
"""
Gets `rate` input of TGrains.
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
            ...     channel_count=2,
... duration=0.1,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
>>> tgrains.rate
1.0
Returns ugen input.
"""
index = self._ordered_input_names.index('rate')
return self._inputs[index]
@property
def trigger(self):
"""
Gets `trigger` input of TGrains.
::
>>> tgrains = supriya.ugens.TGrains.ar(
... amp=0.1,
... buffer_id=0,
... center_pos=0,
            ...     channel_count=2,
... duration=0.1,
... interpolate=4,
... pan=0,
... rate=1,
... trigger=0,
... )
>>> tgrains.trigger
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('trigger')
return self._inputs[index]
|
mit
| 3,776,149,195,193,702,400 | 1,278,944,086,584,747,500 | 23.501393 | 64 | 0.403479 | false |
jankoslavic/numpy
|
numpy/f2py/capi_maps.py
|
17
|
29404
|
#!/usr/bin/env python
"""
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 10:57:33 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.60 $"[10:-1]
from . import __version__
f2py_version = __version__.version
import copy
import re
import os
import sys
from .auxfuncs import *
from .crackfortran import markoutercomma
from . import cb_rules
# Numarray and Numeric users should set this False
using_newcore = True
depargs=[]
lcb_map={}
lcb2_map={}
# forced casting: mainly caused by the fact that Python or Numeric
# C/APIs do not support the corresponding C types.
c2py_map={'double': 'float',
'float': 'float', # forced casting
'long_double': 'float', # forced casting
'char': 'int', # forced casting
'signed_char': 'int', # forced casting
'unsigned_char': 'int', # forced casting
'short': 'int', # forced casting
'unsigned_short': 'int', # forced casting
'int': 'int', # (forced casting)
'long': 'int',
'long_long': 'long',
'unsigned': 'int', # forced casting
'complex_float': 'complex', # forced casting
'complex_double': 'complex',
'complex_long_double': 'complex', # forced casting
'string': 'string',
}
c2capi_map={'double':'NPY_DOUBLE',
'float':'NPY_FLOAT',
'long_double':'NPY_DOUBLE', # forced casting
'char':'NPY_CHAR',
'unsigned_char':'NPY_UBYTE',
'signed_char':'NPY_BYTE',
'short':'NPY_SHORT',
'unsigned_short':'NPY_USHORT',
'int':'NPY_INT',
'unsigned':'NPY_UINT',
'long':'NPY_LONG',
'long_long':'NPY_LONG', # forced casting
'complex_float':'NPY_CFLOAT',
'complex_double':'NPY_CDOUBLE',
'complex_long_double':'NPY_CDOUBLE', # forced casting
'string':'NPY_CHAR'}
# These new maps aren't used anywhere yet, but should be used by default
# unless building numeric or numarray extensions.
if using_newcore:
c2capi_map={'double': 'NPY_DOUBLE',
'float': 'NPY_FLOAT',
'long_double': 'NPY_LONGDOUBLE',
'char': 'NPY_BYTE',
'unsigned_char': 'NPY_UBYTE',
'signed_char': 'NPY_BYTE',
'short': 'NPY_SHORT',
'unsigned_short': 'NPY_USHORT',
'int': 'NPY_INT',
'unsigned': 'NPY_UINT',
'long': 'NPY_LONG',
'unsigned_long': 'NPY_ULONG',
'long_long': 'NPY_LONGLONG',
'unsigned_long_long': 'NPY_ULONGLONG',
'complex_float': 'NPY_CFLOAT',
'complex_double': 'NPY_CDOUBLE',
'complex_long_double': 'NPY_CDOUBLE',
                'string': 'NPY_CHAR', # f2py 2e is not ready for NPY_STRING (must set itemsize etc)
#'string':'NPY_STRING'
}
c2pycode_map={'double':'d',
'float':'f',
'long_double':'d', # forced casting
'char':'1',
'signed_char':'1',
'unsigned_char':'b',
'short':'s',
'unsigned_short':'w',
'int':'i',
'unsigned':'u',
'long':'l',
'long_long':'L',
'complex_float':'F',
'complex_double':'D',
'complex_long_double':'D', # forced casting
'string':'c'
}
if using_newcore:
c2pycode_map={'double':'d',
'float':'f',
'long_double':'g',
'char':'b',
'unsigned_char':'B',
'signed_char':'b',
'short':'h',
'unsigned_short':'H',
'int':'i',
'unsigned':'I',
'long':'l',
'unsigned_long':'L',
'long_long':'q',
'unsigned_long_long':'Q',
'complex_float':'F',
'complex_double':'D',
'complex_long_double':'G',
'string':'S'}
c2buildvalue_map={'double':'d',
'float':'f',
'char':'b',
'signed_char':'b',
'short':'h',
'int':'i',
'long':'l',
'long_long':'L',
'complex_float':'N',
'complex_double':'N',
'complex_long_double':'N',
'string':'z'}
if sys.version_info[0] >= 3:
# Bytes, not Unicode strings
c2buildvalue_map['string'] = 'y'
if using_newcore:
#c2buildvalue_map=???
pass
f2cmap_all={'real':{'':'float','4':'float','8':'double','12':'long_double','16':'long_double'},
'integer':{'':'int','1':'signed_char','2':'short','4':'int','8':'long_long',
'-1':'unsigned_char','-2':'unsigned_short','-4':'unsigned',
'-8':'unsigned_long_long'},
'complex':{'':'complex_float','8':'complex_float',
'16':'complex_double','24':'complex_long_double',
'32':'complex_long_double'},
'complexkind':{'':'complex_float','4':'complex_float',
'8':'complex_double','12':'complex_long_double',
'16':'complex_long_double'},
'logical':{'':'int','1':'char','2':'short','4':'int','8':'long_long'},
'double complex':{'':'complex_double'},
'double precision':{'':'double'},
'byte':{'':'char'},
'character':{'':'string'}
}
if os.path.isfile('.f2py_f2cmap'):
# User defined additions to f2cmap_all.
# .f2py_f2cmap must contain a dictionary of dictionaries, only.
# For example, {'real':{'low':'float'}} means that Fortran 'real(low)' is
# interpreted as C 'float'.
    # This feature is useful for F90/95 users if they use PARAMETERs
# in type specifications.
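    # An example of what such a file might contain (hypothetical kind names;
    # the file must evaluate to a dict of dicts whose values are c2py_map keys):
    #     dict(real=dict(sp='float', dp='double'),
    #          integer=dict(i8='long_long'))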
try:
outmess('Reading .f2py_f2cmap ...\n')
f = open('.f2py_f2cmap', 'r')
d = eval(f.read(), {}, {})
f.close()
for k, d1 in list(d.items()):
for k1 in list(d1.keys()):
d1[k1.lower()] = d1[k1]
d[k.lower()] = d[k]
for k in list(d.keys()):
if k not in f2cmap_all:
f2cmap_all[k]={}
for k1 in list(d[k].keys()):
if d[k][k1] in c2py_map:
if k1 in f2cmap_all[k]:
outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k, k1, f2cmap_all[k][k1], d[k][k1]))
f2cmap_all[k][k1] = d[k][k1]
outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, d[k][k1]))
else:
errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k, k1, d[k][k1], d[k][k1], list(c2py_map.keys())))
        outmess('Successfully applied user defined changes from .f2py_f2cmap\n')
except Exception as msg:
errmess('Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg))
cformat_map={'double': '%g',
'float': '%g',
'long_double': '%Lg',
'char': '%d',
'signed_char': '%d',
'unsigned_char': '%hhu',
'short': '%hd',
'unsigned_short': '%hu',
'int': '%d',
'unsigned': '%u',
'long': '%ld',
'unsigned_long': '%lu',
'long_long': '%ld',
'complex_float': '(%g,%g)',
'complex_double': '(%g,%g)',
'complex_long_double': '(%Lg,%Lg)',
'string': '%s',
}
############### Auxiliary functions
def getctype(var):
"""
Determines C type
"""
ctype='void'
if isfunction(var):
if 'result' in var:
a=var['result']
else:
a=var['name']
if a in var['vars']:
return getctype(var['vars'][a])
else:
errmess('getctype: function %s has no return value?!\n'%a)
elif issubroutine(var):
return ctype
elif 'typespec' in var and var['typespec'].lower() in f2cmap_all:
typespec = var['typespec'].lower()
f2cmap=f2cmap_all[typespec]
ctype=f2cmap[''] # default type
if 'kindselector' in var:
if '*' in var['kindselector']:
try:
ctype=f2cmap[var['kindselector']['*']]
except KeyError:
errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'], '*', var['kindselector']['*']))
elif 'kind' in var['kindselector']:
if typespec+'kind' in f2cmap_all:
f2cmap=f2cmap_all[typespec+'kind']
try:
ctype=f2cmap[var['kindselector']['kind']]
except KeyError:
if typespec in f2cmap_all:
f2cmap=f2cmap_all[typespec]
try:
ctype=f2cmap[str(var['kindselector']['kind'])]
except KeyError:
errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n'\
%(typespec, var['kindselector']['kind'], ctype,
typespec, var['kindselector']['kind'], os.getcwd()))
else:
if not isexternal(var):
errmess('getctype: No C-type found in "%s", assuming void.\n'%var)
return ctype
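# Illustrative calls (hypothetical var dicts of the kind crackfortran emits;
# not part of the original module):
#     getctype({'typespec': 'real', 'kindselector': {'*': '8'}})   # -> 'double'
#     getctype({'typespec': 'character'})                          # -> 'string'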
def getstrlength(var):
if isstringfunction(var):
if 'result' in var:
a=var['result']
else:
a=var['name']
if a in var['vars']:
return getstrlength(var['vars'][a])
else:
errmess('getstrlength: function %s has no return value?!\n'%a)
if not isstring(var):
errmess('getstrlength: expected a signature of a string but got: %s\n'%(repr(var)))
len='1'
if 'charselector' in var:
a=var['charselector']
if '*' in a:
len=a['*']
elif 'len' in a:
len=a['len']
if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len):
#if len in ['(*)','*','(:)',':']:
if isintent_hide(var):
errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(repr(var)))
len='-1'
return len
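# Illustrative (hypothetical var dict; not part of the original module):
#     getstrlength({'typespec': 'character', 'charselector': {'*': '10'}})  # -> '10'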
def getarrdims(a,var,verbose=0):
global depargs
ret={}
if isstring(var) and not isarray(var):
ret['dims']=getstrlength(var)
ret['size']=ret['dims']
ret['rank']='1'
elif isscalar(var):
ret['size']='1'
ret['rank']='0'
ret['dims']=''
elif isarray(var):
# if not isintent_c(var):
# var['dimension'].reverse()
dim=copy.copy(var['dimension'])
ret['size']='*'.join(dim)
        try:
            ret['size'] = repr(eval(ret['size']))
        except Exception:
            pass
ret['dims']=','.join(dim)
ret['rank']=repr(len(dim))
ret['rank*[-1]']=repr(len(dim)*[-1])[1:-1]
        for i in range(len(dim)): # solve dim for dependencies
v=[]
if dim[i] in depargs: v=[dim[i]]
else:
for va in depargs:
if re.match(r'.*?\b%s\b.*'%va, dim[i]):
v.append(va)
for va in v:
if depargs.index(va)>depargs.index(a):
dim[i]='*'
break
ret['setdims'], i='', -1
for d in dim:
i=i+1
if d not in ['*', ':', '(*)', '(:)']:
ret['setdims']='%s#varname#_Dims[%d]=%s,'%(ret['setdims'], i, d)
if ret['setdims']: ret['setdims']=ret['setdims'][:-1]
ret['cbsetdims'], i='', -1
for d in var['dimension']:
i=i+1
if d not in ['*', ':', '(*)', '(:)']:
ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, d)
elif isintent_in(var):
outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' \
% (d))
ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, 0)
elif verbose :
errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(repr(a), repr(d)))
if ret['cbsetdims']: ret['cbsetdims']=ret['cbsetdims'][:-1]
# if not isintent_c(var):
# var['dimension'].reverse()
return ret
def getpydocsign(a, var):
global lcb_map
if isfunction(var):
if 'result' in var:
af=var['result']
else:
af=var['name']
if af in var['vars']:
return getpydocsign(af, var['vars'][af])
else:
            errmess('getpydocsign: function %s has no return value?!\n'%af)
return '', ''
sig, sigout=a, a
opt=''
if isintent_in(var): opt='input'
elif isintent_inout(var): opt='in/output'
out_a = a
if isintent_out(var):
for k in var['intent']:
if k[:4]=='out=':
out_a = k[4:]
break
init=''
ctype=getctype(var)
if hasinitvalue(var):
init, showinit=getinit(a, var)
init = ', optional\\n Default: %s' % showinit
if isscalar(var):
if isintent_inout(var):
sig='%s : %s rank-0 array(%s,\'%s\')%s'%(a, opt, c2py_map[ctype],
c2pycode_map[ctype], init)
else:
sig='%s : %s %s%s'%(a, opt, c2py_map[ctype], init)
sigout='%s : %s'%(out_a, c2py_map[ctype])
elif isstring(var):
if isintent_inout(var):
sig='%s : %s rank-0 array(string(len=%s),\'c\')%s'%(a, opt, getstrlength(var), init)
else:
sig='%s : %s string(len=%s)%s'%(a, opt, getstrlength(var), init)
sigout='%s : string(len=%s)'%(out_a, getstrlength(var))
elif isarray(var):
dim=var['dimension']
rank=repr(len(dim))
sig='%s : %s rank-%s array(\'%s\') with bounds (%s)%s'%(a, opt, rank,
c2pycode_map[ctype],
','.join(dim), init)
if a==out_a:
sigout='%s : rank-%s array(\'%s\') with bounds (%s)'\
%(a, rank, c2pycode_map[ctype], ','.join(dim))
else:
sigout='%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\
%(out_a, rank, c2pycode_map[ctype], ','.join(dim), a)
elif isexternal(var):
ua=''
if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]:
ua=lcb2_map[lcb_map[a]]['argname']
if not ua==a: ua=' => %s'%ua
else: ua=''
sig='%s : call-back function%s'%(a, ua)
sigout=sig
else:
errmess('getpydocsign: Could not resolve docsignature for "%s".\\n'%a)
return sig, sigout
def getarrdocsign(a, var):
ctype=getctype(var)
if isstring(var) and (not isarray(var)):
sig='%s : rank-0 array(string(len=%s),\'c\')'%(a, getstrlength(var))
elif isscalar(var):
sig='%s : rank-0 array(%s,\'%s\')'%(a, c2py_map[ctype],
c2pycode_map[ctype],)
elif isarray(var):
dim=var['dimension']
rank=repr(len(dim))
sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a, rank,
c2pycode_map[ctype],
','.join(dim))
return sig
def getinit(a, var):
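    # Example (illustrative): a complex initializer var['='] == '(1.0,2.0)' is
    # split by markoutercomma into real part '1.0' and imaginary part '2.0'.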
if isstring(var): init, showinit='""', "''"
else: init, showinit='', ''
if hasinitvalue(var):
init=var['=']
showinit=init
if iscomplex(var) or iscomplexarray(var):
ret={}
try:
v = var["="]
if ',' in v:
ret['init.r'], ret['init.i']=markoutercomma(v[1:-1]).split('@,@')
else:
v = eval(v, {}, {})
ret['init.r'], ret['init.i']=str(v.real), str(v.imag)
except:
raise ValueError('getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a))
if isarray(var):
init='(capi_c.r=%s,capi_c.i=%s,capi_c)'%(ret['init.r'], ret['init.i'])
elif isstring(var):
if not init: init, showinit='""', "''"
if init[0]=="'":
init='"%s"'%(init[1:-1].replace('"', '\\"'))
if init[0]=='"': showinit="'%s'"%(init[1:-1])
return init, showinit
def sign2map(a, var):
"""
varname,ctype,atype
init,init.r,init.i,pytype
vardebuginfo,vardebugshowvalue,varshowvalue
varrfromat
intent
"""
global lcb_map, cb_map
out_a = a
if isintent_out(var):
for k in var['intent']:
if k[:4]=='out=':
out_a = k[4:]
break
ret={'varname':a,'outvarname':out_a}
ret['ctype']=getctype(var)
intent_flags = []
for f, s in isintent_dict.items():
if f(var): intent_flags.append('F2PY_%s'%s)
if intent_flags:
#XXX: Evaluate intent_flags here.
ret['intent'] = '|'.join(intent_flags)
else:
ret['intent'] = 'F2PY_INTENT_IN'
if isarray(var): ret['varrformat']='N'
elif ret['ctype'] in c2buildvalue_map:
ret['varrformat']=c2buildvalue_map[ret['ctype']]
else: ret['varrformat']='O'
ret['init'], ret['showinit']=getinit(a, var)
if hasinitvalue(var) and iscomplex(var) and not isarray(var):
ret['init.r'], ret['init.i'] = markoutercomma(ret['init'][1:-1]).split('@,@')
if isexternal(var):
ret['cbnamekey']=a
if a in lcb_map:
ret['cbname']=lcb_map[a]
ret['maxnofargs']=lcb2_map[lcb_map[a]]['maxnofargs']
ret['nofoptargs']=lcb2_map[lcb_map[a]]['nofoptargs']
ret['cbdocstr']=lcb2_map[lcb_map[a]]['docstr']
ret['cblatexdocstr']=lcb2_map[lcb_map[a]]['latexdocstr']
else:
ret['cbname']=a
errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a, list(lcb_map.keys())))
if isstring(var):
ret['length']=getstrlength(var)
if isarray(var):
ret=dictappend(ret, getarrdims(a, var))
dim=copy.copy(var['dimension'])
if ret['ctype'] in c2capi_map:
ret['atype']=c2capi_map[ret['ctype']]
# Debug info
if debugcapi(var):
il=[isintent_in, 'input', isintent_out, 'output',
isintent_inout, 'inoutput', isrequired, 'required',
isoptional, 'optional', isintent_hide, 'hidden',
iscomplex, 'complex scalar',
l_and(isscalar, l_not(iscomplex)), 'scalar',
isstring, 'string', isarray, 'array',
iscomplexarray, 'complex array', isstringarray, 'string array',
iscomplexfunction, 'complex function',
l_and(isfunction, l_not(iscomplexfunction)), 'function',
isexternal, 'callback',
isintent_callback, 'callback',
isintent_aux, 'auxiliary',
#ismutable,'mutable',l_not(ismutable),'immutable',
]
rl=[]
for i in range(0, len(il), 2):
if il[i](var): rl.append(il[i+1])
if isstring(var):
rl.append('slen(%s)=%s'%(a, ret['length']))
if isarray(var):
# if not isintent_c(var):
# var['dimension'].reverse()
ddim=','.join(map(lambda x, y:'%s|%s'%(x, y), var['dimension'], dim))
rl.append('dims(%s)'%ddim)
# if not isintent_c(var):
# var['dimension'].reverse()
if isexternal(var):
ret['vardebuginfo']='debug-capi:%s=>%s:%s'%(a, ret['cbname'], ','.join(rl))
else:
ret['vardebuginfo']='debug-capi:%s %s=%s:%s'%(ret['ctype'], a, ret['showinit'], ','.join(rl))
if isscalar(var):
if ret['ctype'] in cformat_map:
ret['vardebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']])
if isstring(var):
ret['vardebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a)
if isexternal(var):
ret['vardebugshowvalue']='debug-capi:%s=%%p'%(a)
if ret['ctype'] in cformat_map:
ret['varshowvalue']='#name#:%s=%s'%(a, cformat_map[ret['ctype']])
ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
if isstring(var):
ret['varshowvalue']='#name#:slen(%s)=%%d %s=\\"%%s\\"'%(a, a)
ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var)
if hasnote(var):
ret['note']=var['note']
return ret
def routsign2map(rout):
"""
name,NAME,begintitle,endtitle
rname,ctype,rformat
routdebugshowvalue
"""
global lcb_map
name = rout['name']
fname = getfortranname(rout)
ret={'name': name,
'texname': name.replace('_', '\\_'),
'name_lower': name.lower(),
'NAME': name.upper(),
'begintitle': gentitle(name),
'endtitle': gentitle('end of %s'%name),
'fortranname': fname,
'FORTRANNAME': fname.upper(),
'callstatement': getcallstatement(rout) or '',
'usercode': getusercode(rout) or '',
'usercode1': getusercode1(rout) or '',
}
if '_' in fname:
ret['F_FUNC'] = 'F_FUNC_US'
else:
ret['F_FUNC'] = 'F_FUNC'
if '_' in name:
ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US'
else:
ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC'
lcb_map={}
if 'use' in rout:
for u in rout['use'].keys():
if u in cb_rules.cb_map:
for un in cb_rules.cb_map[u]:
ln=un[0]
if 'map' in rout['use'][u]:
for k in rout['use'][u]['map'].keys():
if rout['use'][u]['map'][k]==un[0]: ln=k;break
lcb_map[ln]=un[1]
#else:
# errmess('routsign2map: cb_map does not contain module "%s" used in "use" statement.\n'%(u))
elif 'externals' in rout and rout['externals']:
errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n'%(ret['name'], repr(rout['externals'])))
ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or ''
if isfunction(rout):
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
ret['rname']=a
ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout)
ret['ctype']=getctype(rout['vars'][a])
if hasresultnote(rout):
ret['resultnote']=rout['vars'][a]['note']
rout['vars'][a]['note']=['See elsewhere.']
if ret['ctype'] in c2buildvalue_map:
ret['rformat']=c2buildvalue_map[ret['ctype']]
else:
ret['rformat']='O'
errmess('routsign2map: no c2buildvalue key for type %s\n'%(repr(ret['ctype'])))
if debugcapi(rout):
if ret['ctype'] in cformat_map:
ret['routdebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']])
if isstringfunction(rout):
ret['routdebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a)
if isstringfunction(rout):
ret['rlength']=getstrlength(rout['vars'][a])
if ret['rlength']=='-1':
errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n'%(repr(rout['name'])))
ret['rlength']='10'
if hasnote(rout):
ret['note']=rout['note']
rout['note']=['See elsewhere.']
return ret
def modsign2map(m):
"""
modulename
"""
if ismodule(m):
ret={'f90modulename':m['name'],
'F90MODULENAME':m['name'].upper(),
'texf90modulename':m['name'].replace('_', '\\_')}
else:
ret={'modulename':m['name'],
'MODULENAME':m['name'].upper(),
'texmodulename':m['name'].replace('_', '\\_')}
ret['restdoc'] = getrestdoc(m) or []
if hasnote(m):
ret['note']=m['note']
#m['note']=['See elsewhere.']
ret['usercode'] = getusercode(m) or ''
ret['usercode1'] = getusercode1(m) or ''
if m['body']:
ret['interface_usercode'] = getusercode(m['body'][0]) or ''
else:
ret['interface_usercode'] = ''
ret['pymethoddef'] = getpymethoddef(m) or ''
if 'coutput' in m:
ret['coutput'] = m['coutput']
if 'f2py_wrapper_output' in m:
ret['f2py_wrapper_output'] = m['f2py_wrapper_output']
return ret
def cb_sign2map(a,var,index=None):
ret={'varname':a}
    if index is None or 1: # 'or 1' makes this always true, deliberately disabling the 7712 patch
ret['varname_i'] = ret['varname']
else:
ret['varname_i'] = ret['varname'] + '_' + str(index)
ret['ctype']=getctype(var)
if ret['ctype'] in c2capi_map:
ret['atype']=c2capi_map[ret['ctype']]
if ret['ctype'] in cformat_map:
ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
if isarray(var):
ret=dictappend(ret, getarrdims(a, var))
ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var)
if hasnote(var):
ret['note']=var['note']
var['note']=['See elsewhere.']
return ret
def cb_routsign2map(rout, um):
"""
name,begintitle,endtitle,argname
ctype,rctype,maxnofargs,nofoptargs,returncptr
"""
ret={'name':'cb_%s_in_%s'%(rout['name'], um),
'returncptr':''}
if isintent_callback(rout):
if '_' in rout['name']:
F_FUNC='F_FUNC_US'
else:
F_FUNC='F_FUNC'
ret['callbackname'] = '%s(%s,%s)' \
% (F_FUNC,
rout['name'].lower(),
rout['name'].upper(),
)
ret['static'] = 'extern'
else:
ret['callbackname'] = ret['name']
ret['static'] = 'static'
ret['argname']=rout['name']
ret['begintitle']=gentitle(ret['name'])
ret['endtitle']=gentitle('end of %s'%ret['name'])
ret['ctype']=getctype(rout)
ret['rctype']='void'
if ret['ctype']=='string': ret['rctype']='void'
else:
ret['rctype']=ret['ctype']
if ret['rctype']!='void':
if iscomplexfunction(rout):
ret['returncptr'] = """
#ifdef F2PY_CB_RETURNCOMPLEX
return_value=
#endif
"""
else:
ret['returncptr'] = 'return_value='
if ret['ctype'] in cformat_map:
ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
if isstringfunction(rout):
ret['strlength']=getstrlength(rout)
if isfunction(rout):
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if hasnote(rout['vars'][a]):
ret['note']=rout['vars'][a]['note']
rout['vars'][a]['note']=['See elsewhere.']
ret['rname']=a
ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout)
if iscomplexfunction(rout):
ret['rctype']="""
#ifdef F2PY_CB_RETURNCOMPLEX
#ctype#
#else
void
#endif
"""
else:
if hasnote(rout):
ret['note']=rout['note']
rout['note']=['See elsewhere.']
nofargs=0
nofoptargs=0
if 'args' in rout and 'vars' in rout:
for a in rout['args']:
var=rout['vars'][a]
if l_or(isintent_in, isintent_inout)(var):
nofargs=nofargs+1
if isoptional(var):
nofoptargs=nofoptargs+1
ret['maxnofargs']=repr(nofargs)
ret['nofoptargs']=repr(nofoptargs)
if hasnote(rout) and isfunction(rout) and 'result' in rout:
ret['routnote']=rout['note']
rout['note']=['See elsewhere.']
return ret
def common_sign2map(a, var): # obsolete
ret={'varname':a}
ret['ctype']=getctype(var)
if isstringarray(var):
ret['ctype']='char'
if ret['ctype'] in c2capi_map:
ret['atype']=c2capi_map[ret['ctype']]
if ret['ctype'] in cformat_map:
ret['showvalueformat']='%s'%(cformat_map[ret['ctype']])
if isarray(var):
ret=dictappend(ret, getarrdims(a, var))
elif isstring(var):
ret['size']=getstrlength(var)
ret['rank']='1'
ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var)
if hasnote(var):
ret['note']=var['note']
var['note']=['See elsewhere.']
    ret['arrdocstr']=getarrdocsign(a, var) # for strings this reports rank-0, though the data is effectively rank-1
return ret
|
bsd-3-clause
| -7,510,528,198,512,104,000 | 4,265,871,499,726,551,000 | 37.03881 | 171 | 0.489457 | false |
eval1749/elang
|
build/android/gyp/package_resources.py
|
3
|
11007
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=C0301
"""Package resources into an apk.
See https://android.googlesource.com/platform/tools/base/+/master/legacy/ant-tasks/src/main/java/com/android/ant/AaptExecTask.java
and
https://android.googlesource.com/platform/sdk/+/master/files/ant/build.xml
"""
# pylint: enable=C0301
import optparse
import os
import re
import shutil
import sys
import zipfile
from util import build_utils
# List is generated from the chrome_apk.apk_intermediates.ap_ via:
# unzip -l $FILE_AP_ | cut -c31- | grep res/draw | cut -d'/' -f 2 | sort \
# | uniq | grep -- -tvdpi- | cut -c10-
# and then manually sorted.
# Note that we can't just do a cross-product of dimensions because the filenames
# become too long and aapt fails to create the files.
# This leaves all default drawables (mdpi) in the main apk. Android gets upset
# though if any drawables are missing from the default drawables/ directory.
DENSITY_SPLITS = {
'hdpi': (
'hdpi-v4', # Order matters for output file names.
'ldrtl-hdpi-v4',
'sw600dp-hdpi-v13',
'ldrtl-hdpi-v17',
'ldrtl-sw600dp-hdpi-v17',
'hdpi-v21',
),
'xhdpi': (
'xhdpi-v4',
'ldrtl-xhdpi-v4',
'sw600dp-xhdpi-v13',
'ldrtl-xhdpi-v17',
'ldrtl-sw600dp-xhdpi-v17',
'xhdpi-v21',
),
'xxhdpi': (
'xxhdpi-v4',
'ldrtl-xxhdpi-v4',
'sw600dp-xxhdpi-v13',
'ldrtl-xxhdpi-v17',
'ldrtl-sw600dp-xxhdpi-v17',
'xxhdpi-v21',
),
'xxxhdpi': (
'xxxhdpi-v4',
'ldrtl-xxxhdpi-v4',
'sw600dp-xxxhdpi-v13',
'ldrtl-xxxhdpi-v17',
'ldrtl-sw600dp-xxxhdpi-v17',
'xxxhdpi-v21',
),
'tvdpi': (
'tvdpi-v4',
'sw600dp-tvdpi-v13',
'ldrtl-sw600dp-tvdpi-v17',
),
}
def _ParseArgs(args):
"""Parses command line options.
Returns:
An options object as from optparse.OptionsParser.parse_args()
"""
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--android-sdk', help='path to the Android SDK folder')
parser.add_option('--aapt-path',
help='path to the Android aapt tool')
parser.add_option('--configuration-name',
help='Gyp\'s configuration name (Debug or Release).')
parser.add_option('--android-manifest', help='AndroidManifest.xml path')
parser.add_option('--version-code', help='Version code for apk.')
parser.add_option('--version-name', help='Version name for apk.')
parser.add_option(
'--shared-resources',
action='store_true',
help='Make a resource package that can be loaded by a different'
'application at runtime to access the package\'s resources.')
parser.add_option(
'--app-as-shared-lib',
action='store_true',
help='Make a resource package that can be loaded as shared library')
parser.add_option('--resource-zips',
default='[]',
help='zip files containing resources to be packaged')
parser.add_option('--asset-dir',
help='directories containing assets to be packaged')
parser.add_option('--no-compress', help='disables compression for the '
'given comma separated list of extensions')
parser.add_option(
'--create-density-splits',
action='store_true',
help='Enables density splits')
parser.add_option('--language-splits',
default='[]',
help='GYP list of languages to create splits for')
parser.add_option('--apk-path',
help='Path to output (partial) apk.')
options, positional_args = parser.parse_args(args)
if positional_args:
parser.error('No positional arguments should be given.')
# Check that required options have been provided.
required_options = ('android_sdk', 'aapt_path', 'configuration_name',
'android_manifest', 'version_code', 'version_name',
'apk_path')
build_utils.CheckOptions(options, parser, required=required_options)
options.resource_zips = build_utils.ParseGypList(options.resource_zips)
options.language_splits = build_utils.ParseGypList(options.language_splits)
return options
def MoveImagesToNonMdpiFolders(res_root):
"""Move images from drawable-*-mdpi-* folders to drawable-* folders.
Why? http://crbug.com/289843
"""
for src_dir_name in os.listdir(res_root):
src_components = src_dir_name.split('-')
if src_components[0] != 'drawable' or 'mdpi' not in src_components:
continue
src_dir = os.path.join(res_root, src_dir_name)
if not os.path.isdir(src_dir):
continue
dst_components = [c for c in src_components if c != 'mdpi']
assert dst_components != src_components
dst_dir_name = '-'.join(dst_components)
dst_dir = os.path.join(res_root, dst_dir_name)
build_utils.MakeDirectory(dst_dir)
for src_file_name in os.listdir(src_dir):
if not src_file_name.endswith('.png'):
continue
src_file = os.path.join(src_dir, src_file_name)
dst_file = os.path.join(dst_dir, src_file_name)
assert not os.path.lexists(dst_file)
shutil.move(src_file, dst_file)
def PackageArgsForExtractedZip(d):
"""Returns the aapt args for an extracted resources zip.
A resources zip either contains the resources for a single target or for
multiple targets. If it is multiple targets merged into one, the actual
resource directories will be contained in the subdirectories 0, 1, 2, ...
"""
subdirs = [os.path.join(d, s) for s in os.listdir(d)]
subdirs = [s for s in subdirs if os.path.isdir(s)]
is_multi = '0' in [os.path.basename(s) for s in subdirs]
if is_multi:
res_dirs = sorted(subdirs, key=lambda p : int(os.path.basename(p)))
else:
res_dirs = [d]
package_command = []
for d in res_dirs:
MoveImagesToNonMdpiFolders(d)
package_command += ['-S', d]
return package_command
def _GenerateDensitySplitPaths(apk_path):
for density, config in DENSITY_SPLITS.iteritems():
src_path = '%s_%s' % (apk_path, '_'.join(config))
dst_path = '%s_%s' % (apk_path, density)
yield src_path, dst_path
def _GenerateLanguageSplitOutputPaths(apk_path, languages):
for lang in languages:
yield '%s_%s' % (apk_path, lang)
def RenameDensitySplits(apk_path):
"""Renames all density splits to have shorter / predictable names."""
for src_path, dst_path in _GenerateDensitySplitPaths(apk_path):
shutil.move(src_path, dst_path)
def CheckForMissedConfigs(apk_path, check_density, languages):
"""Raises an exception if apk_path contains any unexpected configs."""
triggers = []
if check_density:
triggers.extend(re.compile('-%s' % density) for density in DENSITY_SPLITS)
if languages:
triggers.extend(re.compile(r'-%s\b' % lang) for lang in languages)
with zipfile.ZipFile(apk_path) as main_apk_zip:
for name in main_apk_zip.namelist():
for trigger in triggers:
if trigger.search(name) and not 'mipmap-' in name:
raise Exception(('Found config in main apk that should have been ' +
'put into a split: %s\nYou need to update ' +
'package_resources.py to include this new ' +
'config (trigger=%s)') % (name, trigger.pattern))
def _ConstructMostAaptArgs(options):
package_command = [
options.aapt_path,
'package',
'--version-code', options.version_code,
'--version-name', options.version_name,
'-M', options.android_manifest,
'--no-crunch',
'-f',
'--auto-add-overlay',
'-I', os.path.join(options.android_sdk, 'android.jar'),
'-F', options.apk_path,
'--ignore-assets', build_utils.AAPT_IGNORE_PATTERN,
]
if options.no_compress:
for ext in options.no_compress.split(','):
package_command += ['-0', ext]
if options.shared_resources:
package_command.append('--shared-lib')
if options.app_as_shared_lib:
package_command.append('--app-as-shared-lib')
if options.asset_dir and os.path.exists(options.asset_dir):
package_command += ['-A', options.asset_dir]
if options.create_density_splits:
for config in DENSITY_SPLITS.itervalues():
package_command.extend(('--split', ','.join(config)))
if options.language_splits:
for lang in options.language_splits:
package_command.extend(('--split', lang))
if 'Debug' in options.configuration_name:
package_command += ['--debug-mode']
return package_command
def _OnStaleMd5(package_command, options):
with build_utils.TempDir() as temp_dir:
if options.resource_zips:
dep_zips = options.resource_zips
for z in dep_zips:
subdir = os.path.join(temp_dir, os.path.basename(z))
if os.path.exists(subdir):
raise Exception('Resource zip name conflict: ' + os.path.basename(z))
build_utils.ExtractAll(z, path=subdir)
package_command += PackageArgsForExtractedZip(subdir)
build_utils.CheckOutput(
package_command, print_stdout=False, print_stderr=False)
if options.create_density_splits or options.language_splits:
CheckForMissedConfigs(options.apk_path, options.create_density_splits,
options.language_splits)
if options.create_density_splits:
RenameDensitySplits(options.apk_path)
def main(args):
args = build_utils.ExpandFileArgs(args)
options = _ParseArgs(args)
package_command = _ConstructMostAaptArgs(options)
output_paths = [ options.apk_path ]
if options.create_density_splits:
for _, dst_path in _GenerateDensitySplitPaths(options.apk_path):
output_paths.append(dst_path)
output_paths.extend(
_GenerateLanguageSplitOutputPaths(options.apk_path,
options.language_splits))
input_paths = [ options.android_manifest ] + options.resource_zips
input_strings = []
input_strings.extend(package_command)
  # md5_check.py intentionally leaves file paths out of the md5, so a rename
  # alone would not trigger repackaging. To repackage resources when an
  # asset's name changes, we add the asset paths to input_strings; the paths
  # are stable across builds unless the assets themselves change.
if options.asset_dir and os.path.exists(options.asset_dir):
asset_paths = []
for root, _, filenames in os.walk(options.asset_dir):
asset_paths.extend(os.path.join(root, f) for f in filenames)
input_paths.extend(asset_paths)
input_strings.extend(sorted(asset_paths))
build_utils.CallAndWriteDepfileIfStale(
lambda: _OnStaleMd5(package_command, options),
options,
input_paths=input_paths,
input_strings=input_strings,
output_paths=output_paths)
if __name__ == '__main__':
main(sys.argv[1:])
|
apache-2.0
| -8,969,611,362,770,051,000 | -4,535,402,923,037,202,400 | 33.077399 | 130 | 0.653493 | false |
goir/virtualenv-creator
|
virtualenv.py
|
6
|
99413
|
#!/usr/bin/env python
"""Create a "virtual" Python installation
"""
__version__ = "13.1.0"
virtualenv_version = __version__ # legacy
import base64
import sys
import os
import codecs
import optparse
import re
import shutil
import logging
import tempfile
import zlib
import errno
import glob
import distutils.sysconfig
from distutils.util import strtobool
import struct
import subprocess
import tarfile
if sys.version_info < (2, 6):
print('ERROR: %s' % sys.exc_info()[1])
print('ERROR: this script requires Python 2.6 or greater.')
sys.exit(101)
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
is_cygwin = (sys.platform == 'cygwin')
is_darwin = (sys.platform == 'darwin')
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
if is_win:
default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')
if is_pypy:
expected_exe = 'pypy'
elif is_jython:
expected_exe = 'jython'
else:
expected_exe = 'python'
# Return a mapping of version -> Python executable
# Only provided for Windows, where the information in the registry is used
if not is_win:
def get_installed_pythons():
return {}
else:
try:
import winreg
except ImportError:
import _winreg as winreg
def get_installed_pythons():
try:
python_core = winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
"Software\\Python\\PythonCore")
except WindowsError:
# No registered Python installations
return {}
i = 0
versions = []
while True:
try:
versions.append(winreg.EnumKey(python_core, i))
i = i + 1
except WindowsError:
break
exes = dict()
for ver in versions:
try:
path = winreg.QueryValue(python_core, "%s\\InstallPath" % ver)
except WindowsError:
continue
exes[ver] = join(path, "python.exe")
winreg.CloseKey(python_core)
# Add the major versions
# Sort the keys, then repeatedly update the major version entry
# Last executable (i.e., highest version) wins with this approach
for ver in sorted(exes):
exes[ver[0]] = exes[ver]
return exes
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
'fnmatch', 'locale', 'encodings', 'codecs',
'stat', 'UserDict', 'readline', 'copy_reg', 'types',
're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy', 'tokenize', 'token',
'functools', 'heapq', 'bisect', 'weakref',
'reprlib'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver >= 3:
import sysconfig
platdir = sysconfig.get_config_var('PLATDIR')
REQUIRED_FILES.append(platdir)
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
#"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
#"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
"imp",
"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
#"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
#"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if minver >= 4:
REQUIRED_MODULES.extend([
'operator',
'_collections_abc',
'_bootlocale',
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO+logging.WARN)/2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self, consumers):
self.consumers = consumers
self.indent = 0
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if rendered is None:
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' '*self.indent + rendered
if hasattr(consumer, 'write'):
consumer.write(rendered+'\n')
else:
consumer(rendered)
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self.level_matches(self.NOTIFY, self._stdout_level()):
sys.stdout.write(msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self.stdout_level_matches(self.NOTIFY):
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
sys.stdout.write('.')
sys.stdout.flush()
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger([])
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
#@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
level_for_integer = classmethod(level_for_integer)
# Create a silent logger just to keep logger defined at import time;
# it is overridden with the requested verbosity when main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])
def mkdir(path):
if not os.path.exists(path):
logger.info('Creating %s', path)
os.makedirs(path)
else:
logger.info('Directory %s already exists', path)
def copyfileordir(src, dest, symlink=True):
if os.path.isdir(src):
shutil.copytree(src, dest, symlink)
else:
shutil.copy2(src, dest)
def copyfile(src, dest, symlink=True):
if not os.path.exists(src):
# Some bad symlink in the src
logger.warn('Cannot find file %s (bad symlink)', src)
return
if os.path.exists(dest):
logger.debug('File %s already exists', dest)
return
if not os.path.exists(os.path.dirname(dest)):
logger.info('Creating parent directories for %s', os.path.dirname(dest))
os.makedirs(os.path.dirname(dest))
if not os.path.islink(src):
srcpath = os.path.abspath(src)
else:
srcpath = os.readlink(src)
if symlink and hasattr(os, 'symlink') and not is_win:
logger.info('Symlinking %s', dest)
try:
os.symlink(srcpath, dest)
except (OSError, NotImplementedError):
logger.info('Symlinking failed, copying to %s', dest)
copyfileordir(src, dest, symlink)
else:
logger.info('Copying to %s', dest)
copyfileordir(src, dest, symlink)
def writefile(dest, content, overwrite=True):
if not os.path.exists(dest):
logger.info('Writing %s', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
return
else:
f = open(dest, 'rb')
c = f.read()
f.close()
if c != content.encode("utf-8"):
if not overwrite:
logger.notify('File %s exists with different content; not overwriting', dest)
return
logger.notify('Overwriting %s with new content', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
else:
logger.info('Content %s already in place', dest)
def rmtree(dir):
if os.path.exists(dir):
logger.notify('Deleting tree %s', dir)
shutil.rmtree(dir)
else:
logger.info('Do not need to delete %s; already gone', dir)
def make_exe(fn):
if hasattr(os, 'chmod'):
oldmode = os.stat(fn).st_mode & 0xFFF # 0o7777
newmode = (oldmode | 0x16D) & 0xFFF # 0o555, 0o7777
os.chmod(fn, newmode)
logger.info('Changed mode of %s to %s', fn, oct(newmode))
def _find_file(filename, dirs):
for dir in reversed(dirs):
files = glob.glob(os.path.join(dir, filename))
if files and os.path.isfile(files[0]):
return True, files[0]
return False, filename
def file_search_dirs():
here = os.path.dirname(os.path.abspath(__file__))
dirs = [here, join(here, 'virtualenv_support')]
if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
# Probably some boot script; just in case virtualenv is installed...
try:
import virtualenv
except ImportError:
pass
else:
dirs.append(os.path.join(
os.path.dirname(virtualenv.__file__), 'virtualenv_support'))
return [d for d in dirs if os.path.isdir(d)]
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
"""
Custom help formatter for use in ConfigOptionParser that updates
the defaults before expanding them, allowing them to show up correctly
in the help listing
"""
def expand_default(self, option):
if self.parser is not None:
self.parser.update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
"""
Custom option parser which updates its defaults by checking the
configuration files and environmental variables
"""
def __init__(self, *args, **kwargs):
self.config = ConfigParser.RawConfigParser()
self.files = self.get_config_files()
self.config.read(self.files)
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False)
if config_file and os.path.exists(config_file):
return [config_file]
return [default_config_file]
def update_defaults(self, defaults):
"""
Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists).
"""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
config.update(dict(self.get_config_section('virtualenv')))
# 2. environmental variables
config.update(dict(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
# handle multiline configs
if option.action == 'append':
val = val.split()
else:
option.nargs = 1
if option.action == 'store_false':
val = not strtobool(val)
elif option.action in ('store_true', 'count'):
val = strtobool(val)
try:
val = option.convert_value(key, val)
except optparse.OptionValueError:
e = sys.exc_info()[1]
print("An error occurred during configuration: %s" % e)
sys.exit(3)
defaults[option.dest] = val
return defaults
def get_config_section(self, name):
"""
Get a section of a configuration
"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self, prefix='VIRTUALENV_'):
"""
Returns a generator with all environmental vars with prefix VIRTUALENV
"""
for key, val in os.environ.items():
if key.startswith(prefix):
yield (key.replace(prefix, '').lower(), val)
def get_default_values(self):
"""
        Overriding to make it possible to update the defaults after the
        option parser has been instantiated; update_defaults() does the dirty work.
"""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self.update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, basestring):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def main():
parser = ConfigOptionParser(
version=virtualenv_version,
usage="%prog [OPTIONS] DEST_DIR",
formatter=UpdatingDefaultsHelpFormatter())
parser.add_option(
'-v', '--verbose',
action='count',
dest='verbose',
default=0,
help="Increase verbosity.")
parser.add_option(
'-q', '--quiet',
action='count',
dest='quiet',
default=0,
help='Decrease verbosity.')
parser.add_option(
'-p', '--python',
dest='python',
metavar='PYTHON_EXE',
help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
'interpreter to create the new environment. The default is the interpreter that '
'virtualenv was installed with (%s)' % sys.executable)
parser.add_option(
'--clear',
dest='clear',
action='store_true',
help="Clear out the non-root install and start from scratch.")
parser.set_defaults(system_site_packages=False)
parser.add_option(
'--no-site-packages',
dest='system_site_packages',
action='store_false',
help="DEPRECATED. Retained only for backward compatibility. "
"Not having access to global site-packages is now the default behavior.")
parser.add_option(
'--system-site-packages',
dest='system_site_packages',
action='store_true',
help="Give the virtual environment access to the global site-packages.")
parser.add_option(
'--always-copy',
dest='symlink',
action='store_false',
default=True,
help="Always copy files rather than symlinking.")
parser.add_option(
'--unzip-setuptools',
dest='unzip_setuptools',
action='store_true',
help="Unzip Setuptools when installing it.")
parser.add_option(
'--relocatable',
dest='relocatable',
action='store_true',
help='Make an EXISTING virtualenv environment relocatable. '
'This fixes up scripts and makes all .pth files relative.')
parser.add_option(
'--no-setuptools',
dest='no_setuptools',
action='store_true',
help='Do not install setuptools (or pip) in the new virtualenv.')
parser.add_option(
'--no-pip',
dest='no_pip',
action='store_true',
help='Do not install pip in the new virtualenv.')
parser.add_option(
'--no-wheel',
dest='no_wheel',
action='store_true',
help='Do not install wheel in the new virtualenv.')
default_search_dirs = file_search_dirs()
parser.add_option(
'--extra-search-dir',
dest="search_dirs",
action="append",
metavar='DIR',
default=default_search_dirs,
help="Directory to look for setuptools/pip distributions in. "
"This option can be used multiple times.")
parser.add_option(
'--never-download',
dest="never_download",
action="store_true",
default=True,
help="DEPRECATED. Retained only for backward compatibility. This option has no effect. "
"Virtualenv never downloads pip or setuptools.")
parser.add_option(
'--prompt',
dest='prompt',
help='Provides an alternative prompt prefix for this environment.')
parser.add_option(
'--setuptools',
dest='setuptools',
action='store_true',
help="DEPRECATED. Retained only for backward compatibility. This option has no effect.")
parser.add_option(
'--distribute',
dest='distribute',
action='store_true',
help="DEPRECATED. Retained only for backward compatibility. This option has no effect.")
if 'extend_parser' in globals():
extend_parser(parser)
options, args = parser.parse_args()
global logger
if 'adjust_options' in globals():
adjust_options(options, args)
verbosity = options.verbose - options.quiet
logger = Logger([(Logger.level_for_integer(2 - verbosity), sys.stdout)])
if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
env = os.environ.copy()
interpreter = resolve_interpreter(options.python)
if interpreter == sys.executable:
logger.warn('Already using interpreter %s' % interpreter)
else:
logger.notify('Running virtualenv with interpreter %s' % interpreter)
env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
file = __file__
if file.endswith('.pyc'):
file = file[:-1]
popen = subprocess.Popen([interpreter, file] + sys.argv[1:], env=env)
raise SystemExit(popen.wait())
if not args:
print('You must provide a DEST_DIR')
parser.print_help()
sys.exit(2)
if len(args) > 1:
print('There must be only one argument: DEST_DIR (you gave %s)' % (
' '.join(args)))
parser.print_help()
sys.exit(2)
home_dir = args[0]
if os.environ.get('WORKING_ENV'):
logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
logger.fatal('Please deactivate your workingenv, then re-run this script')
sys.exit(3)
if 'PYTHONHOME' in os.environ:
logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
del os.environ['PYTHONHOME']
if options.relocatable:
make_environment_relocatable(home_dir)
return
if not options.never_download:
logger.warn('The --never-download option is for backward compatibility only.')
logger.warn('Setting it to false is no longer supported, and will be ignored.')
create_environment(home_dir,
site_packages=options.system_site_packages,
clear=options.clear,
unzip_setuptools=options.unzip_setuptools,
prompt=options.prompt,
search_dirs=options.search_dirs,
never_download=True,
no_setuptools=options.no_setuptools,
no_pip=options.no_pip,
no_wheel=options.no_wheel,
symlink=options.symlink)
if 'after_install' in globals():
after_install(options, home_dir)
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True, extra_env=None,
remove_from_env=None):
cmd_parts = []
for part in cmd:
if len(part) > 45:
part = part[:20]+"..."+part[-20:]
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
if hasattr(part, 'decode'):
try:
part = part.decode(sys.getdefaultencoding())
except UnicodeDecodeError:
part = part.decode(sys.getfilesystemencoding())
cmd_parts.append(part)
cmd_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.debug("Running command %s" % cmd_desc)
if extra_env or remove_from_env:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
if remove_from_env:
for varname in remove_from_env:
env.pop(varname, None)
else:
env = None
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.fatal(
"Error %s while executing command %s" % (e, cmd_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
encoding = sys.getdefaultencoding()
fs_encoding = sys.getfilesystemencoding()
while 1:
line = stdout.readline()
try:
line = line.decode(encoding)
except UnicodeDecodeError:
line = line.decode(fs_encoding)
if not line:
break
line = line.rstrip()
all_output.append(line)
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
proc.communicate()
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % cmd_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise OSError(
"Command %s failed with error code %s"
% (cmd_desc, proc.returncode))
else:
logger.warn(
"Command %s had error code %s"
% (cmd_desc, proc.returncode))
def filter_install_output(line):
if line.strip().startswith('running'):
return Logger.INFO
return Logger.DEBUG
def find_wheels(projects, search_dirs):
"""Find wheels from which we can import PROJECTS.
Scan through SEARCH_DIRS for a wheel for each PROJECT in turn. Return
a list of the first wheel found for each PROJECT
"""
wheels = []
# Look through SEARCH_DIRS for the first suitable wheel. Don't bother
# about version checking here, as this is simply to get something we can
# then use to install the correct version.
for project in projects:
for dirname in search_dirs:
# This relies on only having "universal" wheels available.
# The pattern could be tightened to require -py2.py3-none-any.whl.
files = glob.glob(os.path.join(dirname, project + '-*.whl'))
if files:
wheels.append(os.path.abspath(files[0]))
break
else:
# We're out of luck, so quit with a suitable error
logger.fatal('Cannot find a wheel for %s' % (project,))
return wheels
def install_wheel(project_names, py_executable, search_dirs=None):
if search_dirs is None:
search_dirs = file_search_dirs()
wheels = find_wheels(['setuptools', 'pip'], search_dirs)
pythonpath = os.pathsep.join(wheels)
findlinks = ' '.join(search_dirs)
cmd = [
py_executable, '-c',
'import sys, pip; sys.exit(pip.main(["install", "--ignore-installed"] + sys.argv[1:]))',
] + project_names
logger.start_progress('Installing %s...' % (', '.join(project_names)))
logger.indent += 2
try:
call_subprocess(cmd, show_stdout=False,
extra_env = {
'PYTHONPATH': pythonpath,
'JYTHONPATH': pythonpath, # for Jython < 3.x
'PIP_FIND_LINKS': findlinks,
'PIP_USE_WHEEL': '1',
'PIP_PRE': '1',
'PIP_NO_INDEX': '1'
}
)
finally:
logger.indent -= 2
logger.end_progress()
def create_environment(home_dir, site_packages=False, clear=False,
unzip_setuptools=False,
prompt=None, search_dirs=None, never_download=False,
no_setuptools=False, no_pip=False, no_wheel=False,
symlink=True):
"""
Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
py_executable = os.path.abspath(install_python(
home_dir, lib_dir, inc_dir, bin_dir,
site_packages=site_packages, clear=clear, symlink=symlink))
install_distutils(home_dir)
if not no_setuptools:
to_install = ['setuptools']
if not no_pip:
to_install.append('pip')
if not no_wheel:
to_install.append('wheel')
install_wheel(to_install, py_executable, search_dirs)
install_activate(home_dir, bin_dir, prompt)
def is_executable_file(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
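    # Illustrative result: on POSIX this is roughly (env, env/lib/pythonX.Y,
    # env/include/pythonX.Y, env/bin); on Windows it is
    # (env, env\Lib, env\Include, env\Scripts).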
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
if is_win:
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
import ctypes
GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
size = max(len(home_dir)+1, 256)
buf = ctypes.create_unicode_buffer(size)
try:
u = unicode
except NameError:
u = str
ret = GetShortPathName(u(home_dir), buf, size)
if not ret:
print('Error: the path "%s" has a space in it' % home_dir)
print('We could not determine the short pathname for it.')
print('Exiting.')
sys.exit(3)
home_dir = str(buf.value)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
if is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
elif is_pypy:
lib_dir = home_dir
inc_dir = join(home_dir, 'include')
bin_dir = join(home_dir, 'bin')
elif not is_win:
lib_dir = join(home_dir, 'lib', py_version)
multiarch_exec = '/usr/bin/multiarch-platform'
if is_executable_file(multiarch_exec):
# In Mageia (2) and Mandriva distros the include dir must be like:
# virtualenv/include/multiarch-x86_64-linux/python2.7
# instead of being virtualenv/include/python2.7
p = subprocess.Popen(multiarch_exec, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
            # stdout.strip() is needed to remove the trailing newline character
inc_dir = join(home_dir, 'include', stdout.strip(), py_version + abiflags)
else:
inc_dir = join(home_dir, 'include', py_version + abiflags)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir
def change_prefix(filename, dst_prefix):
prefixes = [sys.prefix]
if is_darwin:
prefixes.extend((
os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(sys.prefix, "Extras", "lib", "python"),
os.path.join("~", "Library", "Python", sys.version[:3], "site-packages"),
# Python 2.6 no-frameworks
os.path.join("~", ".local", "lib","python", sys.version[:3], "site-packages"),
# System Python 2.7 on OSX Mountain Lion
os.path.join("~", "Library", "Python", sys.version[:3], "lib", "python", "site-packages")))
if hasattr(sys, 'real_prefix'):
prefixes.append(sys.real_prefix)
if hasattr(sys, 'base_prefix'):
prefixes.append(sys.base_prefix)
prefixes = list(map(os.path.expanduser, prefixes))
prefixes = list(map(os.path.abspath, prefixes))
# Check longer prefixes first so we don't split in the middle of a filename
prefixes = sorted(prefixes, key=len, reverse=True)
filename = os.path.abspath(filename)
for src_prefix in prefixes:
if filename.startswith(src_prefix):
_, relpath = filename.split(src_prefix, 1)
if src_prefix != os.sep: # sys.prefix == "/"
assert relpath[0] == os.sep
relpath = relpath[1:]
return join(dst_prefix, relpath)
assert False, "Filename %s does not start with any of these prefixes: %s" % \
(filename, prefixes)
def copy_required_modules(dst_prefix, symlink):
import imp
# If we are running under -p, we need to remove the current
# directory from sys.path temporarily here, so that we
# definitely get the modules from the site directory of
# the interpreter we are running under, not the one
# virtualenv.py is installed under (which might lead to py2/py3
# incompatibility issues)
_prev_sys_path = sys.path
if os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
sys.path = sys.path[1:]
try:
for modname in REQUIRED_MODULES:
if modname in sys.builtin_module_names:
logger.info("Ignoring built-in bootstrap module: %s" % modname)
continue
try:
f, filename, _ = imp.find_module(modname)
except ImportError:
logger.info("Cannot import bootstrap module: %s" % modname)
else:
if f is not None:
f.close()
# special-case custom readline.so on OS X, but not for pypy:
if modname == 'readline' and sys.platform == 'darwin' and not (
is_pypy or filename.endswith(join('lib-dynload', 'readline.so'))):
dst_filename = join(dst_prefix, 'lib', 'python%s' % sys.version[:3], 'readline.so')
elif modname == 'readline' and sys.platform == 'win32':
# special-case for Windows, where readline is not a
# standard module, though it may have been installed in
# site-packages by a third-party package
pass
else:
dst_filename = change_prefix(filename, dst_prefix)
copyfile(filename, dst_filename, symlink)
if filename.endswith('.pyc'):
pyfile = filename[:-1]
if os.path.exists(pyfile):
copyfile(pyfile, dst_filename[:-1], symlink)
finally:
sys.path = _prev_sys_path
def subst_path(prefix_path, prefix, home_dir):
prefix_path = os.path.normpath(prefix_path)
prefix = os.path.normpath(prefix)
home_dir = os.path.normpath(home_dir)
if not prefix_path.startswith(prefix):
logger.warn('Path not in prefix %r %r', prefix_path, prefix)
return
return prefix_path.replace(prefix, home_dir, 1)
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear, symlink=True):
"""Install just the base environment, no distutils patches etc"""
if sys.executable.startswith(bin_dir):
print('Please use the *system* python to run this script')
return
if clear:
rmtree(lib_dir)
## FIXME: why not delete it?
## Maybe it should delete everything with #!/path/to/venv/python in it
logger.notify('Not deleting %s', bin_dir)
if hasattr(sys, 'real_prefix'):
logger.notify('Using real prefix %r' % sys.real_prefix)
prefix = sys.real_prefix
elif hasattr(sys, 'base_prefix'):
logger.notify('Using base prefix %r' % sys.base_prefix)
prefix = sys.base_prefix
else:
prefix = sys.prefix
mkdir(lib_dir)
fix_lib64(lib_dir, symlink)
stdlib_dirs = [os.path.dirname(os.__file__)]
if is_win:
stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
elif is_darwin:
stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
if hasattr(os, 'symlink'):
logger.info('Symlinking Python bootstrap modules')
else:
logger.info('Copying Python bootstrap modules')
logger.indent += 2
try:
# copy required files...
for stdlib_dir in stdlib_dirs:
if not os.path.isdir(stdlib_dir):
continue
for fn in os.listdir(stdlib_dir):
bn = os.path.splitext(fn)[0]
if fn != 'site-packages' and bn in REQUIRED_FILES:
copyfile(join(stdlib_dir, fn), join(lib_dir, fn), symlink)
# ...and modules
copy_required_modules(home_dir, symlink)
finally:
logger.indent -= 2
mkdir(join(lib_dir, 'site-packages'))
import site
site_filename = site.__file__
if site_filename.endswith('.pyc'):
site_filename = site_filename[:-1]
elif site_filename.endswith('$py.class'):
site_filename = site_filename.replace('$py.class', '.py')
site_filename_dst = change_prefix(site_filename, home_dir)
site_dir = os.path.dirname(site_filename_dst)
writefile(site_filename_dst, SITE_PY)
writefile(join(site_dir, 'orig-prefix.txt'), prefix)
site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
if not site_packages:
writefile(site_packages_filename, '')
if is_pypy or is_win:
stdinc_dir = join(prefix, 'include')
else:
stdinc_dir = join(prefix, 'include', py_version + abiflags)
if os.path.exists(stdinc_dir):
copyfile(stdinc_dir, inc_dir, symlink)
else:
logger.debug('No include dir %s' % stdinc_dir)
platinc_dir = distutils.sysconfig.get_python_inc(plat_specific=1)
if platinc_dir != stdinc_dir:
platinc_dest = distutils.sysconfig.get_python_inc(
plat_specific=1, prefix=home_dir)
if platinc_dir == platinc_dest:
# Do platinc_dest manually due to a CPython bug;
# not http://bugs.python.org/issue3386 but a close cousin
platinc_dest = subst_path(platinc_dir, prefix, home_dir)
if platinc_dest:
# PyPy's stdinc_dir and prefix are relative to the original binary
# (traversing virtualenvs), whereas the platinc_dir is relative to
# the inner virtualenv and ignores the prefix argument.
# This seems more evolved than designed.
copyfile(platinc_dir, platinc_dest, symlink)
# pypy never uses exec_prefix, just ignore it
if sys.exec_prefix != prefix and not is_pypy:
if is_win:
exec_dir = join(sys.exec_prefix, 'lib')
elif is_jython:
exec_dir = join(sys.exec_prefix, 'Lib')
else:
exec_dir = join(sys.exec_prefix, 'lib', py_version)
for fn in os.listdir(exec_dir):
copyfile(join(exec_dir, fn), join(lib_dir, fn), symlink)
if is_jython:
# Jython has either jython-dev.jar and javalib/ dir, or just
# jython.jar
for name in 'jython-dev.jar', 'javalib', 'jython.jar':
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(home_dir, name), symlink)
# XXX: registry should always exist after Jython 2.5rc1
src = join(prefix, 'registry')
if os.path.exists(src):
copyfile(src, join(home_dir, 'registry'), symlink=False)
copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
symlink=False)
mkdir(bin_dir)
py_executable = join(bin_dir, os.path.basename(sys.executable))
if 'Python.framework' in prefix:
# OS X framework builds cause validation to break
# https://github.com/pypa/virtualenv/issues/322
if os.environ.get('__PYVENV_LAUNCHER__'):
del os.environ["__PYVENV_LAUNCHER__"]
if re.search(r'/Python(?:-32|-64)*$', py_executable):
# The name of the python executable is not quite what
# we want, rename it.
py_executable = os.path.join(
os.path.dirname(py_executable), 'python')
logger.notify('New %s executable in %s', expected_exe, py_executable)
pcbuild_dir = os.path.dirname(sys.executable)
pyd_pth = os.path.join(lib_dir, 'site-packages', 'virtualenv_builddir_pyd.pth')
if is_win and os.path.exists(os.path.join(pcbuild_dir, 'build.bat')):
logger.notify('Detected python running from build directory %s', pcbuild_dir)
logger.notify('Writing .pth file linking to build directory for *.pyd files')
writefile(pyd_pth, pcbuild_dir)
else:
pcbuild_dir = None
if os.path.exists(pyd_pth):
logger.info('Deleting %s (not Windows env or not build directory python)' % pyd_pth)
os.unlink(pyd_pth)
if sys.executable != py_executable:
## FIXME: could I just hard link?
executable = sys.executable
shutil.copyfile(executable, py_executable)
make_exe(py_executable)
if is_win or is_cygwin:
pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
if os.path.exists(pythonw):
logger.info('Also created pythonw.exe')
shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
python_d = os.path.join(os.path.dirname(sys.executable), 'python_d.exe')
python_d_dest = os.path.join(os.path.dirname(py_executable), 'python_d.exe')
if os.path.exists(python_d):
logger.info('Also created python_d.exe')
shutil.copyfile(python_d, python_d_dest)
elif os.path.exists(python_d_dest):
logger.info('Removed python_d.exe as it is no longer at the source')
os.unlink(python_d_dest)
# we need to copy the DLL to enforce that windows will load the correct one.
# may not exist if we are cygwin.
py_executable_dll = 'python%s%s.dll' % (
sys.version_info[0], sys.version_info[1])
py_executable_dll_d = 'python%s%s_d.dll' % (
sys.version_info[0], sys.version_info[1])
pythondll = os.path.join(os.path.dirname(sys.executable), py_executable_dll)
pythondll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d)
pythondll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d)
if os.path.exists(pythondll):
logger.info('Also created %s' % py_executable_dll)
shutil.copyfile(pythondll, os.path.join(os.path.dirname(py_executable), py_executable_dll))
if os.path.exists(pythondll_d):
logger.info('Also created %s' % py_executable_dll_d)
shutil.copyfile(pythondll_d, pythondll_d_dest)
elif os.path.exists(pythondll_d_dest):
logger.info('Removed %s as the source does not exist' % pythondll_d_dest)
os.unlink(pythondll_d_dest)
if is_pypy:
# make a symlink python --> pypy-c
python_executable = os.path.join(os.path.dirname(py_executable), 'python')
if sys.platform in ('win32', 'cygwin'):
python_executable += '.exe'
logger.info('Also created executable %s' % python_executable)
copyfile(py_executable, python_executable, symlink)
if is_win:
for name in ['libexpat.dll', 'libpypy.dll', 'libpypy-c.dll',
'libeay32.dll', 'ssleay32.dll', 'sqlite3.dll',
'tcl85.dll', 'tk85.dll']:
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(bin_dir, name), symlink)
for d in sys.path:
if d.endswith('lib_pypy'):
break
else:
logger.fatal('Could not find lib_pypy in sys.path')
raise SystemExit(3)
logger.info('Copying lib_pypy')
copyfile(d, os.path.join(home_dir, 'lib_pypy'), symlink)
if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
secondary_exe = os.path.join(os.path.dirname(py_executable),
expected_exe)
py_executable_ext = os.path.splitext(py_executable)[1]
if py_executable_ext.lower() == '.exe':
# python2.4 gives an extension of '.4' :P
secondary_exe += py_executable_ext
if os.path.exists(secondary_exe):
logger.warn('Not overwriting existing %s script %s (you must use %s)'
% (expected_exe, secondary_exe, py_executable))
else:
logger.notify('Also creating executable in %s' % secondary_exe)
shutil.copyfile(sys.executable, secondary_exe)
make_exe(secondary_exe)
if '.framework' in prefix:
if 'Python.framework' in prefix:
logger.debug('MacOSX Python framework detected')
# Make sure we use the embedded interpreter inside
# the framework, even if sys.executable points to
# the stub executable in ${sys.prefix}/bin
# See http://groups.google.com/group/python-virtualenv/
# browse_thread/thread/17cab2f85da75951
original_python = os.path.join(
prefix, 'Resources/Python.app/Contents/MacOS/Python')
if 'EPD' in prefix:
logger.debug('EPD framework detected')
original_python = os.path.join(prefix, 'bin/python')
shutil.copy(original_python, py_executable)
# Copy the framework's dylib into the virtual
# environment
virtual_lib = os.path.join(home_dir, '.Python')
if os.path.exists(virtual_lib):
os.unlink(virtual_lib)
copyfile(
os.path.join(prefix, 'Python'),
virtual_lib,
symlink)
# And then change the install_name of the copied python executable
try:
mach_o_change(py_executable,
os.path.join(prefix, 'Python'),
'@executable_path/../.Python')
except:
e = sys.exc_info()[1]
logger.warn("Could not call mach_o_change: %s. "
"Trying to call install_name_tool instead." % e)
try:
call_subprocess(
["install_name_tool", "-change",
os.path.join(prefix, 'Python'),
'@executable_path/../.Python',
py_executable])
except:
logger.fatal("Could not call install_name_tool -- you must "
"have Apple's development tools installed")
raise
if not is_win:
# Ensure that 'python', 'pythonX' and 'pythonX.Y' all exist
py_exe_version_major = 'python%s' % sys.version_info[0]
py_exe_version_major_minor = 'python%s.%s' % (
sys.version_info[0], sys.version_info[1])
py_exe_no_version = 'python'
required_symlinks = [ py_exe_no_version, py_exe_version_major,
py_exe_version_major_minor ]
py_executable_base = os.path.basename(py_executable)
if py_executable_base in required_symlinks:
# Don't try to symlink to yourself.
required_symlinks.remove(py_executable_base)
for pth in required_symlinks:
full_pth = join(bin_dir, pth)
if os.path.exists(full_pth):
os.unlink(full_pth)
if symlink:
os.symlink(py_executable_base, full_pth)
else:
copyfile(py_executable, full_pth, symlink)
if is_win and ' ' in py_executable:
# There's a bug with subprocess on Windows when using a first
# argument that has a space in it. Instead we have to quote
# the value:
py_executable = '"%s"' % py_executable
# NOTE: keep this check as one line, cmd.exe doesn't cope with line breaks
cmd = [py_executable, '-c', 'import sys;out=sys.stdout;'
'getattr(out, "buffer", out).write(sys.prefix.encode("utf-8"))']
logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc_stdout, proc_stderr = proc.communicate()
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EACCES:
logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
sys.exit(100)
else:
raise e
proc_stdout = proc_stdout.strip().decode("utf-8")
proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
if hasattr(norm_home_dir, 'decode'):
norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
if proc_stdout != norm_home_dir:
logger.fatal(
'ERROR: The executable %s is not functioning' % py_executable)
logger.fatal(
'ERROR: It thinks sys.prefix is %r (should be %r)'
% (proc_stdout, norm_home_dir))
logger.fatal(
'ERROR: virtualenv is not compatible with this system or executable')
if is_win:
logger.fatal(
'Note: some Windows users have reported this error when they '
'installed Python for "Only this user" or have multiple '
'versions of Python installed. Copying the appropriate '
'PythonXX.dll to the virtualenv Scripts/ directory may fix '
'this problem.')
sys.exit(100)
else:
logger.info('Got sys.prefix result: %r' % proc_stdout)
pydistutils = os.path.expanduser('~/.pydistutils.cfg')
if os.path.exists(pydistutils):
logger.notify('Please make sure you remove any previous custom paths from '
'your %s file.' % pydistutils)
## FIXME: really this should be calculated earlier
fix_local_scheme(home_dir, symlink)
if site_packages:
if os.path.exists(site_packages_filename):
logger.info('Deleting %s' % site_packages_filename)
os.unlink(site_packages_filename)
return py_executable
def install_activate(home_dir, bin_dir, prompt=None):
home_dir = os.path.abspath(home_dir)
if is_win or is_jython and os._name == 'nt':
files = {
'activate.bat': ACTIVATE_BAT,
'deactivate.bat': DEACTIVATE_BAT,
'activate.ps1': ACTIVATE_PS,
}
# MSYS needs paths of the form /c/path/to/file
drive, tail = os.path.splitdrive(home_dir.replace(os.sep, '/'))
home_dir_msys = (drive and "/%s%s" or "%s%s") % (drive[:1], tail)
# Run-time conditional enables (basic) Cygwin compatibility
home_dir_sh = ("""$(if [ "$OSTYPE" "==" "cygwin" ]; then cygpath -u '%s'; else echo '%s'; fi;)""" %
(home_dir, home_dir_msys))
files['activate'] = ACTIVATE_SH.replace('__VIRTUAL_ENV__', home_dir_sh)
else:
files = {'activate': ACTIVATE_SH}
        # supplying activate.fish in addition to, not instead of, the
        # bash script support.
files['activate.fish'] = ACTIVATE_FISH
# same for csh/tcsh support...
files['activate.csh'] = ACTIVATE_CSH
files['activate_this.py'] = ACTIVATE_THIS
if hasattr(home_dir, 'decode'):
home_dir = home_dir.decode(sys.getfilesystemencoding())
vname = os.path.basename(home_dir)
for name, content in files.items():
content = content.replace('__VIRTUAL_PROMPT__', prompt or '')
content = content.replace('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname)
content = content.replace('__VIRTUAL_ENV__', home_dir)
content = content.replace('__VIRTUAL_NAME__', vname)
content = content.replace('__BIN_NAME__', os.path.basename(bin_dir))
writefile(os.path.join(bin_dir, name), content)
def install_distutils(home_dir):
distutils_path = change_prefix(distutils.__path__[0], home_dir)
mkdir(distutils_path)
## FIXME: maybe this prefix setting should only be put in place if
## there's a local distutils.cfg with a prefix setting?
home_dir = os.path.abspath(home_dir)
## FIXME: this is breaking things, removing for now:
#distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT)
writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False)
def fix_local_scheme(home_dir, symlink=True):
"""
Platforms that use the "posix_local" install scheme (like Ubuntu with
Python 2.7) need to be given an additional "local" location, sigh.
"""
try:
import sysconfig
except ImportError:
pass
else:
if sysconfig._get_default_scheme() == 'posix_local':
local_path = os.path.join(home_dir, 'local')
if not os.path.exists(local_path):
os.mkdir(local_path)
for subdir_name in os.listdir(home_dir):
if subdir_name == 'local':
continue
copyfile(os.path.abspath(os.path.join(home_dir, subdir_name)), \
os.path.join(local_path, subdir_name), symlink)
def fix_lib64(lib_dir, symlink=True):
"""
    Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
    instead of lib/pythonX.Y. If this is such a platform, we'll just create a
    symlink so lib64 points to lib.
"""
if [p for p in distutils.sysconfig.get_config_vars().values()
if isinstance(p, basestring) and 'lib64' in p]:
# PyPy's library path scheme is not affected by this.
# Return early or we will die on the following assert.
if is_pypy:
logger.debug('PyPy detected, skipping lib64 symlinking')
return
logger.debug('This system uses lib64; symlinking lib64 to lib')
assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
"Unexpected python lib dir: %r" % lib_dir)
lib_parent = os.path.dirname(lib_dir)
top_level = os.path.dirname(lib_parent)
lib_dir = os.path.join(top_level, 'lib')
lib64_link = os.path.join(top_level, 'lib64')
assert os.path.basename(lib_parent) == 'lib', (
"Unexpected parent dir: %r" % lib_parent)
if os.path.lexists(lib64_link):
return
if symlink:
os.symlink('lib', lib64_link)
else:
copyfile('lib', lib64_link)
def resolve_interpreter(exe):
"""
If the executable given isn't an absolute path, search $PATH for the interpreter
"""
# If the "executable" is a version number, get the installed executable for
# that version
python_versions = get_installed_pythons()
if exe in python_versions:
exe = python_versions[exe]
if os.path.abspath(exe) != exe:
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, exe)):
exe = os.path.join(path, exe)
break
if not os.path.exists(exe):
logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
raise SystemExit(3)
if not is_executable(exe):
logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
raise SystemExit(3)
return exe
def is_executable(exe):
"""Checks a file is executable"""
return os.access(exe, os.X_OK)
############################################################
## Relocating the environment:
def make_environment_relocatable(home_dir):
"""
Makes the already-existing environment use relative paths, and takes out
the #!-based environment selection in scripts.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
activate_this = os.path.join(bin_dir, 'activate_this.py')
if not os.path.exists(activate_this):
logger.fatal(
'The environment doesn\'t have a file %s -- please re-run virtualenv '
'on this environment to update it' % activate_this)
fixup_scripts(home_dir, bin_dir)
fixup_pth_and_egg_link(home_dir)
## FIXME: need to fix up distutils.cfg
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
'activate', 'activate.bat', 'activate_this.py',
'activate.fish', 'activate.csh']
def fixup_scripts(home_dir, bin_dir):
if is_win:
new_shebang_args = (
'%s /c' % os.path.normcase(os.environ.get('COMSPEC', 'cmd.exe')),
'', '.exe')
else:
new_shebang_args = ('/usr/bin/env', sys.version[:3], '')
# This is what we expect at the top of scripts:
shebang = '#!%s' % os.path.normcase(os.path.join(
os.path.abspath(bin_dir), 'python%s' % new_shebang_args[2]))
# This is what we'll put:
new_shebang = '#!%s python%s%s' % new_shebang_args
for filename in os.listdir(bin_dir):
filename = os.path.join(bin_dir, filename)
if not os.path.isfile(filename):
# ignore subdirs, e.g. .svn ones.
continue
f = open(filename, 'rb')
try:
try:
lines = f.read().decode('utf-8').splitlines()
except UnicodeDecodeError:
# This is probably a binary program instead
# of a script, so just ignore it.
continue
finally:
f.close()
if not lines:
logger.warn('Script %s is an empty file' % filename)
continue
old_shebang = lines[0].strip()
old_shebang = old_shebang[0:2] + os.path.normcase(old_shebang[2:])
if not old_shebang.startswith(shebang):
if os.path.basename(filename) in OK_ABS_SCRIPTS:
logger.debug('Cannot make script %s relative' % filename)
elif lines[0].strip() == new_shebang:
logger.info('Script %s has already been made relative' % filename)
else:
logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
% (filename, shebang))
continue
logger.notify('Making script %s relative' % filename)
script = relative_script([new_shebang] + lines[1:])
f = open(filename, 'wb')
f.write('\n'.join(script).encode('utf-8'))
f.close()
def relative_script(lines):
"Return a script that'll work in a relocatable environment."
activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this)); del os, activate_this"
# Find the last future statement in the script. If we insert the activation
# line before a future statement, Python will raise a SyntaxError.
activate_at = None
for idx, line in reversed(list(enumerate(lines))):
if line.split()[:3] == ['from', '__future__', 'import']:
activate_at = idx + 1
break
if activate_at is None:
# Activate after the shebang.
activate_at = 1
return lines[:activate_at] + ['', activate, ''] + lines[activate_at:]
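# A quick illustration (doctest-style sketch, not part of the original
# module) of how relative_script() handles a __future__ import -- the
# activation line must land after it to avoid a SyntaxError:
#
#     >>> relative_script(['#!/usr/bin/env python',
#     ...                  'from __future__ import print_function',
#     ...                  'print("hi")'])[:3]
#     ['#!/usr/bin/env python', 'from __future__ import print_function', '']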
def fixup_pth_and_egg_link(home_dir, sys_path=None):
"""Makes .pth and .egg-link files use relative paths"""
home_dir = os.path.normcase(os.path.abspath(home_dir))
if sys_path is None:
sys_path = sys.path
for path in sys_path:
if not path:
path = '.'
if not os.path.isdir(path):
continue
path = os.path.normcase(os.path.abspath(path))
if not path.startswith(home_dir):
logger.debug('Skipping system (non-environment) directory %s' % path)
continue
for filename in os.listdir(path):
filename = os.path.join(path, filename)
if filename.endswith('.pth'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .pth file %s, skipping' % filename)
else:
fixup_pth_file(filename)
if filename.endswith('.egg-link'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .egg-link file %s, skipping' % filename)
else:
fixup_egg_link(filename)
def fixup_pth_file(filename):
lines = []
prev_lines = []
f = open(filename)
prev_lines = f.readlines()
f.close()
for line in prev_lines:
line = line.strip()
if (not line or line.startswith('#') or line.startswith('import ')
or os.path.abspath(line) != line):
lines.append(line)
else:
new_value = make_relative_path(filename, line)
if line != new_value:
logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename))
lines.append(new_value)
if lines == prev_lines:
logger.info('No changes to .pth file %s' % filename)
return
logger.notify('Making paths in .pth file %s relative' % filename)
f = open(filename, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
def fixup_egg_link(filename):
f = open(filename)
link = f.readline().strip()
f.close()
if os.path.abspath(link) != link:
logger.debug('Link in %s already relative' % filename)
return
new_link = make_relative_path(filename, link)
logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link))
f = open(filename, 'w')
f.write(new_link)
f.close()
def make_relative_path(source, dest, dest_is_directory=True):
"""
Make a filename relative, where the filename is dest, and it is
being referred to from the filename source.
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../another-place/src/Directory'
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../home/user/src/Directory'
>>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
'./'
"""
source = os.path.dirname(source)
if not dest_is_directory:
dest_filename = os.path.basename(dest)
dest = os.path.dirname(dest)
dest = os.path.normpath(os.path.abspath(dest))
source = os.path.normpath(os.path.abspath(source))
dest_parts = dest.strip(os.path.sep).split(os.path.sep)
source_parts = source.strip(os.path.sep).split(os.path.sep)
while dest_parts and source_parts and dest_parts[0] == source_parts[0]:
dest_parts.pop(0)
source_parts.pop(0)
full_parts = ['..']*len(source_parts) + dest_parts
if not dest_is_directory:
full_parts.append(dest_filename)
if not full_parts:
# Special case for the current directory (otherwise it'd be '')
return './'
return os.path.sep.join(full_parts)
############################################################
## Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=''):
"""
Creates a bootstrap script, which is like this script but with
extend_parser, adjust_options, and after_install hooks.
This returns a string that (written to disk of course) can be used
as a bootstrap script with your own customizations. The script
will be the standard virtualenv.py script, with your extra text
added (your extra text should be Python code).
If you include these functions, they will be called:
``extend_parser(optparse_parser)``:
You can add or remove options from the parser here.
``adjust_options(options, args)``:
You can change options here, or change the args (if you accept
different kinds of arguments, be sure you modify ``args`` so it is
only ``[DEST_DIR]``).
``after_install(options, home_dir)``:
After everything is installed, this function is called. This
is probably the function you are most likely to use. An
example would be::
def after_install(options, home_dir):
subprocess.call([join(home_dir, 'bin', 'easy_install'),
'MyPackage'])
subprocess.call([join(home_dir, 'bin', 'my-package-script'),
'setup', home_dir])
This example immediately installs a package, and runs a setup
script from that package.
If you provide something like ``python_version='2.5'`` then the
script will start with ``#!/usr/bin/env python2.5`` instead of
``#!/usr/bin/env python``. You can use this when the script must
be run with a particular Python version.
"""
filename = __file__
if filename.endswith('.pyc'):
filename = filename[:-1]
f = codecs.open(filename, 'r', encoding='utf-8')
content = f.read()
f.close()
py_exe = 'python%s' % python_version
content = (('#!/usr/bin/env %s\n' % py_exe)
+ '## WARNING: This file is generated\n'
+ content)
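    # The split string literal below ('##EXT' 'END##') keeps this source
    # line from matching the '##EXTEND##' placeholder itself when the
    # file's own contents are searched for the marker.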
return content.replace('##EXT' 'END##', extra_text)
##EXTEND##
def convert(s):
b = base64.b64decode(s.encode('ascii'))
return zlib.decompress(b).decode('utf-8')
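# convert() undoes a simple pack step. A minimal sketch of the inverse
# (illustrative only; the real packer lives outside this module):
#
#     import base64, zlib
#     def pack(text):
#         # utf-8 encode, deflate, then base64 so the blob is source-safe
#         return base64.b64encode(zlib.compress(text.encode('utf-8')))
#
# so that convert(pack(s).decode('ascii')) == s for any text s.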
##file site.py
SITE_PY = convert("""
eJzFPf1z2zaWv/OvwMqToZTKdOJ0e3tO3RsncVrfuYm3yc7m1vXoKAmyWFMkS5C2tTd3f/u9DwAE
+CHb2+6cphNLJPDw8PC+8PAeOhqNTopCZkuxyZd1KoWScblYiyKu1kqs8lJU66Rc7hdxWW3h6eIm
vpZKVLlQWxVhqygInv/GT/BcfF4nyqAA3+K6yjdxlSziNN2KZFPkZSWXYlmXSXYtkiypkjhN/g4t
8iwSz387BsFZJmDmaSJLcStLBXCVyFfiYlut80yM6wLn/DL6Y/xqMhVqUSZFBQ1KjTNQZB1XQSbl
EtCElrUCUiaV3FeFXCSrZGEb3uV1uhRFGi+k+K//4qlR0zAMVL6Rd2tZSpEBMgBTAqwC8YCvSSkW
+VJGQryRixgH4OcNsQKGNsU1U0jGLBdpnl3DnDK5kErF5VaM53VFgAhlscwBpwQwqJI0De7y8kZN
YElpPe7gkYiZPfzJMHvAPHH8LucAjh+z4C9Zcj9l2MA9CK5aM9uUcpXcixjBwk95Lxcz/WycrMQy
Wa2ABlk1wSYBI6BEmswPClqOb/UKfXdAWFmujGEMiShzY35JPaLgrBJxqoBt6wJppAjzd3KexBlQ
I7uF4QAikDToG2eZqMqOQ7MTOQAocR0rkJKNEuNNnGTArD/GC0L7r0m2zO/UhCgAq6XEL7Wq3PmP
ewgArR0CTANcLLOadZYmNzLdTgCBz4B9KVWdVigQy6SUiyovE6kIAKC2FfIekJ6KuJSahMyZRm6n
RH+iSZLhwqKAocDjSyTJKrmuS5IwsUqAc4Er3n/8Sbw7fXN28kHzmAHGMnu9AZwBCi20gxMMIA5q
VR6kOQh0FJzjHxEvlyhk1zg+4NU0OHhwpYMxzL2I2n2cBQey68XVw8AcK1AmNFZA/f4bukzVGujz
Pw+sdxCcDFGFJs7f7tY5yGQWb6RYx8xfyBnBtxrOd1FRrV8DNyiEUwGpFC4OIpggPCCJS7NxnklR
AIulSSYnAVBoTm39VQRW+JBn+7TWLU4ACGWQwUvn2YRGzCRMtAvrNeoL03hLM9NNArvOm7wkxQH8
ny1IF6VxdkM4KmIo/jaX10mWIULIC0G4F9LA6iYBTlxG4pxakV4wjUTI2otbokjUwEvIdMCT8j7e
FKmcsviibt2tRmgwWQmz1ilzHLSsSL3SqjVT7eW9w+hLi+sIzWpdSgBezz2hW+X5VMxBZxM2Rbxh
8arucuKcoEeeqBPyBLWEvvgdKHqiVL2R9iXyCmgWYqhgladpfgckOwoCIfawkTHKPnPCW3gH/wJc
/DeV1WIdBM5IFrAGhcgPgUIgYBJkprlaI+Fxm2bltpJJMtYUebmUJQ31OGIfMOKPbIxzDT7klTZq
PF1c5XyTVKiS5tpkJmzxsrBi/fia5w3TAMutiGamaUOnDU4vLdbxXBqXZC5XKAl6kV7bZYcxg54x
yRZXYsNWBt4BWWTCFqRfsaDSWVWSnACAwcIXZ0lRp9RIIYOJGAbaFAR/E6NJz7WzBOzNZjlAhcTm
ewH2B3D7O4jR3ToB+iwAAmgY1FKwfPOkKtFBaPRR4Bt905/HB049W2nbxEOu4iTVVj7OgjN6eFqW
JL4LWWCvqSaGghlmFbp21xnQEcV8NBoFgXGHtsp8zVVQldsjYAVhxpnN5nWChm82Q1Ovf6iARxHO
wF43287CAw1hOn0AKjldVmW+wdd2bp9AmcBY2CPYExekZSQ7yB4nvkbyuSq9ME3RdjvsLFAPBRc/
nb4/+3L6SRyLy0alTdv67ArGPM1iYGuyCMBUrWEbXQYtUfElqPvEezDvxBRgz6g3ia+Mqxp4F1D/
XNb0Gqax8F4Gpx9O3pyfzv7y6fSn2aezz6eAINgZGezRlNE81uAwqgiEA7hyqSJtX4NOD3rw5uST
fRDMEjX75mtgN3gyvpYVMHE5hhlPRbiJ7xUwaDilphPEsdMALHg4mYjvxOHz568OCVqxLbYADMyu
0xQfzrRFnyXZKg8n1PgXdumPWUlp/+3y6OsrcXwswl/i2zgMwIdqmjJL/Eji9HlbSOhawZ9xriZB
sJQrEL0biQI6fk5+8YQ7wJJAy1zb6V/yJDPvmSvdIUh/jKkH4DCbLdJYKWw8m4VABOrQ84EOETvX
KHVj6Fhs3a4TjQp+SgkLm2GXKf7Tg2I8p36IBqPodjGNQFw3i1hJbkXTh36zGeqs2WysBwRhJokB
h4vVUChME9RZZQJ+LXEe6rC5ylP8ifBRC5AA4tYKtSQukt46RbdxWks1diYFRByPW2RERZso4kdw
UcZgiZulm0za1DQ8A82AfGkOWrRsUQ4/e+DvgLoymzjc6PHei2mGmP477zQIB3A5Q1T3SrWgsHYU
F6cX4tWLw310Z2DPubTU8ZqjhU6yWtqHK1gtIw+MMPcy8uLSZYV6Fp8e7Ya5iezKdFlhpZe4lJv8
Vi4BW2RgZ5XFT/QGduYwj0UMqwh6nfwBVqHGb4xxH8qzB2lB3wGotyEoZv3N0u9xMEBmChQRb6yJ
1HrXz6awKPPbBJ2N+Va/BFsJyhItpnFsAmfhPCZDkwgaArzgDCl1J0NQh2XNDivhjSDRXiwbxRoR
uHPU1Ff09SbL77IZ74SPUemOJ5Z1UbA082KDZgn2xHuwQoBkDhu7hmgMBVx+gbK1D8jD9GG6QFna
WwAgMPSKtmsOLLPVoynyrhGHRRiT14KEt5ToL9yaIWirZYjhQKK3kX1gtARCgslZBWdVg2YylDXT
DAZ2SOJz3XnEW1AfQIuKEZjNsYbGjQz9Lo9AOYtzVyk5/dAif/nyhdlGrSm+gojNcdLoQqzIWEbF
FgxrAjrBeGQcrSE2uAPnFsDUSrOm2P8k8oK9MVjPCy3b4AfA7q6qiqODg7u7u0hHF/Ly+kCtDv74
p2+++dML1onLJfEPTMeRFh1qiw7oHXq00bfGAn1nVq7Fj0nmcyPBGkvyysgVRfy+r5NlLo72J1Z/
Ihc3Zhr/Na4MKJCZGZSpDLQdNRg9U/vPoldqJJ6RdbZtxxP2S7RJtVbMt7rQo8rBEwC/ZZHXaKob
TlDiK7BusENfynl9HdrBPRtpfsBUUU7Hlgf2X14hBj5nGL4ypniGWoLYAi2+Q/qfmG1i8o60hkDy
oonq7J63/VrMEHf5eHm3vqYjNGaGiULuQInwmzxaAG3jruTgR7u2aPcc19Z8PENgLH1gmFc7lmMU
HMIF12LqSp3D1ejxgjTdsWoGBeOqRlDQ4CTOmdoaHNnIEEGid2M2+7ywugXQqRU5NPEBswrQwh2n
Y+3arOB4QsgDx+IlPZHgIh913r3gpa3TlAI6LR71qMKAvYVGO50DX44NgKkYlX8ZcUuzTfnYWhRe
gx5gOceAkMFWHWbCN64PONob9bBTx+oP9WYa94HARRpzLOpR0AnlYx6hVCBNxdjvOcTilrjdwXZa
HGIqs0wk0mpAuNrKo1eodhqmVZKh7nUWKVqkOXjFVisSIzXvfWeB9kH4uM+YaQnUZGjI4TQ6Jm/P
E8BQt8Pw2XWNgQY3DoMYbRJF1g3JtIZ/wK2g+AYFo4CWBM2CeayU+RP7HWTOzld/GWAPS2hkCLfp
kBvSsRgajnm/J5CMOhoDUpABCbvCSK4jq4MUOMxZIE+44bUclG6CESmQM8eCkJoB3Omlt8HBJxGe
gJCEIuT7SslCfCVGsHxtUX2c7v5dudQEIcZOA3IVdPTi2I1sOFGN41aUw2doP75BZyVFDhw8B5fH
DfS7bG6Y1gZdwFn3FbdFCjQyxWFGExfVK0MYN5j8h2OnRUMsM4hhKG8g70jHjDQJ7HJr0LDgBoy3
5u2x9GM3YoF9x2GuDuXmHvZ/YZmoRa5Cipm0YxfuR3NFlzYW2/NkPoI/3gKMJlceJJnq+AVGWf6B
QUIPetgH3ZsshkWWcXmXZCEpME2/Y39pOnhYUnpG7uATbacOYKIY8Tx4X4KA0NHnAYgTagLYlctQ
abe/C3bnFEcWLncfeW7z5dGrqy5xp0MRHvvpX6rT+6qMFa5WyovGQoGr1TXgqHRhcnG21YeX+nAb
twllrmAXKT5++iKQEBzXvYu3T5t6w/CIzYNz8j4GddBrD5KrNTtiF0AEtSIyykH4dI58PLJPndyO
iT0ByJMYZseiGEiaT/4ROLsWCsbYX24zjKO1VQZ+4PU3X896IqMukt98PXpglBYx+sR+3PIE7cic
VLBrtqWMU3I1nD4UVMwa1rFtignrc9r+aR676vE5NVo29t3fAj8GCobUJfgIL6YN2bpTxY/vTg3C
03ZqB7DObtV89mgRYG+fz3+BHbLSQbXbOEnpXAEmv7+PytVs7jle0a89PEg7FYxDgr79l7p8AdwQ
cjRh0p2OdsZOTMC5ZxdsPkWsuqjs6RyC5gjMywtwjz+HFU6ve+B7Bge/r7p8IiBvTqMeMmpbbIZ4
wQclhz1K9gnzfvqMf9dZP27mw4L1/zHLF/+cST5hKgaaNh4+rH5iuXbXAHuEeRpwO3e4hd2h+axy
ZZw7VklKPEfd9VzcUboCxVbxpAigLNnv64GDUqoPvd/WZclH16QCC1nu43HsVGCmlvH8ek3Mnjj4
ICvExDZbUKzayevJ+4Qv1NFnO5Ow2Tf0c+c6NzErmd0mJfQFhTsOf/j442nYb0IwjgudHm9FHu83
INwnMG6oiRM+pQ9T6Cld/nH10d66+AQ1GQEmIqzJ1iVsJxBs4gj9a/BARMg7sOVjdtyhL9ZycTOT
lDqAbIpdnaD4W3yNmNiMAj//S8UrSmKDmSzSGmnFjjdmH67qbEHnI5UE/0qnCmPqECUEcPhvlcbX
Ykydlxh60txI0anbuNTeZ1HmmJwq6mR5cJ0shfy1jlPc1svVCnDBwyv9KuLhKQIl3nFOAyctKrmo
y6TaAglileuzP0p/cBrOtzzRsYckH/MwATEh4kh8wmnjeybc0pDLBAf8Ew+cJO67sYOTrBDRc3if
5TMcdUY5vlNGqnsuT4+D9gg5ABgBUJj/aKIjd/4bSa/cA0Zac5eoqCU9UrqRhpycMYQynmCkg3/T
T58RXd4awPJ6GMvr3Vhet7G87sXy2sfyejeWrkjgwtqglZGEvsBV+1ijN9/GjTnxMKfxYs3tMPcT
czwBoijMBtvIFKdAe5EtPt8jIKS2nQNnetjkzyScVFrmHALXIJH78RBLb+ZN8rrTmbJxdGeeinFn
h3KI/L4HUUSpYnPqzvK2jKs48uTiOs3nILYW3WkDYCra6UQcK81uZ3OO7rYs1ejiPz//8PEDNkdQ
I5PeQN1wEdGw4FTGz+PyWnWlqdn8FcCO1NJPxKFuGuDeIyNrPMoe//OOMjyQccQdZSjkogAPgLK6
bDM39ykMW891kpR+zkzOh03HYpRVo2ZSA0Q6ubh4d/L5ZEQhv9H/jlyBMbT1pcPFx7SwDbr+m9vc
Uhz7gFDr2FZj/Nw5ebRuOOJhG2vAdjzf1oPDxxjs3jCBP8t/KqVgSYBQkQ7+PoVQj945/Kb9UIc+
hhE7yX/uyRo7K/adI3uOi+KIft+xQ3sA/7AT9xgzIIB2ocZmZ9DslVtK35rXHRR1gD7S1/vNe832
1qu9k/EpaifR4wA6lLXNht0/75yGjZ6S1ZvT788+nJ+9uTj5/IPjAqIr9/HTwaE4/fGLoPwQNGDs
E8WYGlFhJhIYFrfQSSxz+K/GyM+yrjhIDL3enZ/rk5oNlrpg7jPanAiecxqThcZBM45C24c6/wgx
SvUGyakponQdqjnC/dKG61lUrvOjqVRpjs5qrbdeulbM1JTRuXYE0geNXVIwCE4xg1eUxV6ZXWHJ
J4C6zqoHKW2jbWJISkHBTrqAc/5lTle8QCl1hidNZ63oL0MX1/AqUkWawE7udWhlSXfD9JiGcfRD
e8DNePVpQKc7jKwb8qwHsUCr9Trkuen+k4bRfq0Bw4bB3sG8M0npIZSBjcltIsRGfJITynv4apde
r4GCBcODvgoX0TBdArOPYXMt1glsIIAn12B9cZ8AEFor4R8IHDnRAZljdkb4drPc/3OoCeK3/vnn
nuZVme7/TRSwCxKcShT2ENNt/A42PpGMxOnH95OQkaPUXPHnGssDwCGhAKgj7ZS/xCfos7GS6Urn
l/j6AF9oP4Fet7qXsih1937XOEQJeKbG5DU8U4Z+IaZ7WdhTnMqkBRorHyxmWEHopiGYz574tJZp
qvPdz96dn4LviMUYKEF87nYKw3G8BI/QdfIdVzi2QOEBO7wukY1LdGEpyWIZec16g9YoctTby8uw
60SB4W6vThS4jBPloj3GaTMsU04QISvDWphlZdZutUEKu22I4igzzBKzi5ISWH2eAF6mpzFviWCv
hKUeJgLPp8hJVpmMxTRZgB4FlQsKdQpCgsTFekbivDzjGHheKlMGBQ+LbZlcrys83YDOEZVgYPMf
T76cn32gsoTDV43X3cOcU9oJTDmJ5BhTBDHaAV/ctD/kqtmsj2f1K4SB2gf+tF9xdsoxD9Dpx4FF
/NN+xXVox85OkGcACqou2uKBGwCnW5/cNLLAuNp9MH7cFMAGMx8MxSKx7EUnerjz63KibdkyJRT3
MS+fcICzKmxKmu7spqS1P3qOqwLPuZbj/kbwtk+2zGcOXW86b4aS39xPRwqxJBYw6rb2xzDZYZ2m
ejoOsw1xC21rtY39OXNipU67RYaiDEQcu50nLpP1K2HdnDnQS6PuABPfanSNJPaq8tHP2Uh7GB4m
ltidfYrpSGUsZAQwkiF17U8NPhRaBFAglP07diR3Onl+6M3RsQYPz1HrLrCNP4Ai1Lm4VOORl8CJ
8OVXdhz5FaGFevRIhI6nkskst3li+Llbo1f50p9jrwxQEBPFroyzazlmWFMD8yuf2AMhWNK2Hqkv
k6s+wyLOwDm9H+Dwrlz0H5wY1FqM0Gl3I7dtdeSTBxv0loLsJJgPvozvQPcXdTXmlRw4h+6tpRuG
+jBEzD6Epvr0fRxiOObXcGB9GsC91NCw0MP7deDsktfGOLLWPraqmkL7QnuwixK2ZpWiYxmnONH4
otYLaAzucWPyR/apThSyv3vqxJyYkAXKg7sgvbmNdINWOGHE5UpcOZpQOnxTTaPfLeWtTMFogJEd
Y7XDL7baYRLZcEpvHthvxu5ie7Htx43eNJgdmXIMRIAKMXoDPbsQanDAFf5Z70Ti7Iac47d/PZuK
tx9+gn/fyI9gQbHmcSr+BqOLt3kJ20ou2qXbFLCAo+L9Yl4rLIwkaHRCwRdPoLd24ZEXT0N0ZYlf
UmIVpMBk2nLDt50AijxBKmRv3ANTLwG/TUFXywk1DmLfWoz0S6TBcI0L1oUc6JbRutqkaCac4Eiz
iJej87O3px8+nUbVPTK2+Tlygid+HhZORx8Nl3gMNhX2yaLGJ1eOv/yDTIsed1nvNU29DO41RQjb
kcLuL/kmjdjuKeISAwai2C7zRYQtgdO5RK+6A/954mwrH7TvnnFFWOOJPjxrnHh8DNQQP7f1zwga
Uh89J+pJCMVzrBXjx9Go3wJPBUW04c/zm7ulGxDXRT80wTamzazHfnerAtdMZw3PchLhdWyXwdSB
pkmsNvOFWx/4MRP6IhRQbnS8IVdxnVZCZrCVor093UgBCt4t6WMJYVZhK0Z1bhSdSe/irXJyj2Il
RjjqiIrq8RyGAoWw9f4xvmEzgLWGouYSaIBOiNK2KXe6qnqxZgnmnRBRryff4C7JXrnJL5rCPChv
jBeN/wrzRG+RMbqWlZ4/PxhPLl82CQ4UjF54Bb2LAoydyyZ7oDGL58+fj8S/Pez0MCpRmuc34I0B
7F5n5ZxeDxhsPTm7Wl2H3ryJgB8Xa3kJD64oaG6f1xlFJHd0pQWR9q+BEeLahJYZTfuWOeZYXcnn
y9yCz6m0wfhLltB1RxhRkqhs9a1RGG0y0kQsCYohjNUiSUKOTsB6bPMaa/Ewuqj5Rd4DxycIZopv
8WCMd9hrdCwpb9Zyj0XnWIwI8IhSyng0KmamajTAc3ax1WjOzrKkaspIXrhnpvoKgMreYqT5SsR3
KBlmHi1iOGWdHqs2jnW+k0W9jUq+uHTjjK1Z8uuHcAfWBknLVyuDKTw0i7TIZbkw5hRXLFkklQPG
tEM43JkubyLrEwU9KI1AvZNVWFqJtm//YNfFxfQjHR/vm5F01lBlL8TimFCctfIKo6gZn6JPlpCW
b82XCYzygaLZ2hPwxhJ/0LFUrCHw7u1wyxnrTN/HwWkbzSUdAIfugLIK0rKjpyOci8csfGbagVs0
8EM7c8LtNimrOk5n+tqHGfppM3uervG0ZXA7CzyttwK+fQ6O777O2AfHwSTXID0x49ZUZByLlY5M
RG5lmV+EVeTo5R2yrwQ+BVJmOTP10CZ2dGnZ1Raa6gRHR8UjqK9M8dKAQ26qZjoFJy7mU0pvMuUO
A86zn29JV1eI78T41VQctnY+i2KLNzkBss+Woe+KUTeYihMMMHNs34shvjsW45dT8ccd0KOBAY4O
3RHa+9gWhEEgr66eTMY0mRPZwr4U9of76hxG0PSM4+SqTf4umb4lKv1ri0pcIagTlV+2E5VbYw/u
WzsfH8lwA4pjlcjl/jOFJNRIN7p5mMEJPyyg37M5Wrp2vKmoocK5OWxG7ho96GhE4zbbQUxRulZf
XL+LuoYNp71zwKTJtFIV7S1zmMao0WsRFQDM+o7S8Bve7QLvNSlc/2zwiFUXAViwPREEXenJB2ZN
w0ZQH3QEn6QBHmAUEeJhaqMoXMl6goiEdA8OMdFXrUNsh+N/d+bhEoOho9AOlt98vQtPVzB7izp6
FnR3pYUnsra8ollu8+kPzHmM0tf1NwmMA6URHXBWzVWV5GYeYfYy30GT2yzmDV4GSSfTaBJT6bpN
vJXmW7/Qj6HYASWTwVqAJ1Wv8CD5lu62PFGU9IZX1Hx9+HJqKoMZkJ7Aq+jVV/oKSOpmLj/wfeyp
3rvBS93vMPoXB1hS+b3tq85uhqZ13LoLyh8spOjZJJpZOjSG6eE6kGbNYoF3JjbEZN/aXgDyHryd
Ofg55vLTHBw22JBGfei6GqOR3iHVNiDAD5uMIcl5VNdGkSLSu4RtSHnuUpxPFgXdq9+CYAgBOX8d
8xt0BeviyIbYjE3Bk8+xm82Jn+qmt+6M7Qka2+om3DV97r9r7rpFYGdukhk6c/frS10a6L7DVrSP
Bhze0IR4VIlEo/H7jYlrB6Y6h6Y/Qq8/SH63E850wKw8BMZk7GC8n9hTY2/M/iZeuN8xIWyfL2R2
y4l7nY3WtDs2o83xj/EUOPkFn9sbBiijaak5kPdLdMPejHNkZ/L6Ws1ivN1xRptsyufq7J7Mtu09
Xc4nY7U1uy28tAhAGG7Smbducj0wBuhKvmWa06Gc22kEDU1Jw04WskqWbBL01g7ARRwxpf4mEM9p
xKNUYqBb1WVRwm54pO8i5jydvtTmBqgJ4G1idWNQNz2m+mpaUqyUHGZKkDlO20ryASKwEe+YhtnM
vgNeedFcs5BMLTPIrN7IMq6aK4b8jIAENl3NCFR0jovrhOcaqWxxiYtYYnnDQQoDZPb7V7Cx9DbV
O+5VmFht93h2oh465PuUKxscY2S4OLm31wu611ot6Wpr1zu0zRqus1cqwTKYu/JIR+pYGb/V93fx
HbMcyUf/0uEfkHe38tLPQrfqjL1bi4bzzFUI3Qub8MYAMs599zB2OKB742JrA2zH9/WFZZSOhznQ
2FJR++S9CqcZbdJEkDBh9IEIkl8U8MQIkgf/kREkfWsmGBqNj9YDvWUCD4SaWD24V1A2jAB9ZkAk
PMBuXWBoTOXYTbovcpXcj+yF0qwrnUo+Yx6QI7t3kxEIvmpSuRnK3lVwuyJIvnTR4+/PP745OSda
zC5O3v7HyfeUlIXHJS1b9egQW5bvM7X3vfRvN9ymE2n6Bm+w7bkhlmuYNITO+04OQg+E/nq1vgVt
KzL39VCHTt1PtxMgvnvaLahDKrsXcscv0zUmbvpMK0870E85qdb8cjITzCNzUsfi0JzEmffN4YmW
0U5seWjhnPTWrjrR/qq+BXQg7j2xSda0Anhmgvxlj0xMxYwNzLOD0v7ffFBmOFYbmht0QAoX0rnJ
kS5xZFCV//8TKUHZxbi3Y0dxau/mpnZ8PKTspfN49ruQkSGIV+436s7PFfalTAeoEASs8PQ9hYyI
0X/6QNWmHzxT4nKfCov3Udlc2V+4Ztq5/WuCSQaVve9LcYISH7NC41WduokDtk+nAzl9dBqVr5xK
FtB8B0DnRjwVsDf6S6wQ51sRwsZRu2SYHEt01Jf1Ocij3XSwN7R6IfaHyk7dskshXg43XLYqO3WP
Q+6hHuihalPc51hgzNIcqicV3xFkPs4UdMGX53zgGbre9sPX28uXR/ZwAfkdXzuKhLLJRo5hv3Sy
MXdeKul0J2Ypp5Suh3s1JySsW1w5UNknGNrbdEpSBvY/Js+BIY289/0hM9PDu3p/1MbUst4RTEmM
n6kJTcsp4tG42yeT7nQbtdUFwgVJjwDSUYEAC8F0dKOTILrlLO/xC70bnNd0Ha97whQ6UkHJYj5H
cA/j+zX4tbtTIfGjujOKpj83aHOgXnIQbvYduNXEC4UMm4T21Bs+GHABuCa7v//LR/TvpjHa7oe7
/Grb6lVvHSD7spj5iplBLRKZxxEYGdCbY9LWWC5hBB2voWno6DJUMzfkC3T8KJsWL9umDQY5szPt
AVijEPwfucjncQ==
""")
##file activate.sh
ACTIVATE_SH = convert("""
eJytVVFvokAQfudXTLEPtTlLeo9tvMSmJpq02hSvl7u2wRUG2QR2DSxSe7n/frOACEVNLlceRHa+
nfl25pvZDswCnoDPQ4QoTRQsENIEPci4CsBMZBq7CAsuLOYqvmYKTTj3YxnBgiXBudGBjUzBZUJI
BXEqgCvweIyuCjeG4eF2F5x14bcB9KQiQQWrjSddI1/oQIx6SYYeoFjzWIoIhYI1izlbhJjkKO7D
M/QEmKfO9O7WeRo/zr4P7pyHwWxkwitcgwpQ5Ej96OX+PmiFwLeVjFUOrNYKaq1Nud3nR2n8nI2m
k9H0friPTGVsUdptaxGrTEfpNVFEskxpXtUkkCkl1UNF9cgLBkx48J4EXyALuBtAwNYIjF5kcmUU
abMKmMq1ULoiRbgsDEkTSsKSGFCJ6Z8vY/2xYiSacmtyAfCDdCNTVZoVF8vSTQOoEwSnOrngBkws
MYGMBMg8/bMBLSYKS7pYEXP0PqT+ZmBT0Xuy+Pplj5yn4aM9nk72JD8/Wi+Gr98sD9eWSMOwkapD
BbUv91XSvmyVkICt2tmXR4tWmrcUCsjWOpw87YidEC8i0gdTSOFhouJUNxR+4NYBG0MftoCTD9F7
2rTtxG3oPwY1b2HncYwhrlmj6Wq924xtGDWqfdNxap+OYxplEurnMVo9RWks+rH8qKEtx7kZT5zJ
4H7oOFclrN6uFe+d+nW2aIUsSgs/42EIPuOhXq+jEo3S6tX6w2ilNkDnIpHCWdEQhFgwj9pkk7FN
l/y5eQvRSIQ5+TrL05lewxWpt/Lbhes5cJF3mLET1MGhcKCF+40tNWnUulxrpojwDo2sObdje3Bz
N3QeHqf3D7OjEXMVV8LN3ZlvuzoWHqiUcNKHtwNd0IbvPGKYYM31nPKCgkUILw3KL+Y8l7aO1ArS
Ad37nIU0fCj5NE5gQCuC5sOSu+UdI2NeXg/lFkQIlFpdWVaWZRfvqGiirC9o6liJ9FXGYrSY9mI1
D/Ncozgn13vJvsznr7DnkJWXsyMH7e42ljdJ+aqNDF1bFnKWFLdj31xtaJYK6EXFgqmV/ymD/ROG
+n8O9H8f5vsGOWXsL1+1k3g=
""")
##file activate.fish
ACTIVATE_FISH = convert("""
eJydVW2P2jgQ/s6vmAZQoVpA9/WkqqJaTou0u6x2uZVOVWWZZEKsS+yc7UDpr+84bziQbauLxEvs
eXnsZ56ZIWwTYSAWKUJWGAs7hMJgBEdhEwiMKnSIsBNywUMrDtziPBYmCeBDrFUG7v8HmCTW5n8u
Fu7NJJim81Bl08EQTqqAkEupLOhCgrAQCY2hTU+DQVxIiqgkRNiEBphFEKy+kd1BaFvwFOUBuIxA
oy20BKtAKp3xFMo0QNtCK5mhtMEA6BmSpUELKo38TThwLfguRVNaiRgs0llnEoIR29zfstf18/bv
5T17Wm7vAiiN3ONCzfbfwC3DtWXXDqHfAGX0q6z/bO82j3ebh1VwnbrduwTQbvwcRtesAfMGor/W
L3fs6Xnz8LRlm9fV8/P61sM0LDNwCZjl9gSpCokJRzpryGQ5t8kNGFUt51QjOZGu0Mj35FlYlXEr
yC09EVOp4lEXfF84Lz1qbhBsgl59vDedXI3rTV03xipduSgt9kLytI3XmBp3aV6MPoMQGNUU62T6
uQdeefTy1Hfj10zVHg2pq8fXDoHBiOv94csfXwN49xECqWREy7pwukKfvxdMY2j23vXDPuuxxeE+
JOdCOhxCE3N44B1ZeSLuZh8Mmkr2wEPAmPfKWHA2uxIRjEopdbQYjDz3BWOf14/scfmwoki1eQvX
ExBdF60Mqh+Y/QcX4uiH4Amwzx79KOVFtbL63sXJbtcvy8/3q5rupmO5CnE91wBviQAhjUUegYpL
vVEbpLt2/W+PklRgq5Ku6mp+rpMhhCo/lXthQTxJ2ysO4Ka0ad97S7VT/n6YXus6fzk3fLnBZW5C
KDC6gSO62QDqgFqLCCtPmjegjnLeAdArtSE8VYGbAJ/aLb+vnQutFhk768E9uRbSxhCMzdgEveYw
IZ5ZqFKl6+kz7UR4U+buqQZXu9SIujrAfD7f0FXpozB4Q0gwp31H9mVTZGGC4b871/wm7lvyDLu1
FUyvTj/yvD66k3UPTs08x1AQQaGziOl0S1qRkPG9COtBTSTWM9NzQ4R64B+Px/l3tDzCgxv5C6Ni
e+QaF9xFWrxx0V/G5uvYQOdiZzvYpQUVQSIsTr1TTghI33GnPbTA7/GCqcE3oE3GZurq4HeQXQD6
32XS1ITj/qLjN72ob0hc5C9bzw8MhfmL
""")
##file activate.csh
ACTIVATE_CSH = convert("""
eJx9VG1P2zAQ/u5fcYQKNgTNPtN1WxlIQ4KCUEGaxuQ6yYVYSuzKdhqVX7+zk3bpy5YPUXL3PPfc
ne98DLNCWshliVDV1kGCUFvMoJGugMjq2qQIiVSxSJ1cCofD1BYRnOVGV0CfZ0N2DD91DalQSjsw
tQLpIJMGU1euvPe7QeJlkKzgWixlhnAt4aoUVsLnLBiy5NtbJWQ5THX1ZciYKKWwkOFaE04dUm6D
r/zh7pq/3D7Nnid3/HEy+wFHY/gEJydg0aFaQrBFgz1c5DG1IhTs+UZgsBC2GMFBlaeH+8dZXwcW
VPvCjXdlAvCfQsE7al0+07XjZvrSCUevR5dnkVeKlFYZmUztG4BdzL2u9KyLVabTU0bdfg7a0hgs
cSmUg6UwUiQl2iHrcbcVGNvPCiLOe7+cRwG13z9qRGgx2z6DHjfm/Op2yqeT+xvOLzs0PTKHDz2V
tkckFHoQfQRXoGJAj9el0FyJCmEMhzgMS4sB7KPOE2ExoLcSieYwDvR+cP8cg11gKkVJc2wRcm1g
QhYFlXiTaTfO2ki0fQoiFM4tLuO4aZrhOzqR4dIPcWx17hphMBY+Srwh7RTyN83XOWkcSPh1Pg/k
TXX/jbJTbMtUmcxZ+/bbqOsy82suFQg/BhdSOTRhMNBHlUarCpU7JzBhmkKmRejKOQzayQe6MWoa
n1wqWmuh6LZAaHxcdeqIlVLhIBJdO9/kbl0It2oEXQj+eGjJOuvOIR/YGRqvFhttUB2XTvLXYN2H
37CBdbW2W7j2r2+VsCn0doVWcFG1/4y1VwBjfwAyoZhD
""")
##file activate.bat
ACTIVATE_BAT = convert("""
eJx9UdEKgjAUfW6wfxjiIH+hEDKUFHSKLCMI7kNOEkIf9P9pTJ3OLJ/03HPPPed4Es9XS9qqwqgT
PbGKKOdXL4aAFS7A4gvAwgijuiKlqOpGlATS2NeMLE+TjJM9RkQ+SmqAXLrBo1LLIeLdiWlD6jZt
r7VNubWkndkXaxg5GO3UaOOKS6drO3luDDiO5my3iA0YAKGzPRV1ack8cOdhysI0CYzIPzjSiH5X
0QcvC8Lfaj0emsVKYF2rhL5L3fCkVjV76kShi59NHwDniAHzkgDgqBcwOgTMx+gDQQqXCw==
""")
##file deactivate.bat
DEACTIVATE_BAT = convert("""
eJxzSE3OyFfIT0vj4ipOLVEI8wwKCXX0iXf1C7Pl4spMU0hJTcvMS01RiPf3cYmHyQYE+fsGhCho
cCkAAUibEkTEVhWLMlUlLk6QGixStlyaeCyJDPHw9/Pw93VFsQguim4ZXAJoIUw5DhX47XUM8UCx
EchHtwsohN1bILUgw61c/Vy4AJYPYm4=
""")
##file activate.ps1
ACTIVATE_PS = convert("""
eJylWdmO41hyfW+g/0FTU7C7IXeJIqmtB/3AnZRIStxF2kaBm7gv4ipyMF/mB3+Sf8GXVGVl1tLT
43ECSqR4b5wbETeWE8z/+a///vNCDaN6cYtSf5G1dbNw/IVXNIu6aCvX9xa3qsgWl0IJ/7IYinbh
2nkOVqs2X0TNjz/8eeFFle826fBhQRaLBkD9uviw+LCy3Sbq7Mb/UNbrH3+YNtLcVaB+Xbipb+eL
tly0eVsD/M6u6g8//vC+dquobH5VWU75eMFUdvHb4n02RHlXuHYTFfmHbHCLLLNz70NpN+GrBI4p
1EeSk4FAXaZR88u0vPip8usi7fznt3fvP+OuPnx49/Pil4td+XnzigIAPoqYQH2J8v4z+C+8b98m
Q25t7k76LIK0cOz0V89/MXXx0+Lf6z5q3PA/F+/FIif9uqnaadFf/PzXSXYBfqIb2NeApecJwPzI
dlL/149nnvyoc7KqYfzTAT8v/voUmX7e+3n364tffl/oVaDyswKY/7J18e6bve8Wv9RuUfqfLHmK
/u139Hwx+9ePRep97KKqae30YwmCo2y+0vTz1k+rv7159B3pb1SOGj97Pe8/flfkC1Vn/7xYR4n6
lypNEGDDV5f7lcjil3S+4++p881Wv6qKyn5GQg1yJwcp4BZ5E+Wt/z1P/umbiHir4J8Xip/eFt6n
9T/9gU9eY+7zUX97Jlmb136ziKrKT/3OzpvP8VX/+MObSP0lL3LvVZlJ9v1b8357jXyw8rXxYPXN
11n4UzJ8G8S/vUbuJ6RPj999DbtS5kys//JusXwrNLnvT99cFlBNwXCe+niRz8JF/ezNr9Pze+H6
18W7d5PPvozW7+387Zto/v4pL8BvbxTzvIW9KCv/Fj0WzVQb/YXbVlPZWTz3/9vCaRtQbPN/Bb+j
2rUrDxTVD68gfQXu/ZewAFX53U/vf/rD2P3558W7+W79Po1y/xXoX/6RFHyNIoVjgAG4H0RTcAe5
3bSVv3DSwk2mZYHjFB8zj6fC4sLOFTHJJQrwzFYJgso0ApOoBzFiRzzQKjIQCCbQMIFJGCKqGUyS
8AkjiF2wTwmMEbcEUvq8Nj+X0f4YcCQmYRiOY7eRbAJDqzm1chOoNstbJ8oTBhZQ2NcfgaB6QjLp
U4+SWFjQGCZpyqby8V4JkPGs9eH1BscXIrTG24QxXLIgCLYNsIlxSYLA6SjAeg7HAg4/kpiIB8k9
TCLm0EM4gKIxEj8IUj2dQeqSxEwYVH88qiRlCLjEYGuNIkJB1BA5dHOZdGAoUFk54WOqEojkuf4Q
Ig3WY+96TDlKLicMC04h0+gDCdYHj0kz2xBDj9ECDU5zJ0tba6RKgXBneewhBG/xJ5m5FX+WSzsn
wnHvKhcOciw9NunZ0BUF0n0IJAcJMdcLqgQb0zP19dl8t9PzmMBjkuIF7KkvHgqEovUPOsY0PBB1
HCtUUhch83qEJPjQcNQDsgj0cRqx2ZbnnlrlUjE1EX2wFJyyDa/0GLrmKDEFepdWlsbmVU45Wiwt
eFM6mfs4kxg8yc4YmKDy67dniLV5FUeO5AKNPZaOQQ++gh+dXE7dbJ1aTDr7S4WPd8sQoQkDyODg
XnEu/voeKRAXZxB/e2xaJ4LTFLPYEJ15Ltb87I45l+P6OGFA5F5Ix8A4ORV6M1NH1uMuZMnmFtLi
VpYed+gSq9JDBoHc05J4OhKetrk1p0LYiKipxLMe3tYS7c5V7O1KcPU8BJGdLfcswhoFCSGQqJ8f
ThyQKy5EWFtHVuNhvTnkeTc8JMpN5li3buURh0+3ZGuzdwM55kon+8urbintjdQJf9U1D0ah+hNh
i1XNu4fSKbTC5AikGEaj0CYM1dpuli7EoqUt7929f1plxGGNZnixFSFP2qzhlZMonu2bB9OWSqYx
VuHKWNGJI8kqUhMTRtk0vJ5ycZ60JlodlmN3D9XiEj/cG2lSt+WV3OtMgt1Tf4/Z+1BaCus740kx
Nvj78+jMd9tq537Xz/mNFyiHb0HdwHytJ3uQUzKkYhK7wjGtx3oKX43YeYoJVtqDSrCnQFzMemCS
2bPSvP+M4yZFi/iZhAjL4UOeMfa7Ex8HKBqw4umOCPh+imOP6yVTwG2MplB+wtg97olEtykNZ6wg
FJBNXSTJ3g0CCTEEMdUjjcaBDjhJ9fyINXgQVHhA0bjk9lhhhhOGzcqQSxYdj3iIN2xGEOODx4qj
Q2xikJudC1ujCVOtiRwhga5nPdhe1gSa649bLJ0wCuLMcEYIeSy25YcDQHJb95nfowv3rQnin0fE
zIXFkM/EwSGxvCCMgEPNcDp/wph1gMEa8Xd1qAWOwWZ/KhjlqzgisBpDDDXz9Cmov46GYBKHC4zZ
84HJnXoTxyWNBbXV4LK/r+OEwSN45zBp7Cub3gIYIvYlxon5BzDgtPUYfXAMPbENGrI+YVGSeTQ5
i8NMB5UCcC+YRGIBhgs0xhAGwSgYwywpbu4vpCSTdEKrsy8osXMUnHQYenQHbOBofLCNNTg3CRRj
A1nXY2MZcjnXI+oQ2Zk+561H4CqoW61tbPKv65Y7fqc3TDUF9CA3F3gM0e0JQ0TPADJFJXVzphpr
2FzwAY8apGCju1QGOiUVO5KV6/hKbtgVN6hRVwpRYtu+/OC6w2bCcGzZQ8NCc4WejNEjFxOIgR3o
QqR1ZK0IaUxZ9nbL7GWJIjxBARUhAMnYrq/S0tVOjzlOSYRqeIZxaSaOBX5HSR3MFekOXVdUPbjX
nru61fDwI8HRYPUS7a6Inzq9JLjokU6P6OzT4UCH+Nha+JrU4VqEo4rRHQJhVuulAnvFhYz5NWFT
aS/bKxW6J3e46y4PLagGrCDKcq5B9EmP+s1QMCaxHNeM7deGEV3WPn3CeKjndlygdPyoIcNaL3dd
bdqPs47frcZ3aNWQ2Tk+rjFR01Ul4XnQQB6CSKA+cZusD0CP3F2Ph0e78baybgioepG12luSpFXi
bHbI6rGLDsGEodMObDG7uyxfCeU+1OiyXYk8fnGu0SpbpRoEuWdSUlNi5bd9nBxYqZGrq7Qa7zV+
VLazLcelzzP9+n6+xUtWx9OVJZW3gk92XGGkstTJ/LreFVFF2feLpXGGuQqq6/1QbWPyhJXIXIMs
7ySVlzMYqoPmnmrobbeauMIxrCr3sM+qs5HpwmmFt7SM3aRNQWpCrmeAXY28EJ9uc966urGKBL9H
18MtDE5OX97GDOHxam11y5LCAzcwtkUu8wqWI1dWgHyxGZdY8mC3lXzbzncLZ2bIUxTD2yW7l9eY
gBUo7uj02ZI3ydUViL7oAVFag37JsjYG8o4Csc5R7SeONGF8yZP+7xxi9scnHvHPcogJ44VH/LMc
Yu6Vn3jEzCFw9Eqq1ENQAW8aqbUwSiAqi+nZ+OkZJKpBL66Bj8z+ATqb/8qDIJUeNRTwrI0YrVmb
9FArKVEbCWUNSi8ipfVv+STgkpSsUhcBg541eeKLoBpLGaiHTNoK0r4nn3tZqrcIULtq20Df+FVQ
Sa0MnWxTugMuzD410sQygF4qdntbswiJMqjs014Irz/tm+pd5oygJ0fcdNbMg165Pqi7EkYGAXcB
dwxioCDA3+BY9+JjuOmJu/xyX2GJtaKSQcOZxyqFzTaa6/ot21sez0BtKjirROKRm2zuai02L0N+
ULaX8H5P6VwsGPbYOY7sAy5FHBROMrMzFVPYhFHZ7M3ZCZa2hsT4jGow6TGtG8Nje9405uMUjdF4
PtKQjw6yZOmPUmO8LjFWS4aPCfE011N+l3EdYq09O3iQJ9a01B3KXiMF1WmtZ+l1gmyJ/ibAHZil
vQzdOl6g9PoSJ4TM4ghTnTndEVMOmsSSu+SCVlGCOLQRaw9oLzamSWP62VuxPZ77mZYdfTRGuNBi
KyhZL32S2YckO/tU7y4Bf+QKKibQSKCTDWPUwWaE8yCBeL5FjpbQuAlb53mGX1jptLeRotREbx96
gnicYz0496dYauCjpTCA4VA0cdLJewzRmZeTwuXWD0talJsSF9J1Pe72nkaHSpULgNeK1+o+9yi0
YpYwXZyvaZatK2eL0U0ZY6ekZkFPdC8JTF4Yo1ytawNfepqUKEhwznp6HO6+2l7L2R9Q3N49JMIe
Z+ax1mVaWussz98QbNTRPo1xu4W33LJpd9H14dd66ype7UktfEDi3oUTccJ4nODjwBKFxS7lYWiq
XoHu/b7ZVcK5TbRD0F/2GShg2ywwUl07k4LLqhofKxFBNd1grWY+Zt/cPtacBpV9ys2z1moMLrT3
W0Elrjtt5y/dvDQYtObYS97pqj0eqmwvD3jCPRqamGthLiF0XkgB6IdHLBBwDGPiIDh7oPaRmTrN
tYA/yQKFxRiok+jM6ciJq/ZgiOi5+W4DEmufPEubeSuYJaM3/JHEevM08yJAXUQwb9LS2+8FOfds
FfOe3Bel6EDSjIEIKs4o9tyt67L1ylQlzhe0Q+7ue/bJnWMcD3q6wDSIQi8ThnRM65aqLWesi/ZM
xhHmQvfKBbWcC194IPjbBLYR9JTPITbzwRcu+OSFHDHNSYCLt29sAHO6Gf0h/2UO9Xwvhrjhczyx
Ygz6CqP4IwxQj5694Q1Pe2IR+KF/yy+5PvCL/vgwv5mPp9n4kx7fnY/nmV++410qF/ZVCMyv5nAP
pkeOSce53yJ6ahF4aMJi52by1HcCj9mDT5i+7TF6RoPaLL+cN1hXem2DmX/mdIbeeqwQOLD5lKO/
6FM4x77w6D5wMx3g0IAfa2D/pgY9a7bFQbinLDPz5dZi9ATIrd0cB5xfC0BfCCZO7TKP0jQ2Meih
nRXhkA3smTAnDN9IW2vA++lsgNuZ2QP0UhqyjUPrDmgfWP2bWWiKA+YiEK7xou8cY0+d3/bk0oHR
QLrq4KzDYF/ljQDmNhBHtkVNuoDey6TTeaD3SHO/Bf4d3IwGdqQp6FuhmwFbmbQBssDXVKDBYOpk
Jy7wxOaSRwr0rDmGbsFdCM+7XU/84JPu3D/gW7QXgzlvbjixn99/8CpWFUQWHFEz/RyXvzNXTTOd
OXLNNFc957Jn/YikNzEpUdRNxXcC6b76ccTwMGoKj5X7c7TvHFgc3Tf4892+5A+iR+D8OaaE6ACe
gdgHcyCoPm/xiDCWP+OZRjpzfj5/2u0i4qQfmIEOsTV9Hw6jZ3Agnh6hiwjDtGYxWvt5TiWEuabN
77YCyRXwO8P8wdzG/8489KwfFBZWI6Vvx76gmlOc03JI1HEfXYZEL4sNFQ3+bqf7e2hdSWQknwKF
ICJjGyDs3fdmnnxubKXebpQYLjPgEt9GTzKkUgTvOoQa1J7N3nv4sR6uvYFLhkXZ+pbCoU3K9bfq
gF7W82tNutRRZExad+k4GYYsCfmEbvizS4jsRr3fdzqjEthpEwm7pmN7OgVzRbrktjrFw1lc0vM8
V7dyTJ71qlsd7v3KhmHzeJB35pqEOk2pEe5uPeCToNkmedmxcKbIj+MZzjFSsvCmimaMQB1uJJKa
+hoWUi7aEFLvIxKxJavqpggXBIk2hr0608dIgnfG5ZEprqmH0b0YSy6jVXTCuIB+WER4d5BPVy9Q
M4taX0RIlDYxQ2CjBuq78AAcHQf5qoKP8BXHnDnd/+ed5fS+csL4g3eWqECaL+8suy9r8hx7c+4L
EegEWdqAWN1w1NezP34xsxLkvRRI0DRzKOg0U+BKfQY128YlYsbwSczEg2LqKxRmcgiwHdhc9MQJ
IwKQHlgBejWeMGDYYxTOQUiJOmIjJbzIzHH6lAMP+y/fR0v1g4wx4St8fcqTt3gz5wc+xXFZZ3qI
JpXI5iJk7xmNL2tYsDpcqu0375Snd5EKsIvg8u5szTOyZ4v06Ny2TZXRpHUSinh4IFp8Eoi7GINJ
02lPJnS/9jSxolJwp2slPMIEbjleWw3eec4XaetyEnSSqTPRZ9fVA0cPXMqzrPYQQyrRux3LaAh1
wujbgcObg1nt4iiJ5IMbc/WNPc280I2T4nTkdwG8H6iS5xO2WfsFsruBwf2QkgZlb6w7om2G65Lr
r2Gl4dk63F8rCEHoUJ3fW+pU2Srjlmcbp+JXY3DMifEI22HcHAvT7zzXiMTr7VbUR5a2lZtJkk4k
1heZZFdru8ucCWMTr3Z4eNnjLm7LW7rcN7QjMpxrsCzjxndeyFUX7deIs3PQkgyH8k6luI0uUyLr
va47TBjM4JmNHFzGPcP6BV6cYgQy8VQYZe5GmzZHMxyBYhGiUdekZQ/qwyxC3WGylQGdUpSf9ZCP
a7qPdJd31fPRC0TOgzupO7nLuBGr2A02yuUQwt2KQG31sW8Gd9tQiHq+hPDt4OzJuY4pS8XRsepY
tsd7dVEfJFmc15IYqwHverrpWyS1rFZibDPW1hUUb+85CGUzSBSTK8hpvee/ZxonW51TUXekMy3L
uy25tMTg4mqbSLQQJ+skiQu2toIfBFYrOWql+EQipgfT15P1aq6FDK3xgSjIGWde0BPftYchDTdM
i4QdudHFkN0u6fSKiT09QLv2mtSblt5nNzBR6UReePNs+khE4rHcXuoK21igUKHl1c3MXMgPu7y8
rKQDxR6N/rffXv+lROXet/9Q+l9I4D1U
""")
##file distutils-init.py
DISTUTILS_INIT = convert("""
eJytV1uL4zYUfvevOE0ottuMW9q3gVDa3aUMXXbLMlDKMBiNrSTqOJKRlMxkf33PkXyRbGe7Dw2E
UXTu37lpxLFV2oIyifAncxmOL0xLIfcG+gv80x9VW6maw7o/CANSWWBwFtqeWMPlGY6qPjV8A0bB
C4eKSTgZ5LRgFeyErMEeOBhbN+Ipgeizhjtnhkn7DdyjuNLPoCS0l/ayQTG0djwZC08cLXozeMss
aG5EzQ0IScpnWtHSTXuxByV/QCmxE7y+eS0uxWeoheaVVfqSJHiU7Mhhi6gULbOHorshkrEnKxpT
0n3A8Y8SMpuwZx6aoix3ouFlmW8gHRSkeSJ2g7hU+kiHLDaQw3bmRDaTGfTnty7gPm0FHbIBg9U9
oh1kZzAFLaue2R6htPCtAda2nGlDSUJ4PZBgCJBGVcwKTAMz/vJiLD+Oin5Z5QlvDPdulC6EsiyE
NFzb7McNTKJzbJqzphx92VKRFY1idenzmq3K0emRcbWBD0ryqc4NZGmKOOOX9Pz5x+/l27tP797c
f/z0d+4NruGNai8uAM0bfsYaw8itFk8ny41jsfpyO+BWlpqfhcG4yxLdi/0tQqoT4a8Vby382mt8
p7XSo7aWGdPBc+b6utaBmCQ7rQKQoWtAuthQCiold2KfJIPTT8xwg9blPumc+YDZC/wYGdAyHpJk
vUbHbHWAp5No6pK/WhhLEWrFjUwtPEv1Agf8YmnsuXUQYkeZoHm8ogP16gt2uHoxcEMdf2C6pmbw
hUMsWGhanboh4IzzmsIpWs134jVPqD/c74bZHdY69UKKSn/+KfVhxLgUlToemayLMYQOqfEC61bh
cbhwaqoGUzIyZRFHPmau5juaWqwRn3mpWmoEA5nhzS5gog/5jbcFQqOZvmBasZtwYlG93k5GEiyw
buHhMWLjDarEGpMGB2LFs5nIJkhp/nUmZneFaRth++lieJtHepIvKgx6PJqIlD9X2j6pG1i9x3pZ
5bHuCPFiirGHeO7McvoXkz786GaKVzC9DSpnOxJdc4xm6NSVq7lNEnKdVlnpu9BNYoKX2Iq3wvgh
gGEUM66kK6j4NiyoneuPLSwaCWDxczgaolEWpiMyDVDb7dNuLAbriL8ig8mmeju31oNvQdpnvEPC
1vAXbWacGRVrGt/uXN/gU0CDDwgooKRrHfTBb1/s9lYZ8ZqOBU0yLvpuP6+K9hLFsvIjeNhBi0KL
MlOuWRn3FRwx5oHXjl0YImUx0+gLzjGchrgzca026ETmYJzPD+IpuKzNi8AFn048Thd63OdD86M6
84zE8yQm0VqXdbbgvub2pKVnS76icBGdeTHHXTKspUmr4NYo/furFLKiMdQzFjHJNcdAnMhltBJK
0/IKX3DVFqvPJ2dLE7bDBkH0l/PJ29074+F0CsGYOxsb7U3myTUncYfXqnLLfa6sJybX4g+hmcjO
kMRBfA1JellfRRKJcyRpxdS4rIl6FdmQCWjo/o9Qz7yKffoP4JHjOvABcRn4CZIT2RH4jnxmfpVG
qgLaAvQBNfuO6X0/Ux02nb4FKx3vgP+XnkX0QW9pLy/NsXgdN24dD3LxO2Nwil7Zlc1dqtP3d7/h
kzp1/+7hGBuY4pk0XD/0Ao/oTe/XGrfyM773aB7iUhgkpy+dwAMalxMP0DrBcsVw/6p25+/hobP9
GBknrWExDhLJ1bwt1NcCNblaFbMKCyvmX0PeRaQ=
""")
##file distutils.cfg
DISTUTILS_CFG = convert("""
eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH
xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg
9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q=
""")
##file activate_this.py
ACTIVATE_THIS = convert("""
eJyNU01v2zAMvetXEB4K21jnDOstQA4dMGCHbeihlyEIDMWmE62yJEiKE//7kXKdpEWLzYBt8evx
kRSzLPs6wiEoswM8YdMpjUXcq1Dz6RZa1cSiTkJdr86GsoTRHuCotBayiWqQEYGtMCgfD1KjGYBe
5a3p0cRKiEe2NtLAFikftnDco0ko/SFEVgEZ8aRCZDIPY9xbA8pE9M4jfW/B2CjiHq9zbJVZuOQq
siwTIvpxKYCembPAU4Muwi/Z4zfvrZ/MXipKeB8C+qisSZYiWfjJfs+0/MFMdWn1hJcO5U7G/SLa
xVx8zU6VG/PXLXvfsyyzUqjeWR8hjGE+2iCE1W1tQ82hsCJN9dzKaoexyB/uH79TnjwvxcW0ntSb
yZ8jq1Z5Q1UXsyy3gf9nbjTEj7NzQMfCJa/YSmrQ+2D/BqfiOi6sclrGzvoeVivIj8rcfcmnIQRF
7XCyeZI7DFe5/lhlCs5PRf5QW66VXT/NrlQ46oD/D6InkOmi3IQcbhKxAX2g4a+Xd5s3UtCtG2py
m8eg6WYWqR6SL5OjKMGfSrYt/6kxxQtOpeAgj1LXBNmpE2ElmCSIy5H0zFd8gJ924HWijWhb2hRC
6wNEm1QdDZtuSZcEprIUBo/XRNcbQe1OUbQ/r3hPTaPJJDNtFLu8KHV5XoNr3Eo6h6YtOKw8e8yw
VF5PnJ+ts3a9/Mz38RpG/AUSzYUW
""")
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
FAT_MAGIC = 0xcafebabe
BIG_ENDIAN = '>'
LITTLE_ENDIAN = '<'
LC_LOAD_DYLIB = 0xc
maxint = majver == 3 and getattr(sys, 'maxsize') or getattr(sys, 'maxint')
class fileview(object):
"""
A proxy for file-like objects that exposes a given view of a file.
Modified from macholib.
"""
def __init__(self, fileobj, start=0, size=maxint):
if isinstance(fileobj, fileview):
self._fileobj = fileobj._fileobj
else:
self._fileobj = fileobj
self._start = start
self._end = start + size
self._pos = 0
def __repr__(self):
return '<fileview [%d, %d] %r>' % (
self._start, self._end, self._fileobj)
def tell(self):
return self._pos
def _checkwindow(self, seekto, op):
if not (self._start <= seekto <= self._end):
raise IOError("%s to offset %d is outside window [%d, %d]" % (
op, seekto, self._start, self._end))
def seek(self, offset, whence=0):
seekto = offset
if whence == os.SEEK_SET:
seekto += self._start
elif whence == os.SEEK_CUR:
seekto += self._start + self._pos
elif whence == os.SEEK_END:
seekto += self._end
else:
raise IOError("Invalid whence argument to seek: %r" % (whence,))
self._checkwindow(seekto, 'seek')
self._fileobj.seek(seekto)
self._pos = seekto - self._start
def write(self, bytes):
here = self._start + self._pos
self._checkwindow(here, 'write')
self._checkwindow(here + len(bytes), 'write')
self._fileobj.seek(here, os.SEEK_SET)
self._fileobj.write(bytes)
self._pos += len(bytes)
def read(self, size=maxint):
assert size >= 0
here = self._start + self._pos
self._checkwindow(here, 'read')
size = min(size, self._end - here)
self._fileobj.seek(here, os.SEEK_SET)
bytes = self._fileobj.read(size)
self._pos += len(bytes)
return bytes
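# Illustrative use of fileview (an assumption for exposition; the class is
# only used internally below): expose bytes 100..149 of a file as if they
# were a standalone 50-byte file.
#
#     f = open('archive.bin', 'rb')        # hypothetical file
#     view = fileview(f, start=100, size=50)
#     view.seek(0)                         # view offset 0 == file offset 100
#     chunk = view.read()                  # at most 50 bytes, window-clamped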
def read_data(file, endian, num=1):
"""
    Read a given number of 32-bit unsigned integers from the given file
with the given endianness.
"""
res = struct.unpack(endian + 'L' * num, file.read(num * 4))
if len(res) == 1:
return res[0]
return res
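# For example (illustrative): read_data(f, LITTLE_ENDIAN, 2) unpacks eight
# bytes with struct format '<LL' and returns a 2-tuple, while
# read_data(f, BIG_ENDIAN) consumes four bytes and returns a single int.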
def mach_o_change(path, what, value):
"""
Replace a given name (what) in any LC_LOAD_DYLIB command found in
the given binary with a new name (value), provided it's shorter.
"""
def do_macho(file, bits, endian):
        # Read the Mach-O header (the magic number is assumed to have been
        # read by the caller)
cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = read_data(file, endian, 6)
        # The 64-bit header has one more field.
if bits == 64:
read_data(file, endian)
# The header is followed by ncmds commands
for n in range(ncmds):
where = file.tell()
# Read command header
cmd, cmdsize = read_data(file, endian, 2)
if cmd == LC_LOAD_DYLIB:
# The first data field in LC_LOAD_DYLIB commands is the
# offset of the name, starting from the beginning of the
# command.
name_offset = read_data(file, endian)
file.seek(where + name_offset, os.SEEK_SET)
# Read the NUL terminated string
load = file.read(cmdsize - name_offset).decode()
load = load[:load.index('\0')]
# If the string is what is being replaced, overwrite it.
if load == what:
file.seek(where + name_offset, os.SEEK_SET)
file.write(value.encode() + '\0'.encode())
# Seek to the next command
file.seek(where + cmdsize, os.SEEK_SET)
def do_file(file, offset=0, size=maxint):
file = fileview(file, offset, size)
# Read magic number
magic = read_data(file, BIG_ENDIAN)
if magic == FAT_MAGIC:
# Fat binaries contain nfat_arch Mach-O binaries
nfat_arch = read_data(file, BIG_ENDIAN)
for n in range(nfat_arch):
# Read arch header
cputype, cpusubtype, offset, size, align = read_data(file, BIG_ENDIAN, 5)
do_file(file, offset, size)
elif magic == MH_MAGIC:
do_macho(file, 32, BIG_ENDIAN)
elif magic == MH_CIGAM:
do_macho(file, 32, LITTLE_ENDIAN)
elif magic == MH_MAGIC_64:
do_macho(file, 64, BIG_ENDIAN)
elif magic == MH_CIGAM_64:
do_macho(file, 64, LITTLE_ENDIAN)
    assert len(what) >= len(value)
do_file(open(path, 'r+b'))
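# This mirrors the call made earlier in this file when relocating an OS X
# framework build:
#
#     mach_o_change(py_executable,
#                   os.path.join(prefix, 'Python'),
#                   '@executable_path/../.Python')
#
# The replacement name must be no longer than the original, which is what
# the assert above enforces.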
if __name__ == '__main__':
main()
## TODO:
## Copy python.exe.manifest
## Monkeypatch distutils.sysconfig
license: gpl-2.0
repo_name: damdam-s/OpenUpgrade | path: addons/mrp/wizard/mrp_workcenter_load.py | copies: 381 | size: 2222
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_workcenter_load(osv.osv_memory):
_name = 'mrp.workcenter.load'
_description = 'Work Center Load'
_columns = {
'time_unit': fields.selection([('day', 'Day by day'),('week', 'Per week'),('month', 'Per month')],'Type of period', required=True),
'measure_unit': fields.selection([('hours', 'Amount in hours'),('cycles', 'Amount in cycles')],'Amount measuring unit', required=True),
}
def print_report(self, cr, uid, ids, context=None):
""" To print the report of Work Center Load
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return : Report
"""
if context is None:
context = {}
datas = {'ids' : context.get('active_ids',[])}
res = self.read(cr, uid, ids, ['time_unit','measure_unit'])
res = res and res[0] or {}
datas['form'] = res
return {
'type' : 'ir.actions.report.xml',
'report_name':'mrp.workcenter.load',
'datas' : datas,
}
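    # Illustrative wiring (an assumption, not defined in this module): the
    # wizard's form view binds a button to this method, e.g.
    #     <button name="print_report" string="Print" type="object"/>
    # and the returned dict tells the client to render the
    # 'mrp.workcenter.load' report with the chosen period and measure unit.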
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
license: agpl-3.0
repo_name: jimporter/bfg9000 | path: test/unit/platforms/test_target.py | copies: 1 | size: 4657
from unittest import mock
from .. import *
from bfg9000.platforms import platform_name, target, posix
class TestTargetPlatform(TestCase):
def setUp(self):
platform_name._reset()
def tearDown(self):
platform_name._reset()
def test_default(self):
with mock.patch('platform.system', return_value='Linux'), \
mock.patch('platform.machine', return_value='i686'): # noqa
platform = target.platform_info()
self.assertEqual(platform.name, 'linux')
self.assertEqual(platform.species, 'linux')
self.assertEqual(platform.genus, 'linux')
self.assertEqual(platform.family, 'posix')
self.assertEqual(platform.triplet, 'i686-pc-linux-gnu')
self.assertEqual(platform.object_format, 'elf')
def test_cygwin(self):
with mock.patch('platform.machine', return_value='x86_64'):
platform = target.platform_info('cygwin')
self.assertEqual(platform.name, 'cygwin')
self.assertEqual(platform.species, 'cygwin')
self.assertEqual(platform.genus, 'cygwin')
self.assertEqual(platform.family, 'posix')
self.assertEqual(platform.triplet, 'x86_64-unknown-windows-cygnus')
self.assertEqual(platform.object_format, 'coff')
windows = target.platform_info('cygwin')
posix = target.platform_info('linux')
for i in ('object_format', 'executable_ext', 'shared_library_ext',
'has_import_library', 'has_versioned_library'):
self.assertEqual(getattr(platform, i), getattr(windows, i))
for i in ('has_frameworks', 'install_dirs'):
self.assertEqual(getattr(platform, i), getattr(posix, i))
def test_darwin(self):
with mock.patch('platform.machine', return_value='x86_64'):
platform = target.platform_info('macos')
self.assertEqual(platform.name, 'macos')
self.assertEqual(platform.species, 'macos')
self.assertEqual(platform.genus, 'darwin')
self.assertEqual(platform.family, 'posix')
self.assertEqual(platform.triplet, 'x86_64-apple-darwin')
self.assertEqual(platform.object_format, 'mach-o')
def test_linux(self):
with mock.patch('platform.machine', return_value='x86_64'):
platform = target.platform_info('linux')
self.assertEqual(platform.name, 'linux')
self.assertEqual(platform.species, 'linux')
self.assertEqual(platform.genus, 'linux')
self.assertEqual(platform.family, 'posix')
self.assertEqual(platform.triplet, 'x86_64-unknown-linux-gnu')
self.assertEqual(platform.object_format, 'elf')
def test_android(self):
with mock.patch('platform.machine', return_value='arm'):
platform = target.platform_info('android')
self.assertEqual(platform.name, 'android')
self.assertEqual(platform.species, 'android')
self.assertEqual(platform.genus, 'linux')
self.assertEqual(platform.family, 'posix')
self.assertEqual(platform.triplet, 'arm-unknown-linux-android')
self.assertEqual(platform.object_format, 'elf')
def test_windows(self):
with mock.patch('platform.machine', return_value='x86_64'):
platform = target.platform_info('winnt')
self.assertEqual(platform.name, 'winnt')
self.assertEqual(platform.species, 'winnt')
self.assertEqual(platform.genus, 'winnt')
self.assertEqual(platform.family, 'windows')
self.assertEqual(platform.triplet, 'x86_64-unknown-win32')
self.assertEqual(platform.object_format, 'coff')
def test_unknown(self):
with mock.patch('platform.machine', return_value='x86_64'):
platform = target.platform_info('onosendai')
self.assertEqual(platform.name, 'onosendai')
self.assertEqual(platform.species, 'onosendai')
self.assertEqual(platform.genus, 'onosendai')
self.assertEqual(platform.family, 'posix')
self.assertEqual(platform.object_format, 'elf')
self.assertEqual(platform.triplet, 'x86_64-unknown-onosendai')
def test_equality(self):
a = posix.PosixTargetPlatform('linux', 'linux', 'x86_64')
b = posix.PosixTargetPlatform('linux', 'linux', 'x86_64')
c = posix.PosixTargetPlatform('linux', 'android', 'arm')
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
def test_json(self):
plat = posix.PosixTargetPlatform('linux', 'linux', 'x86_64')
json = plat.to_json()
self.assertEqual(target.from_json(json), plat)
license: bsd-3-clause
repo_name: meabsence/python-for-android | path: python3-alpha/python3-src/Lib/tkinter/constants.py | copies: 375 | size: 1493
# Symbolic constants for Tk
# Booleans
NO=FALSE=OFF=0
YES=TRUE=ON=1
# -anchor and -sticky
N='n'
S='s'
W='w'
E='e'
NW='nw'
SW='sw'
NE='ne'
SE='se'
NS='ns'
EW='ew'
NSEW='nsew'
CENTER='center'
# -fill
NONE='none'
X='x'
Y='y'
BOTH='both'
# -side
LEFT='left'
TOP='top'
RIGHT='right'
BOTTOM='bottom'
# -relief
RAISED='raised'
SUNKEN='sunken'
FLAT='flat'
RIDGE='ridge'
GROOVE='groove'
SOLID = 'solid'
# -orient
HORIZONTAL='horizontal'
VERTICAL='vertical'
# -tabs
NUMERIC='numeric'
# -wrap
CHAR='char'
WORD='word'
# -align
BASELINE='baseline'
# -bordermode
INSIDE='inside'
OUTSIDE='outside'
# Special tags, marks and insert positions
SEL='sel'
SEL_FIRST='sel.first'
SEL_LAST='sel.last'
END='end'
INSERT='insert'
CURRENT='current'
ANCHOR='anchor'
ALL='all' # e.g. Canvas.delete(ALL)
# Text widget and button states
NORMAL='normal'
DISABLED='disabled'
ACTIVE='active'
# Canvas state
HIDDEN='hidden'
# Menu item types
CASCADE='cascade'
CHECKBUTTON='checkbutton'
COMMAND='command'
RADIOBUTTON='radiobutton'
SEPARATOR='separator'
# Selection modes for list boxes
SINGLE='single'
BROWSE='browse'
MULTIPLE='multiple'
EXTENDED='extended'
# Activestyle for list boxes
# NONE='none' is also valid
DOTBOX='dotbox'
UNDERLINE='underline'
# Various canvas styles
PIESLICE='pieslice'
CHORD='chord'
ARC='arc'
FIRST='first'
LAST='last'
BUTT='butt'
PROJECTING='projecting'
ROUND='round'
BEVEL='bevel'
MITER='miter'
# Arguments to xview/yview
MOVETO='moveto'
SCROLL='scroll'
UNITS='units'
PAGES='pages'
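# Illustrative usage sketch (assumes a display is available): a few of the
# constants above passed as geometry-manager and widget options.
if __name__ == '__main__':
    from tkinter import Tk, Frame, Label
    root = Tk()
    frame = Frame(root, relief=RAISED, borderwidth=2)
    frame.pack(fill=BOTH, expand=True)
    Label(frame, text='anchored left', relief=SUNKEN).pack(side=LEFT)
    root.mainloop()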
|
apache-2.0
| 544,014,699,039,416,060 | -3,855,943,260,899,890,700 | 12.572727 | 42 | 0.711989 | false |
CiscoSystems/quantum
|
neutron/plugins/cisco/extensions/_credential_view.py
|
24
|
1905
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ying Liu, Cisco Systems, Inc.
#
def get_view_builder(req):
base_url = req.application_url
return ViewBuilder(base_url)
class ViewBuilder(object):
"""ViewBuilder for Credential, derived from neutron.views.networks."""
def __init__(self, base_url):
"""Initialize builder.
:param base_url: url of the root wsgi application
"""
self.base_url = base_url
def build(self, credential_data, is_detail=False):
"""Generic method used to generate a credential entity."""
if is_detail:
credential = self._build_detail(credential_data)
else:
credential = self._build_simple(credential_data)
return credential
def _build_simple(self, credential_data):
"""Return a simple description of credential."""
return dict(credential=dict(id=credential_data['credential_id']))
def _build_detail(self, credential_data):
"""Return a detailed description of credential."""
return dict(credential=dict(id=credential_data['credential_id'],
name=credential_data['user_name'],
password=credential_data['password']))
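# Illustrative usage sketch: the request object below is a stand-in for a
# real WSGI request; only its application_url attribute is consulted.
if __name__ == '__main__':
    class _FakeRequest(object):
        application_url = 'http://localhost:9696'

    builder = get_view_builder(_FakeRequest())
    cred = {'credential_id': '42', 'user_name': 'admin', 'password': 's3cret'}
    print(builder.build(cred))                  # {'credential': {'id': '42'}}
    print(builder.build(cred, is_detail=True))  # adds name and password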
|
apache-2.0
| -2,166,811,839,826,379,000 | -2,076,945,301,741,789,000 | 35.634615 | 78 | 0.655643 | false |
anandbhoraskar/Diamond
|
src/diamond/handler/mqtt.py
|
31
|
5740
|
# coding=utf-8
"""
Send metrics to an MQTT broker.
### Dependencies
* [mosquitto](http://mosquitto.org/documentation/python/)
* Python `ssl` module (and Python >= 2.7)
In order for this to do something useful, you'll need an
MQTT broker (e.g. [mosquitto](http://mosquitto.org)) and
a `diamond.conf` containing something along these lines:
[server]
handlers = diamond.handler.mqtt.MQTTHandler
...
[handlers]
[[MQTTHandler]]
host = address-of-mqtt-broker (default: localhost)
port = 1883 (default: 1883; with tls, default: 8883)
qos = 0 (default: 0)
# If False, do not include timestamp in the MQTT payload
# i.e. just the metric number
timestamp = True
# Optional topic-prefix to prepend to metrics en-route to
# MQTT broker
prefix = some/pre/fix (default: "")
# If you want to connect to your MQTT broker with TLS, you'll have
# to set the following four parameters
tls = True (default: False)
cafile = /path/to/ca/cert.pem
certfile = /path/to/certificate.pem
keyfile = /path/to/key.pem
Test by launching an MQTT subscribe, e.g.:
mosquitto_sub -v -t 'servers/#'
or
mosquitto_sub -v -t 'some/pre/fix/#'
### To Graphite
You may be interested in
[mqtt2graphite](https://github.com/jpmens/mqtt2graphite)
which subscribes to an MQTT broker and sends metrics off to Graphite.
### Notes
* This handler sets a last will and testament, so that the broker
publishes its death at a topic called clients/diamond/<hostname>
* Support for reconnecting to a broker is implemented and ought to
work.
"""
from Handler import Handler
from diamond.collector import get_hostname
import os
HAVE_SSL = True
try:
import ssl
except ImportError:
HAVE_SSL = False
try:
import mosquitto
except ImportError:
mosquitto = None
__author__ = 'Jan-Piet Mens'
__email__ = '[email protected]'
class MQTTHandler(Handler):
"""
"""
def __init__(self, config=None):
"""
Create a new instance of the MQTTHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Initialize Data
self.mqttc = None
self.hostname = get_hostname(self.config)
self.client_id = "%s_%s" % (self.hostname, os.getpid())
# Initialize Options
self.host = self.config.get('host', 'localhost')
self.port = 0
self.qos = int(self.config.get('qos', 0))
self.prefix = self.config.get('prefix', "")
self.tls = self.config.get('tls', False)
        # Include the timestamp in the payload unless the config
        # explicitly disables it; the option may arrive as a string.
        ts = self.config.get('timestamp', True)
        if isinstance(ts, basestring):
            ts = ts.strip().lower() not in ('false', '0', 'no', 'off')
        self.timestamp = 1 if ts else 0
if not mosquitto:
self.log.error('mosquitto import failed. Handler disabled')
self.enabled = False
return
# Initialize
self.mqttc = mosquitto.Mosquitto(self.client_id, clean_session=True)
if not self.tls:
self.port = int(self.config.get('port', 1883))
else:
# Set up TLS if requested
self.port = int(self.config.get('port', 8883))
self.cafile = self.config.get('cafile', None)
self.certfile = self.config.get('certfile', None)
self.keyfile = self.config.get('keyfile', None)
if None in [self.cafile, self.certfile, self.keyfile]:
self.log.error("MQTTHandler: TLS configuration missing.")
return
try:
self.mqttc.tls_set(
self.cafile,
certfile=self.certfile,
keyfile=self.keyfile,
cert_reqs=ssl.CERT_REQUIRED,
                    tls_version=ssl.PROTOCOL_TLSv1,  # was the literal 3
ciphers=None)
            except Exception:
self.log.error("MQTTHandler: Cannot set up TLS " +
"configuration. Files missing?")
self.mqttc.will_set("clients/diamond/%s" % (self.hostname),
payload="Adios!", qos=0, retain=False)
self.mqttc.connect(self.host, self.port, 60)
self.mqttc.on_disconnect = self._disconnect
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(MQTTHandler, self).get_default_config_help()
config.update({
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(MQTTHandler, self).get_default_config()
config.update({
})
return config
def process(self, metric):
"""
Process a metric by converting metric name to MQTT topic name;
the payload is metric and timestamp.
"""
if not mosquitto:
return
line = str(metric)
topic, value, timestamp = line.split()
if len(self.prefix):
topic = "%s/%s" % (self.prefix, topic)
topic = topic.replace('.', '/')
topic = topic.replace('#', '&') # Topic must not contain wildcards
if self.timestamp == 0:
self.mqttc.publish(topic, "%s" % (value), self.qos)
else:
self.mqttc.publish(topic, "%s %s" % (value, timestamp), self.qos)
def _disconnect(self, mosq, obj, rc):
self.log.debug("MQTTHandler: reconnecting to broker...")
mosq.reconnect()
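# Illustrative only: the topic/payload transformation process() applies to a
# Diamond metric line, assuming prefix = 'some/pre/fix' and timestamps on.
if __name__ == '__main__':
    line = 'servers.web01.cpu.total.idle 99.3 1419867296'
    topic, value, timestamp = line.split()
    topic = '%s/%s' % ('some/pre/fix', topic)
    topic = topic.replace('.', '/').replace('#', '&')
    print topic                         # some/pre/fix/servers/web01/cpu/total/idle
    print '%s %s' % (value, timestamp)  # payload with timestamp appended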
|
mit
| -2,253,764,865,406,962,700 | -8,949,059,519,635,668,000 | 27.844221 | 78 | 0.569338 | false |
adviti/melange
|
app/soc/views/helper/url.py
|
1
|
1660
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers used to construct urls.
"""
def trim_url_to(url, limit):
"""Returns a version of url at most limit long.
"""
if not url:
return url
if len(url) > limit:
return '%s...' % url[:max(0, limit - 3)]
return url
URL_PATTERN = '<a href="%(url)s"%(target)s%(nofollow)s>%(name)s</a>'
def urlize(url, name=None, target="_blank", nofollow=True):
"""Make an url clickable.
Args:
url: the actual url, such as '/user/list'
name: the display name, such as 'List Users', defaults to url
target: the 'target' attribute of the <a> element
nofollow: whether to add the 'rel="nofollow"' attribute
"""
if not url:
return ''
from django.utils.safestring import mark_safe
from django.utils.html import escape
safe_url = escape(url)
  safe_name = escape(name) if name else safe_url
  link = URL_PATTERN % {
      'url': safe_url,
      'name': safe_name,
      'target': ' target="%s"' % target if target else '',
      'nofollow': ' rel="nofollow"' if nofollow else '',
  }
return mark_safe(link)
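# Illustrative examples (return values shown as plain strings; the function
# actually returns a Django SafeString):
#
#   urlize('/user/list', name='List Users')
#     -> '<a href="/user/list" target="_blank" rel="nofollow">List Users</a>'
#   urlize('/user/list', target=None, nofollow=False)
#     -> '<a href="/user/list">/user/list</a>'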
|
apache-2.0
| -5,665,830,815,788,677,000 | -4,461,240,814,835,935,000 | 26.666667 | 74 | 0.672892 | false |
seann1/portfolio5
|
.meteor/dev_bundle/python/Lib/copy_reg.py
|
442
|
6800
|
"""Helper to provide extensibility for pickle/cPickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
from types import ClassType as _ClassType
__all__ = ["pickle", "constructor",
"add_extension", "remove_extension", "clear_extension_cache"]
dispatch_table = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
if type(ob_type) is _ClassType:
raise TypeError("copy_reg is not intended for use with classes")
if not hasattr(pickle_function, '__call__'):
raise TypeError("reduction functions must be callable")
dispatch_table[ob_type] = pickle_function
# The constructor_ob function is a vestige of safe for unpickling.
# There is no reason for the caller to pass it anymore.
if constructor_ob is not None:
constructor(constructor_ob)
def constructor(object):
if not hasattr(object, '__call__'):
raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.
try:
complex
except NameError:
pass
else:
def pickle_complex(c):
return complex, (c.real, c.imag)
pickle(complex, pickle_complex, complex)
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
if base.__init__ != object.__init__:
base.__init__(obj, state)
return obj
_HEAPTYPE = 1<<9
# Python code for object.__reduce_ex__ for protocols 0 and 1
def _reduce_ex(self, proto):
assert proto < 2
for base in self.__class__.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
else:
base = object # not really reachable
if base is object:
state = None
else:
if base is self.__class__:
raise TypeError, "can't pickle %s objects" % base.__name__
state = base(self)
args = (self.__class__, base, state)
try:
getstate = self.__getstate__
except AttributeError:
if getattr(self, "__slots__", None):
raise TypeError("a class that defines __slots__ without "
"defining __getstate__ cannot be pickled")
try:
dict = self.__dict__
except AttributeError:
dict = None
else:
dict = getstate()
if dict:
return _reconstructor, args, dict
else:
return _reconstructor, args
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def _slotnames(cls):
"""Return a list of slot names for a given class.
This needs to find slots defined by the class and its bases, so we
can't simply return the __slots__ attribute. We must walk down
the Method Resolution Order and concatenate the __slots__ of each
class found there. (This assumes classes don't modify their
__slots__ attribute to misrepresent their slots after the class is
defined.)
"""
# Get the value from a cache in the class if possible
names = cls.__dict__.get("__slotnames__")
if names is not None:
return names
# Not cached -- calculate the value
names = []
if not hasattr(cls, "__slots__"):
# This class has no slots
pass
else:
# Slots found -- gather slot names from all base classes
for c in cls.__mro__:
if "__slots__" in c.__dict__:
slots = c.__dict__['__slots__']
# if class has a single slot, it can be given as a string
if isinstance(slots, basestring):
slots = (slots,)
for name in slots:
# special descriptors
if name in ("__dict__", "__weakref__"):
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
names.append('_%s%s' % (c.__name__, name))
else:
names.append(name)
# Cache the outcome in the class if at all possible
try:
cls.__slotnames__ = names
except:
pass # But don't die if we can't
return names
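# Illustrative only: the names _slotnames() gathers, including the
# private-name mangling applied to slots that start with '__':
#
#   class Point(object):
#       __slots__ = ('x', '__y')
#
#   _slotnames(Point)  ->  ['x', '_Point__y']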
# A registry of extension codes. This is an ad-hoc compression
# mechanism. Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it. Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context. (There are also some codes reserved for local use that
# don't have this restriction.) Codes are positive ints; 0 is
# reserved.
_extension_registry = {} # key -> code
_inverted_registry = {} # code -> key
_extension_cache = {} # code -> object
# Don't ever rebind those names: cPickle grabs a reference to them when
# it's initialized, and won't see a rebinding.
def add_extension(module, name, code):
"""Register an extension code."""
code = int(code)
if not 1 <= code <= 0x7fffffff:
raise ValueError, "code out of range"
key = (module, name)
if (_extension_registry.get(key) == code and
_inverted_registry.get(code) == key):
return # Redundant registrations are benign
if key in _extension_registry:
raise ValueError("key %s is already registered with code %s" %
(key, _extension_registry[key]))
if code in _inverted_registry:
raise ValueError("code %s is already in use for key %s" %
(code, _inverted_registry[code]))
_extension_registry[key] = code
_inverted_registry[code] = key
def remove_extension(module, name, code):
"""Unregister an extension code. For testing only."""
key = (module, name)
if (_extension_registry.get(key) != code or
_inverted_registry.get(code) != key):
raise ValueError("key %s is not registered with code %s" %
(key, code))
del _extension_registry[key]
del _inverted_registry[code]
if code in _extension_cache:
del _extension_cache[code]
def clear_extension_cache():
_extension_cache.clear()
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
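# Illustrative only: registering and unregistering an extension code from
# the third-party range (192-239). The module/class names are made up.
if __name__ == '__main__':
    add_extension('mymodule', 'MyClass', 200)
    assert _extension_registry[('mymodule', 'MyClass')] == 200
    add_extension('mymodule', 'MyClass', 200)   # redundant; benign
    remove_extension('mymodule', 'MyClass', 200)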
|
gpl-2.0
| 4,978,687,998,582,695,000 | 4,318,906,424,513,444,000 | 32.830846 | 75 | 0.604853 | false |
probablytom/tomwallis.net
|
venv/lib/python2.7/site-packages/django/contrib/staticfiles/utils.py
|
114
|
1976
|
import os
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
"""
    Return True if ``path`` matches any pattern in ``patterns`` (i.e. the
    path should be ignored), False otherwise.
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
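# Illustrative only (fnmatch-style, case-sensitive matching):
#
#   matches_patterns('backup.html~', ['CVS', '*~'])  ->  True
#   matches_patterns('index.html', ['CVS', '*~'])    ->  False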
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_patterns):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_patterns):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
def check_settings(base_url=None):
"""
Checks if the staticfiles settings have sane values.
"""
if base_url is None:
base_url = settings.STATIC_URL
if not base_url:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the required STATIC_URL setting.")
if settings.MEDIA_URL == base_url:
raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
"settings must have different values")
if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and
(settings.MEDIA_ROOT == settings.STATIC_ROOT)):
raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
"settings must have different values")
|
artistic-2.0
| -9,005,804,250,842,067,000 | 6,046,890,543,282,384,000 | 31.933333 | 73 | 0.624494 | false |
geminy/aidear
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/net/data/verify_certificate_chain_unittest/generate-expired-target.py
|
5
|
1324
|
#!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with 1 intermediate, where the target is expired (violates
validity.notAfter). Verification is expected to fail."""
import common
# Self-signed root certificate (used as trust anchor).
root = common.create_self_signed_root_certificate('Root')
root.set_validity_range(common.JANUARY_1_2015_UTC, common.JANUARY_1_2016_UTC)
# Intermediate certificate.
intermediate = common.create_intermediate_certificate('Intermediate', root)
intermediate.set_validity_range(common.JANUARY_1_2015_UTC,
common.JANUARY_1_2016_UTC)
# Target certificate.
target = common.create_end_entity_certificate('Target', intermediate)
target.set_validity_range(common.JANUARY_1_2015_UTC, common.MARCH_1_2015_UTC)
chain = [target, intermediate]
trusted = common.TrustAnchor(root, constrained=False)
# Both the root and intermediate are valid at this time, however the
# target is not.
time = common.MARCH_2_2015_UTC
verify_result = False
errors = """[Context] Processing Certificate
index: 1
[Error] Time is after notAfter
"""
common.write_test_file(__doc__, chain, trusted, time, verify_result, errors)
|
gpl-3.0
| 1,157,404,927,867,178,500 | -835,169,677,139,713,900 | 35.777778 | 79 | 0.747734 | false |
LokiCoder/Sick-Beard
|
lib/pythontwitter/__init__.py
|
23
|
150067
|
#!/usr/bin/env python
#
# vim: sw=2 ts=2 sts=2
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A library that provides a Python interface to the Twitter API'''
__author__ = '[email protected]'
__version__ = '1.0.1'
import base64
import calendar
import datetime
import httplib
import os
import rfc822
import sys
import tempfile
import textwrap
import time
import urllib
import urllib2
import urlparse
import gzip
import StringIO
try:
# Python >= 2.6
import json as simplejson
except ImportError:
try:
# Python < 2.6
import lib.simplejson as simplejson
except ImportError:
try:
# Google App Engine
from django.utils import simplejson
except ImportError:
raise ImportError, "Unable to load a json library"
# parse_qsl moved to urlparse module in v2.6
try:
from urlparse import parse_qsl, parse_qs
except ImportError:
from cgi import parse_qsl, parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
import lib.oauth2 as oauth
CHARACTER_LIMIT = 140
# A singleton representing a lazily instantiated FileCache.
DEFAULT_CACHE = object()
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
class TwitterError(Exception):
'''Base class for Twitter errors'''
@property
def message(self):
'''Returns the first argument used to construct this error.'''
return self.args[0]
class Status(object):
'''A class representing the Status structure used by the twitter API.
The Status structure exposes the following properties:
status.created_at
status.created_at_in_seconds # read only
status.favorited
status.favorite_count
status.in_reply_to_screen_name
status.in_reply_to_user_id
status.in_reply_to_status_id
status.truncated
status.source
status.id
status.text
status.location
status.relative_created_at # read only
status.user
status.urls
status.user_mentions
status.hashtags
status.geo
status.place
status.coordinates
status.contributors
'''
def __init__(self,
created_at=None,
favorited=None,
favorite_count=None,
id=None,
text=None,
location=None,
user=None,
in_reply_to_screen_name=None,
in_reply_to_user_id=None,
in_reply_to_status_id=None,
truncated=None,
source=None,
now=None,
urls=None,
user_mentions=None,
hashtags=None,
media=None,
geo=None,
place=None,
coordinates=None,
contributors=None,
retweeted=None,
retweeted_status=None,
current_user_retweet=None,
retweet_count=None,
possibly_sensitive=None,
scopes=None,
withheld_copyright=None,
withheld_in_countries=None,
withheld_scope=None):
'''An object to hold a Twitter status message.
This class is normally instantiated by the twitter.Api class and
returned in a sequence.
Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"
Args:
created_at:
The time this status message was posted. [Optional]
favorited:
Whether this is a favorite of the authenticated user. [Optional]
favorite_count:
Number of times this status message has been favorited. [Optional]
id:
The unique id of this status message. [Optional]
text:
The text of this status message. [Optional]
location:
the geolocation string associated with this message. [Optional]
relative_created_at:
A human readable string representing the posting time. [Optional]
user:
A twitter.User instance representing the person posting the
message. [Optional]
now:
The current time, if the client chooses to set it.
Defaults to the wall clock time. [Optional]
urls:
user_mentions:
hashtags:
geo:
place:
coordinates:
contributors:
retweeted:
retweeted_status:
current_user_retweet:
retweet_count:
possibly_sensitive:
scopes:
withheld_copyright:
withheld_in_countries:
withheld_scope:
'''
self.created_at = created_at
self.favorited = favorited
self.favorite_count = favorite_count
self.id = id
self.text = text
self.location = location
self.user = user
self.now = now
self.in_reply_to_screen_name = in_reply_to_screen_name
self.in_reply_to_user_id = in_reply_to_user_id
self.in_reply_to_status_id = in_reply_to_status_id
self.truncated = truncated
self.retweeted = retweeted
self.source = source
self.urls = urls
self.user_mentions = user_mentions
self.hashtags = hashtags
self.media = media
self.geo = geo
self.place = place
self.coordinates = coordinates
self.contributors = contributors
self.retweeted_status = retweeted_status
self.current_user_retweet = current_user_retweet
self.retweet_count = retweet_count
self.possibly_sensitive = possibly_sensitive
self.scopes = scopes
self.withheld_copyright = withheld_copyright
self.withheld_in_countries = withheld_in_countries
self.withheld_scope = withheld_scope
def GetCreatedAt(self):
'''Get the time this status message was posted.
Returns:
The time this status message was posted
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set the time this status message was posted.
Args:
created_at:
The time this status message was created
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc='The time this status message was posted.')
def GetCreatedAtInSeconds(self):
'''Get the time this status message was posted, in seconds since the epoch.
Returns:
The time this status message was posted, in seconds since the epoch.
'''
return calendar.timegm(rfc822.parsedate(self.created_at))
created_at_in_seconds = property(GetCreatedAtInSeconds,
doc="The time this status message was "
"posted, in seconds since the epoch")
def GetFavorited(self):
'''Get the favorited setting of this status message.
Returns:
True if this status message is favorited; False otherwise
'''
return self._favorited
def SetFavorited(self, favorited):
'''Set the favorited state of this status message.
Args:
favorited:
boolean True/False favorited state of this status message
'''
self._favorited = favorited
favorited = property(GetFavorited, SetFavorited,
doc='The favorited state of this status message.')
def GetFavoriteCount(self):
'''Get the favorite count of this status message.
Returns:
number of times this status message has been favorited
'''
return self._favorite_count
def SetFavoriteCount(self, favorite_count):
'''Set the favorited state of this status message.
Args:
favorite_count:
int number of favorites for this status message
'''
self._favorite_count = favorite_count
favorite_count = property(GetFavoriteCount, SetFavoriteCount,
doc='The number of favorites for this status message.')
def GetId(self):
'''Get the unique id of this status message.
Returns:
The unique id of this status message
'''
return self._id
def SetId(self, id):
'''Set the unique id of this status message.
Args:
id:
The unique id of this status message
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this status message.')
def GetInReplyToScreenName(self):
return self._in_reply_to_screen_name
def SetInReplyToScreenName(self, in_reply_to_screen_name):
self._in_reply_to_screen_name = in_reply_to_screen_name
in_reply_to_screen_name = property(GetInReplyToScreenName, SetInReplyToScreenName,
doc='')
def GetInReplyToUserId(self):
return self._in_reply_to_user_id
def SetInReplyToUserId(self, in_reply_to_user_id):
self._in_reply_to_user_id = in_reply_to_user_id
in_reply_to_user_id = property(GetInReplyToUserId, SetInReplyToUserId,
doc='')
def GetInReplyToStatusId(self):
return self._in_reply_to_status_id
def SetInReplyToStatusId(self, in_reply_to_status_id):
self._in_reply_to_status_id = in_reply_to_status_id
in_reply_to_status_id = property(GetInReplyToStatusId, SetInReplyToStatusId,
doc='')
def GetTruncated(self):
return self._truncated
def SetTruncated(self, truncated):
self._truncated = truncated
truncated = property(GetTruncated, SetTruncated,
doc='')
def GetRetweeted(self):
return self._retweeted
def SetRetweeted(self, retweeted):
self._retweeted = retweeted
retweeted = property(GetRetweeted, SetRetweeted,
doc='')
def GetSource(self):
return self._source
def SetSource(self, source):
self._source = source
source = property(GetSource, SetSource,
doc='')
def GetText(self):
'''Get the text of this status message.
Returns:
The text of this status message.
'''
return self._text
def SetText(self, text):
'''Set the text of this status message.
Args:
text:
The text of this status message
'''
self._text = text
text = property(GetText, SetText,
doc='The text of this status message')
def GetLocation(self):
'''Get the geolocation associated with this status message
Returns:
The geolocation string of this status message.
'''
return self._location
def SetLocation(self, location):
'''Set the geolocation associated with this status message
Args:
location:
The geolocation string of this status message
'''
self._location = location
location = property(GetLocation, SetLocation,
doc='The geolocation string of this status message')
def GetRelativeCreatedAt(self):
'''Get a human readable string representing the posting time
Returns:
A human readable string representing the posting time
'''
fudge = 1.25
delta = long(self.now) - long(self.created_at_in_seconds)
if delta < (1 * fudge):
return 'about a second ago'
elif delta < (60 * (1/fudge)):
return 'about %d seconds ago' % (delta)
elif delta < (60 * fudge):
return 'about a minute ago'
elif delta < (60 * 60 * (1/fudge)):
return 'about %d minutes ago' % (delta / 60)
elif delta < (60 * 60 * fudge) or delta / (60 * 60) == 1:
return 'about an hour ago'
elif delta < (60 * 60 * 24 * (1/fudge)):
return 'about %d hours ago' % (delta / (60 * 60))
elif delta < (60 * 60 * 24 * fudge) or delta / (60 * 60 * 24) == 1:
return 'about a day ago'
else:
return 'about %d days ago' % (delta / (60 * 60 * 24))
relative_created_at = property(GetRelativeCreatedAt,
doc='Get a human readable string representing '
'the posting time')
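  # Illustrative only: how the fudge-factor bucketing above plays out.
  #
  #   s = Status(created_at='Sat Jan 27 04:17:38 +0000 2007')
  #   s.now = s.created_at_in_seconds + 90
  #   s.relative_created_at  ->  'about 1 minutes ago'  (90 / 60 == 1)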
def GetUser(self):
'''Get a twitter.User representing the entity posting this status message.
Returns:
A twitter.User representing the entity posting this status message
'''
return self._user
def SetUser(self, user):
'''Set a twitter.User representing the entity posting this status message.
Args:
user:
A twitter.User representing the entity posting this status message
'''
self._user = user
user = property(GetUser, SetUser,
doc='A twitter.User representing the entity posting this '
'status message')
def GetNow(self):
'''Get the wallclock time for this status message.
Used to calculate relative_created_at. Defaults to the time
the object was instantiated.
Returns:
Whatever the status instance believes the current time to be,
in seconds since the epoch.
'''
if self._now is None:
self._now = time.time()
return self._now
def SetNow(self, now):
'''Set the wallclock time for this status message.
Used to calculate relative_created_at. Defaults to the time
the object was instantiated.
Args:
now:
The wallclock time for this instance.
'''
self._now = now
now = property(GetNow, SetNow,
doc='The wallclock time for this status instance.')
def GetGeo(self):
return self._geo
def SetGeo(self, geo):
self._geo = geo
geo = property(GetGeo, SetGeo,
doc='')
def GetPlace(self):
return self._place
def SetPlace(self, place):
self._place = place
place = property(GetPlace, SetPlace,
doc='')
def GetCoordinates(self):
return self._coordinates
def SetCoordinates(self, coordinates):
self._coordinates = coordinates
coordinates = property(GetCoordinates, SetCoordinates,
doc='')
def GetContributors(self):
return self._contributors
def SetContributors(self, contributors):
self._contributors = contributors
contributors = property(GetContributors, SetContributors,
doc='')
def GetRetweeted_status(self):
return self._retweeted_status
def SetRetweeted_status(self, retweeted_status):
self._retweeted_status = retweeted_status
retweeted_status = property(GetRetweeted_status, SetRetweeted_status,
doc='')
def GetRetweetCount(self):
return self._retweet_count
def SetRetweetCount(self, retweet_count):
self._retweet_count = retweet_count
retweet_count = property(GetRetweetCount, SetRetweetCount,
doc='')
def GetCurrent_user_retweet(self):
return self._current_user_retweet
def SetCurrent_user_retweet(self, current_user_retweet):
self._current_user_retweet = current_user_retweet
current_user_retweet = property(GetCurrent_user_retweet, SetCurrent_user_retweet,
doc='')
def GetPossibly_sensitive(self):
return self._possibly_sensitive
def SetPossibly_sensitive(self, possibly_sensitive):
self._possibly_sensitive = possibly_sensitive
possibly_sensitive = property(GetPossibly_sensitive, SetPossibly_sensitive,
doc='')
def GetScopes(self):
return self._scopes
def SetScopes(self, scopes):
self._scopes = scopes
scopes = property(GetScopes, SetScopes, doc='')
def GetWithheld_copyright(self):
return self._withheld_copyright
def SetWithheld_copyright(self, withheld_copyright):
self._withheld_copyright = withheld_copyright
withheld_copyright = property(GetWithheld_copyright, SetWithheld_copyright,
doc='')
def GetWithheld_in_countries(self):
return self._withheld_in_countries
def SetWithheld_in_countries(self, withheld_in_countries):
self._withheld_in_countries = withheld_in_countries
withheld_in_countries = property(GetWithheld_in_countries, SetWithheld_in_countries,
doc='')
def GetWithheld_scope(self):
return self._withheld_scope
def SetWithheld_scope(self, withheld_scope):
self._withheld_scope = withheld_scope
withheld_scope = property(GetWithheld_scope, SetWithheld_scope,
doc='')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.created_at == other.created_at and \
self.id == other.id and \
self.text == other.text and \
self.location == other.location and \
self.user == other.user and \
self.in_reply_to_screen_name == other.in_reply_to_screen_name and \
self.in_reply_to_user_id == other.in_reply_to_user_id and \
self.in_reply_to_status_id == other.in_reply_to_status_id and \
self.truncated == other.truncated and \
self.retweeted == other.retweeted and \
self.favorited == other.favorited and \
self.favorite_count == other.favorite_count and \
self.source == other.source and \
self.geo == other.geo and \
self.place == other.place and \
self.coordinates == other.coordinates and \
self.contributors == other.contributors and \
self.retweeted_status == other.retweeted_status and \
self.retweet_count == other.retweet_count and \
self.current_user_retweet == other.current_user_retweet and \
self.possibly_sensitive == other.possibly_sensitive and \
self.scopes == other.scopes and \
self.withheld_copyright == other.withheld_copyright and \
self.withheld_in_countries == other.withheld_in_countries and \
self.withheld_scope == other.withheld_scope
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.Status instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.Status instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.Status instance.
Returns:
A JSON string representation of this twitter.Status instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.Status instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.Status instance
'''
data = {}
if self.created_at:
data['created_at'] = self.created_at
if self.favorited:
data['favorited'] = self.favorited
if self.favorite_count:
data['favorite_count'] = self.favorite_count
if self.id:
data['id'] = self.id
if self.text:
data['text'] = self.text
if self.location:
data['location'] = self.location
if self.user:
data['user'] = self.user.AsDict()
if self.in_reply_to_screen_name:
data['in_reply_to_screen_name'] = self.in_reply_to_screen_name
if self.in_reply_to_user_id:
data['in_reply_to_user_id'] = self.in_reply_to_user_id
if self.in_reply_to_status_id:
data['in_reply_to_status_id'] = self.in_reply_to_status_id
if self.truncated is not None:
data['truncated'] = self.truncated
if self.retweeted is not None:
data['retweeted'] = self.retweeted
if self.favorited is not None:
data['favorited'] = self.favorited
if self.source:
data['source'] = self.source
if self.geo:
data['geo'] = self.geo
if self.place:
data['place'] = self.place
if self.coordinates:
data['coordinates'] = self.coordinates
if self.contributors:
data['contributors'] = self.contributors
if self.hashtags:
data['hashtags'] = [h.text for h in self.hashtags]
if self.retweeted_status:
data['retweeted_status'] = self.retweeted_status.AsDict()
if self.retweet_count:
data['retweet_count'] = self.retweet_count
if self.urls:
data['urls'] = dict([(url.url, url.expanded_url) for url in self.urls])
if self.user_mentions:
data['user_mentions'] = [um.AsDict() for um in self.user_mentions]
if self.current_user_retweet:
data['current_user_retweet'] = self.current_user_retweet
if self.possibly_sensitive:
data['possibly_sensitive'] = self.possibly_sensitive
if self.scopes:
data['scopes'] = self.scopes
if self.withheld_copyright:
data['withheld_copyright'] = self.withheld_copyright
if self.withheld_in_countries:
data['withheld_in_countries'] = self.withheld_in_countries
if self.withheld_scope:
data['withheld_scope'] = self.withheld_scope
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Status instance
'''
if 'user' in data:
user = User.NewFromJsonDict(data['user'])
else:
user = None
if 'retweeted_status' in data:
retweeted_status = Status.NewFromJsonDict(data['retweeted_status'])
else:
retweeted_status = None
if 'current_user_retweet' in data:
current_user_retweet = data['current_user_retweet']['id']
else:
current_user_retweet = None
urls = None
user_mentions = None
hashtags = None
media = None
if 'entities' in data:
if 'urls' in data['entities']:
urls = [Url.NewFromJsonDict(u) for u in data['entities']['urls']]
if 'user_mentions' in data['entities']:
user_mentions = [User.NewFromJsonDict(u) for u in data['entities']['user_mentions']]
if 'hashtags' in data['entities']:
hashtags = [Hashtag.NewFromJsonDict(h) for h in data['entities']['hashtags']]
if 'media' in data['entities']:
media = data['entities']['media']
else:
media = []
return Status(created_at=data.get('created_at', None),
favorited=data.get('favorited', None),
favorite_count=data.get('favorite_count', None),
id=data.get('id', None),
text=data.get('text', None),
location=data.get('location', None),
in_reply_to_screen_name=data.get('in_reply_to_screen_name', None),
in_reply_to_user_id=data.get('in_reply_to_user_id', None),
in_reply_to_status_id=data.get('in_reply_to_status_id', None),
truncated=data.get('truncated', None),
retweeted=data.get('retweeted', None),
source=data.get('source', None),
user=user,
urls=urls,
user_mentions=user_mentions,
hashtags=hashtags,
media=media,
geo=data.get('geo', None),
place=data.get('place', None),
coordinates=data.get('coordinates', None),
contributors=data.get('contributors', None),
retweeted_status=retweeted_status,
current_user_retweet=current_user_retweet,
retweet_count=data.get('retweet_count', None),
possibly_sensitive=data.get('possibly_sensitive', None),
scopes=data.get('scopes', None),
withheld_copyright=data.get('withheld_copyright', None),
withheld_in_countries=data.get('withheld_in_countries', None),
withheld_scope=data.get('withheld_scope', None))
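# Illustrative only: building a Status from a (much simplified) payload.
#
#   payload = {'id': 123,
#              'text': 'hello world',
#              'created_at': 'Sat Jan 27 04:17:38 +0000 2007',
#              'user': {'id': 7, 'screen_name': 'example'}}
#   status = Status.NewFromJsonDict(payload)
#   status.user.screen_name  ->  'example'
#   status.AsJsonString()    ->  JSON containing exactly the keys set above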
class User(object):
'''A class representing the User structure used by the twitter API.
The User structure exposes the following properties:
user.id
user.name
user.screen_name
user.location
user.description
user.profile_image_url
user.profile_background_tile
user.profile_background_image_url
user.profile_sidebar_fill_color
user.profile_background_color
user.profile_link_color
user.profile_text_color
user.protected
user.utc_offset
user.time_zone
user.url
user.status
user.statuses_count
user.followers_count
user.friends_count
user.favourites_count
user.geo_enabled
user.verified
user.lang
user.notifications
user.contributors_enabled
user.created_at
user.listed_count
'''
def __init__(self,
id=None,
name=None,
screen_name=None,
location=None,
description=None,
profile_image_url=None,
profile_background_tile=None,
profile_background_image_url=None,
profile_sidebar_fill_color=None,
profile_background_color=None,
profile_link_color=None,
profile_text_color=None,
protected=None,
utc_offset=None,
time_zone=None,
followers_count=None,
friends_count=None,
statuses_count=None,
favourites_count=None,
url=None,
status=None,
geo_enabled=None,
verified=None,
lang=None,
notifications=None,
contributors_enabled=None,
created_at=None,
listed_count=None):
self.id = id
self.name = name
self.screen_name = screen_name
self.location = location
self.description = description
self.profile_image_url = profile_image_url
self.profile_background_tile = profile_background_tile
self.profile_background_image_url = profile_background_image_url
self.profile_sidebar_fill_color = profile_sidebar_fill_color
self.profile_background_color = profile_background_color
self.profile_link_color = profile_link_color
self.profile_text_color = profile_text_color
self.protected = protected
self.utc_offset = utc_offset
self.time_zone = time_zone
self.followers_count = followers_count
self.friends_count = friends_count
self.statuses_count = statuses_count
self.favourites_count = favourites_count
self.url = url
self.status = status
self.geo_enabled = geo_enabled
self.verified = verified
self.lang = lang
self.notifications = notifications
self.contributors_enabled = contributors_enabled
self.created_at = created_at
self.listed_count = listed_count
def GetId(self):
'''Get the unique id of this user.
Returns:
The unique id of this user
'''
return self._id
def SetId(self, id):
'''Set the unique id of this user.
Args:
id: The unique id of this user.
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this user.')
def GetName(self):
'''Get the real name of this user.
Returns:
The real name of this user
'''
return self._name
def SetName(self, name):
'''Set the real name of this user.
Args:
name: The real name of this user
'''
self._name = name
name = property(GetName, SetName,
doc='The real name of this user.')
def GetScreenName(self):
'''Get the short twitter name of this user.
Returns:
The short twitter name of this user
'''
return self._screen_name
def SetScreenName(self, screen_name):
'''Set the short twitter name of this user.
Args:
screen_name: the short twitter name of this user
'''
self._screen_name = screen_name
screen_name = property(GetScreenName, SetScreenName,
doc='The short twitter name of this user.')
def GetLocation(self):
'''Get the geographic location of this user.
Returns:
The geographic location of this user
'''
return self._location
def SetLocation(self, location):
'''Set the geographic location of this user.
Args:
location: The geographic location of this user
'''
self._location = location
location = property(GetLocation, SetLocation,
doc='The geographic location of this user.')
def GetDescription(self):
'''Get the short text description of this user.
Returns:
The short text description of this user
'''
return self._description
def SetDescription(self, description):
'''Set the short text description of this user.
Args:
description: The short text description of this user
'''
self._description = description
description = property(GetDescription, SetDescription,
doc='The short text description of this user.')
def GetUrl(self):
'''Get the homepage url of this user.
Returns:
The homepage url of this user
'''
return self._url
def SetUrl(self, url):
'''Set the homepage url of this user.
Args:
url: The homepage url of this user
'''
self._url = url
url = property(GetUrl, SetUrl,
doc='The homepage url of this user.')
def GetProfileImageUrl(self):
'''Get the url of the thumbnail of this user.
Returns:
The url of the thumbnail of this user
'''
return self._profile_image_url
def SetProfileImageUrl(self, profile_image_url):
'''Set the url of the thumbnail of this user.
Args:
profile_image_url: The url of the thumbnail of this user
'''
self._profile_image_url = profile_image_url
  profile_image_url = property(GetProfileImageUrl, SetProfileImageUrl,
doc='The url of the thumbnail of this user.')
def GetProfileBackgroundTile(self):
'''Boolean for whether to tile the profile background image.
Returns:
True if the background is to be tiled, False if not, None if unset.
'''
return self._profile_background_tile
def SetProfileBackgroundTile(self, profile_background_tile):
'''Set the boolean flag for whether to tile the profile background image.
Args:
profile_background_tile: Boolean flag for whether to tile or not.
'''
self._profile_background_tile = profile_background_tile
profile_background_tile = property(GetProfileBackgroundTile, SetProfileBackgroundTile,
doc='Boolean for whether to tile the background image.')
def GetProfileBackgroundImageUrl(self):
return self._profile_background_image_url
def SetProfileBackgroundImageUrl(self, profile_background_image_url):
self._profile_background_image_url = profile_background_image_url
profile_background_image_url = property(GetProfileBackgroundImageUrl, SetProfileBackgroundImageUrl,
doc='The url of the profile background of this user.')
def GetProfileSidebarFillColor(self):
return self._profile_sidebar_fill_color
def SetProfileSidebarFillColor(self, profile_sidebar_fill_color):
self._profile_sidebar_fill_color = profile_sidebar_fill_color
profile_sidebar_fill_color = property(GetProfileSidebarFillColor, SetProfileSidebarFillColor)
def GetProfileBackgroundColor(self):
return self._profile_background_color
def SetProfileBackgroundColor(self, profile_background_color):
self._profile_background_color = profile_background_color
profile_background_color = property(GetProfileBackgroundColor, SetProfileBackgroundColor)
def GetProfileLinkColor(self):
return self._profile_link_color
def SetProfileLinkColor(self, profile_link_color):
self._profile_link_color = profile_link_color
profile_link_color = property(GetProfileLinkColor, SetProfileLinkColor)
def GetProfileTextColor(self):
return self._profile_text_color
def SetProfileTextColor(self, profile_text_color):
self._profile_text_color = profile_text_color
profile_text_color = property(GetProfileTextColor, SetProfileTextColor)
def GetProtected(self):
return self._protected
def SetProtected(self, protected):
self._protected = protected
protected = property(GetProtected, SetProtected)
def GetUtcOffset(self):
return self._utc_offset
def SetUtcOffset(self, utc_offset):
self._utc_offset = utc_offset
utc_offset = property(GetUtcOffset, SetUtcOffset)
def GetTimeZone(self):
'''Returns the current time zone string for the user.
Returns:
The descriptive time zone string for the user.
'''
return self._time_zone
def SetTimeZone(self, time_zone):
'''Sets the user's time zone string.
Args:
time_zone:
The descriptive time zone to assign for the user.
'''
self._time_zone = time_zone
time_zone = property(GetTimeZone, SetTimeZone)
def GetStatus(self):
'''Get the latest twitter.Status of this user.
Returns:
The latest twitter.Status of this user
'''
return self._status
def SetStatus(self, status):
'''Set the latest twitter.Status of this user.
Args:
status:
The latest twitter.Status of this user
'''
self._status = status
status = property(GetStatus, SetStatus,
doc='The latest twitter.Status of this user.')
def GetFriendsCount(self):
'''Get the friend count for this user.
Returns:
The number of users this user has befriended.
'''
return self._friends_count
def SetFriendsCount(self, count):
'''Set the friend count for this user.
Args:
count:
The number of users this user has befriended.
'''
self._friends_count = count
friends_count = property(GetFriendsCount, SetFriendsCount,
doc='The number of friends for this user.')
def GetListedCount(self):
'''Get the listed count for this user.
Returns:
The number of lists this user belongs to.
'''
return self._listed_count
def SetListedCount(self, count):
'''Set the listed count for this user.
Args:
count:
The number of lists this user belongs to.
'''
self._listed_count = count
listed_count = property(GetListedCount, SetListedCount,
doc='The number of lists this user belongs to.')
def GetFollowersCount(self):
'''Get the follower count for this user.
Returns:
The number of users following this user.
'''
return self._followers_count
def SetFollowersCount(self, count):
'''Set the follower count for this user.
Args:
count:
The number of users following this user.
'''
self._followers_count = count
followers_count = property(GetFollowersCount, SetFollowersCount,
doc='The number of users following this user.')
def GetStatusesCount(self):
'''Get the number of status updates for this user.
Returns:
The number of status updates for this user.
'''
return self._statuses_count
def SetStatusesCount(self, count):
'''Set the status update count for this user.
Args:
count:
The number of updates for this user.
'''
self._statuses_count = count
statuses_count = property(GetStatusesCount, SetStatusesCount,
doc='The number of updates for this user.')
def GetFavouritesCount(self):
'''Get the number of favourites for this user.
Returns:
The number of favourites for this user.
'''
return self._favourites_count
def SetFavouritesCount(self, count):
'''Set the favourite count for this user.
Args:
count:
The number of favourites for this user.
'''
self._favourites_count = count
favourites_count = property(GetFavouritesCount, SetFavouritesCount,
doc='The number of favourites for this user.')
def GetGeoEnabled(self):
'''Get the setting of geo_enabled for this user.
Returns:
True/False if Geo tagging is enabled
'''
return self._geo_enabled
def SetGeoEnabled(self, geo_enabled):
'''Set the latest twitter.geo_enabled of this user.
Args:
geo_enabled:
True/False if Geo tagging is to be enabled
'''
self._geo_enabled = geo_enabled
geo_enabled = property(GetGeoEnabled, SetGeoEnabled,
doc='The value of twitter.geo_enabled for this user.')
def GetVerified(self):
'''Get the setting of verified for this user.
Returns:
True/False if user is a verified account
'''
return self._verified
def SetVerified(self, verified):
'''Set twitter.verified for this user.
Args:
verified:
True/False if user is a verified account
'''
self._verified = verified
verified = property(GetVerified, SetVerified,
doc='The value of twitter.verified for this user.')
def GetLang(self):
'''Get the setting of lang for this user.
Returns:
language code of the user
'''
return self._lang
def SetLang(self, lang):
'''Set twitter.lang for this user.
Args:
lang:
language code for the user
'''
self._lang = lang
lang = property(GetLang, SetLang,
doc='The value of twitter.lang for this user.')
def GetNotifications(self):
'''Get the setting of notifications for this user.
Returns:
True/False for the notifications setting of the user
'''
return self._notifications
def SetNotifications(self, notifications):
'''Set twitter.notifications for this user.
Args:
notifications:
True/False notifications setting for the user
'''
self._notifications = notifications
notifications = property(GetNotifications, SetNotifications,
doc='The value of twitter.notifications for this user.')
def GetContributorsEnabled(self):
'''Get the setting of contributors_enabled for this user.
Returns:
True/False contributors_enabled of the user
'''
return self._contributors_enabled
def SetContributorsEnabled(self, contributors_enabled):
'''Set twitter.contributors_enabled for this user.
Args:
contributors_enabled:
True/False contributors_enabled setting for the user
'''
self._contributors_enabled = contributors_enabled
contributors_enabled = property(GetContributorsEnabled, SetContributorsEnabled,
doc='The value of twitter.contributors_enabled for this user.')
def GetCreatedAt(self):
'''Get the setting of created_at for this user.
Returns:
created_at value of the user
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set twitter.created_at for this user.
Args:
created_at:
created_at value for the user
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc='The value of twitter.created_at for this user.')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.name == other.name and \
self.screen_name == other.screen_name and \
self.location == other.location and \
self.description == other.description and \
self.profile_image_url == other.profile_image_url and \
self.profile_background_tile == other.profile_background_tile and \
self.profile_background_image_url == other.profile_background_image_url and \
self.profile_sidebar_fill_color == other.profile_sidebar_fill_color and \
self.profile_background_color == other.profile_background_color and \
self.profile_link_color == other.profile_link_color and \
self.profile_text_color == other.profile_text_color and \
self.protected == other.protected and \
self.utc_offset == other.utc_offset and \
self.time_zone == other.time_zone and \
self.url == other.url and \
self.statuses_count == other.statuses_count and \
self.followers_count == other.followers_count and \
self.favourites_count == other.favourites_count and \
self.friends_count == other.friends_count and \
self.status == other.status and \
self.geo_enabled == other.geo_enabled and \
self.verified == other.verified and \
self.lang == other.lang and \
self.notifications == other.notifications and \
self.contributors_enabled == other.contributors_enabled and \
self.created_at == other.created_at and \
self.listed_count == other.listed_count
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.User instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.User instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.User instance.
Returns:
A JSON string representation of this twitter.User instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.User instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.User instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.name:
data['name'] = self.name
if self.screen_name:
data['screen_name'] = self.screen_name
if self.location:
data['location'] = self.location
if self.description:
data['description'] = self.description
if self.profile_image_url:
data['profile_image_url'] = self.profile_image_url
if self.profile_background_tile is not None:
data['profile_background_tile'] = self.profile_background_tile
    if self.profile_background_image_url:
      data['profile_background_image_url'] = self.profile_background_image_url
    if self.profile_sidebar_fill_color:
      data['profile_sidebar_fill_color'] = self.profile_sidebar_fill_color
if self.profile_background_color:
data['profile_background_color'] = self.profile_background_color
if self.profile_link_color:
data['profile_link_color'] = self.profile_link_color
if self.profile_text_color:
data['profile_text_color'] = self.profile_text_color
if self.protected is not None:
data['protected'] = self.protected
if self.utc_offset:
data['utc_offset'] = self.utc_offset
if self.time_zone:
data['time_zone'] = self.time_zone
if self.url:
data['url'] = self.url
if self.status:
data['status'] = self.status.AsDict()
if self.friends_count:
data['friends_count'] = self.friends_count
if self.followers_count:
data['followers_count'] = self.followers_count
if self.statuses_count:
data['statuses_count'] = self.statuses_count
if self.favourites_count:
data['favourites_count'] = self.favourites_count
if self.geo_enabled:
data['geo_enabled'] = self.geo_enabled
if self.verified:
data['verified'] = self.verified
if self.lang:
data['lang'] = self.lang
if self.notifications:
data['notifications'] = self.notifications
if self.contributors_enabled:
data['contributors_enabled'] = self.contributors_enabled
if self.created_at:
data['created_at'] = self.created_at
if self.listed_count:
data['listed_count'] = self.listed_count
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.User instance
'''
if 'status' in data:
status = Status.NewFromJsonDict(data['status'])
else:
status = None
return User(id=data.get('id', None),
name=data.get('name', None),
screen_name=data.get('screen_name', None),
location=data.get('location', None),
description=data.get('description', None),
statuses_count=data.get('statuses_count', None),
followers_count=data.get('followers_count', None),
favourites_count=data.get('favourites_count', None),
friends_count=data.get('friends_count', None),
profile_image_url=data.get('profile_image_url_https', data.get('profile_image_url', None)),
                profile_background_tile=data.get('profile_background_tile', None),
                profile_background_image_url=data.get('profile_background_image_url', None),
                profile_sidebar_fill_color=data.get('profile_sidebar_fill_color', None),
                profile_background_color=data.get('profile_background_color', None),
                profile_link_color=data.get('profile_link_color', None),
                profile_text_color=data.get('profile_text_color', None),
                protected=data.get('protected', None),
                utc_offset=data.get('utc_offset', None),
                time_zone=data.get('time_zone', None),
url=data.get('url', None),
status=status,
geo_enabled=data.get('geo_enabled', None),
verified=data.get('verified', None),
lang=data.get('lang', None),
notifications=data.get('notifications', None),
contributors_enabled=data.get('contributors_enabled', None),
created_at=data.get('created_at', None),
listed_count=data.get('listed_count', None))
class List(object):
'''A class representing the List structure used by the twitter API.
The List structure exposes the following properties:
list.id
list.name
list.slug
list.description
list.full_name
list.mode
list.uri
list.member_count
list.subscriber_count
list.following
'''
def __init__(self,
id=None,
name=None,
slug=None,
description=None,
full_name=None,
mode=None,
uri=None,
member_count=None,
subscriber_count=None,
following=None,
user=None):
self.id = id
self.name = name
self.slug = slug
self.description = description
self.full_name = full_name
self.mode = mode
self.uri = uri
self.member_count = member_count
self.subscriber_count = subscriber_count
self.following = following
self.user = user
def GetId(self):
'''Get the unique id of this list.
Returns:
The unique id of this list
'''
return self._id
def SetId(self, id):
'''Set the unique id of this list.
Args:
id:
The unique id of this list.
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this list.')
def GetName(self):
'''Get the real name of this list.
Returns:
The real name of this list
'''
return self._name
def SetName(self, name):
'''Set the real name of this list.
Args:
name:
The real name of this list
'''
self._name = name
name = property(GetName, SetName,
doc='The real name of this list.')
def GetSlug(self):
'''Get the slug of this list.
Returns:
The slug of this list
'''
return self._slug
def SetSlug(self, slug):
'''Set the slug of this list.
Args:
slug:
The slug of this list.
'''
self._slug = slug
slug = property(GetSlug, SetSlug,
doc='The slug of this list.')
def GetDescription(self):
'''Get the description of this list.
Returns:
The description of this list
'''
return self._description
def SetDescription(self, description):
'''Set the description of this list.
Args:
description:
The description of this list.
'''
self._description = description
description = property(GetDescription, SetDescription,
doc='The description of this list.')
def GetFull_name(self):
'''Get the full_name of this list.
Returns:
The full_name of this list
'''
return self._full_name
def SetFull_name(self, full_name):
'''Set the full_name of this list.
Args:
full_name:
The full_name of this list.
'''
self._full_name = full_name
full_name = property(GetFull_name, SetFull_name,
doc='The full_name of this list.')
def GetMode(self):
'''Get the mode of this list.
Returns:
The mode of this list
'''
return self._mode
def SetMode(self, mode):
'''Set the mode of this list.
Args:
mode:
The mode of this list.
'''
self._mode = mode
mode = property(GetMode, SetMode,
doc='The mode of this list.')
def GetUri(self):
'''Get the uri of this list.
Returns:
The uri of this list
'''
return self._uri
def SetUri(self, uri):
'''Set the uri of this list.
Args:
uri:
The uri of this list.
'''
self._uri = uri
uri = property(GetUri, SetUri,
doc='The uri of this list.')
def GetMember_count(self):
'''Get the member_count of this list.
Returns:
The member_count of this list
'''
return self._member_count
def SetMember_count(self, member_count):
'''Set the member_count of this list.
Args:
member_count:
The member_count of this list.
'''
self._member_count = member_count
member_count = property(GetMember_count, SetMember_count,
doc='The member_count of this list.')
def GetSubscriber_count(self):
'''Get the subscriber_count of this list.
Returns:
The subscriber_count of this list
'''
return self._subscriber_count
def SetSubscriber_count(self, subscriber_count):
'''Set the subscriber_count of this list.
Args:
subscriber_count:
The subscriber_count of this list.
'''
self._subscriber_count = subscriber_count
subscriber_count = property(GetSubscriber_count, SetSubscriber_count,
doc='The subscriber_count of this list.')
def GetFollowing(self):
'''Get the following status of this list.
Returns:
The following status of this list
'''
return self._following
def SetFollowing(self, following):
'''Set the following status of this list.
Args:
following:
        The following status of this list.
'''
self._following = following
following = property(GetFollowing, SetFollowing,
doc='The following status of this list.')
def GetUser(self):
'''Get the user of this list.
Returns:
The owner of this list
'''
return self._user
def SetUser(self, user):
'''Set the user of this list.
Args:
user:
The owner of this list.
'''
self._user = user
user = property(GetUser, SetUser,
doc='The owner of this list.')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.name == other.name and \
self.slug == other.slug and \
self.description == other.description and \
self.full_name == other.full_name and \
self.mode == other.mode and \
self.uri == other.uri and \
self.member_count == other.member_count and \
self.subscriber_count == other.subscriber_count and \
self.following == other.following and \
self.user == other.user
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.List instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.List instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.List instance.
Returns:
A JSON string representation of this twitter.List instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.List instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.List instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.name:
data['name'] = self.name
if self.slug:
data['slug'] = self.slug
if self.description:
data['description'] = self.description
if self.full_name:
data['full_name'] = self.full_name
if self.mode:
data['mode'] = self.mode
if self.uri:
data['uri'] = self.uri
if self.member_count is not None:
data['member_count'] = self.member_count
if self.subscriber_count is not None:
data['subscriber_count'] = self.subscriber_count
if self.following is not None:
data['following'] = self.following
if self.user is not None:
data['user'] = self.user.AsDict()
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.List instance
'''
if 'user' in data:
user = User.NewFromJsonDict(data['user'])
else:
user = None
return List(id=data.get('id', None),
name=data.get('name', None),
slug=data.get('slug', None),
description=data.get('description', None),
full_name=data.get('full_name', None),
mode=data.get('mode', None),
uri=data.get('uri', None),
member_count=data.get('member_count', None),
subscriber_count=data.get('subscriber_count', None),
following=data.get('following', None),
user=user)
class DirectMessage(object):
'''A class representing the DirectMessage structure used by the twitter API.
The DirectMessage structure exposes the following properties:
direct_message.id
direct_message.created_at
direct_message.created_at_in_seconds # read only
direct_message.sender_id
direct_message.sender_screen_name
direct_message.recipient_id
direct_message.recipient_screen_name
direct_message.text
'''
def __init__(self,
id=None,
created_at=None,
sender_id=None,
sender_screen_name=None,
recipient_id=None,
recipient_screen_name=None,
text=None):
'''An object to hold a Twitter direct message.
This class is normally instantiated by the twitter.Api class and
returned in a sequence.
Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"
Args:
id:
The unique id of this direct message. [Optional]
created_at:
The time this direct message was posted. [Optional]
sender_id:
The id of the twitter user that sent this message. [Optional]
sender_screen_name:
The name of the twitter user that sent this message. [Optional]
recipient_id:
        The id of the twitter user that received this message. [Optional]
      recipient_screen_name:
        The name of the twitter user that received this message. [Optional]
text:
The text of this direct message. [Optional]
'''
self.id = id
self.created_at = created_at
self.sender_id = sender_id
self.sender_screen_name = sender_screen_name
self.recipient_id = recipient_id
self.recipient_screen_name = recipient_screen_name
self.text = text
def GetId(self):
'''Get the unique id of this direct message.
Returns:
The unique id of this direct message
'''
return self._id
def SetId(self, id):
'''Set the unique id of this direct message.
Args:
id:
The unique id of this direct message
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this direct message.')
def GetCreatedAt(self):
'''Get the time this direct message was posted.
Returns:
The time this direct message was posted
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set the time this direct message was posted.
Args:
created_at:
The time this direct message was created
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc='The time this direct message was posted.')
def GetCreatedAtInSeconds(self):
'''Get the time this direct message was posted, in seconds since the epoch.
Returns:
The time this direct message was posted, in seconds since the epoch.
'''
return calendar.timegm(rfc822.parsedate(self.created_at))
created_at_in_seconds = property(GetCreatedAtInSeconds,
doc="The time this direct message was "
"posted, in seconds since the epoch")
def GetSenderId(self):
'''Get the unique sender id of this direct message.
Returns:
The unique sender id of this direct message
'''
return self._sender_id
def SetSenderId(self, sender_id):
'''Set the unique sender id of this direct message.
Args:
sender_id:
The unique sender id of this direct message
'''
self._sender_id = sender_id
sender_id = property(GetSenderId, SetSenderId,
doc='The unique sender id of this direct message.')
def GetSenderScreenName(self):
'''Get the unique sender screen name of this direct message.
Returns:
The unique sender screen name of this direct message
'''
return self._sender_screen_name
def SetSenderScreenName(self, sender_screen_name):
'''Set the unique sender screen name of this direct message.
Args:
sender_screen_name:
The unique sender screen name of this direct message
'''
self._sender_screen_name = sender_screen_name
sender_screen_name = property(GetSenderScreenName, SetSenderScreenName,
doc='The unique sender screen name of this direct message.')
def GetRecipientId(self):
'''Get the unique recipient id of this direct message.
Returns:
The unique recipient id of this direct message
'''
return self._recipient_id
def SetRecipientId(self, recipient_id):
'''Set the unique recipient id of this direct message.
Args:
recipient_id:
The unique recipient id of this direct message
'''
self._recipient_id = recipient_id
recipient_id = property(GetRecipientId, SetRecipientId,
doc='The unique recipient id of this direct message.')
def GetRecipientScreenName(self):
'''Get the unique recipient screen name of this direct message.
Returns:
The unique recipient screen name of this direct message
'''
return self._recipient_screen_name
def SetRecipientScreenName(self, recipient_screen_name):
'''Set the unique recipient screen name of this direct message.
Args:
recipient_screen_name:
The unique recipient screen name of this direct message
'''
self._recipient_screen_name = recipient_screen_name
recipient_screen_name = property(GetRecipientScreenName, SetRecipientScreenName,
doc='The unique recipient screen name of this direct message.')
def GetText(self):
'''Get the text of this direct message.
Returns:
The text of this direct message.
'''
return self._text
def SetText(self, text):
'''Set the text of this direct message.
Args:
text:
The text of this direct message
'''
self._text = text
text = property(GetText, SetText,
doc='The text of this direct message')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.created_at == other.created_at and \
self.sender_id == other.sender_id and \
self.sender_screen_name == other.sender_screen_name and \
self.recipient_id == other.recipient_id and \
self.recipient_screen_name == other.recipient_screen_name and \
self.text == other.text
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.DirectMessage instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.DirectMessage instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.DirectMessage instance.
Returns:
A JSON string representation of this twitter.DirectMessage instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.DirectMessage instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.DirectMessage instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.created_at:
data['created_at'] = self.created_at
if self.sender_id:
data['sender_id'] = self.sender_id
if self.sender_screen_name:
data['sender_screen_name'] = self.sender_screen_name
if self.recipient_id:
data['recipient_id'] = self.recipient_id
if self.recipient_screen_name:
data['recipient_screen_name'] = self.recipient_screen_name
if self.text:
data['text'] = self.text
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.DirectMessage instance
'''
return DirectMessage(created_at=data.get('created_at', None),
recipient_id=data.get('recipient_id', None),
sender_id=data.get('sender_id', None),
text=data.get('text', None),
sender_screen_name=data.get('sender_screen_name', None),
id=data.get('id', None),
recipient_screen_name=data.get('recipient_screen_name', None))
class Hashtag(object):
''' A class representing a twitter hashtag
'''
def __init__(self,
text=None):
self.text = text
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Hashtag instance
'''
return Hashtag(text = data.get('text', None))
class Trend(object):
''' A class representing a trending topic
'''
def __init__(self, name=None, query=None, timestamp=None, url=None):
self.name = name
self.query = query
self.timestamp = timestamp
self.url = url
def __str__(self):
return 'Name: %s\nQuery: %s\nTimestamp: %s\nSearch URL: %s\n' % (self.name, self.query, self.timestamp, self.url)
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.name == other.name and \
self.query == other.query and \
self.timestamp == other.timestamp and \
             self.url == other.url
except AttributeError:
return False
@staticmethod
def NewFromJsonDict(data, timestamp = None):
'''Create a new instance based on a JSON dict
Args:
data:
A JSON dict
timestamp:
Gets set as the timestamp property of the new object
Returns:
A twitter.Trend object
'''
return Trend(name=data.get('name', None),
query=data.get('query', None),
url=data.get('url', None),
timestamp=timestamp)
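# Usage sketch (comments only): unlike the other models, Trend.NewFromJsonDict
# takes the timestamp separately, because the API reports a single 'as_of'
# value per response rather than one per trend (see GetTrendsWoeid below).
# The values here are invented.
#
#   >>> t = Trend.NewFromJsonDict({'name': '#python', 'query': '%23python'},
#   ...                           timestamp='2013-01-01T00:00:00Z')
#   >>> t.name, t.timestamp
#   ('#python', '2013-01-01T00:00:00Z')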
class Url(object):
  '''A class representing a URL contained in a tweet'''
def __init__(self,
url=None,
expanded_url=None):
self.url = url
self.expanded_url = expanded_url
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Url instance
'''
return Url(url=data.get('url', None),
expanded_url=data.get('expanded_url', None))
class Api(object):
'''A python interface into the Twitter API
By default, the Api caches results for 1 minute.
Example usage:
To create an instance of the twitter.Api class, with no authentication:
>>> import twitter
>>> api = twitter.Api()
To fetch the most recently posted public twitter status messages:
>>> statuses = api.GetPublicTimeline()
>>> print [s.user.name for s in statuses]
[u'DeWitt', u'Kesuke Miyagi', u'ev', u'Buzz Andersen', u'Biz Stone'] #...
To fetch a single user's public status messages, where "user" is either
a Twitter "short name" or their user id.
>>> statuses = api.GetUserTimeline(user)
>>> print [s.text for s in statuses]
  To use authentication, instantiate the twitter.Api class with a
  consumer key and secret, and the oAuth access token key and secret:
>>> api = twitter.Api(consumer_key='twitter consumer key',
consumer_secret='twitter consumer secret',
access_token_key='the_key_given',
access_token_secret='the_key_secret')
To fetch your friends (after being authenticated):
>>> users = api.GetFriends()
>>> print [u.name for u in users]
To post a twitter status message (after being authenticated):
>>> status = api.PostUpdate('I love python-twitter!')
>>> print status.text
I love python-twitter!
There are many other methods, including:
>>> api.PostUpdates(status)
>>> api.PostDirectMessage(user, text)
>>> api.GetUser(user)
>>> api.GetReplies()
>>> api.GetUserTimeline(user)
    >>> api.GetHomeTimeline()
>>> api.GetStatus(id)
>>> api.DestroyStatus(id)
>>> api.GetFriendsTimeline(user)
>>> api.GetFriends(user)
>>> api.GetFollowers()
>>> api.GetFeatured()
>>> api.GetDirectMessages()
>>> api.GetSentDirectMessages()
>>> api.PostDirectMessage(user, text)
>>> api.DestroyDirectMessage(id)
>>> api.DestroyFriendship(user)
>>> api.CreateFriendship(user)
>>> api.GetUserByEmail(email)
>>> api.VerifyCredentials()
'''
DEFAULT_CACHE_TIMEOUT = 60 # cache for 1 minute
_API_REALM = 'Twitter API'
def __init__(self,
consumer_key=None,
consumer_secret=None,
access_token_key=None,
access_token_secret=None,
input_encoding=None,
request_headers=None,
cache=DEFAULT_CACHE,
shortner=None,
base_url=None,
use_gzip_compression=False,
debugHTTP=False):
'''Instantiate a new twitter.Api object.
Args:
consumer_key:
Your Twitter user's consumer_key.
consumer_secret:
Your Twitter user's consumer_secret.
access_token_key:
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret:
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
input_encoding:
The encoding used to encode input strings. [Optional]
      request_headers:
A dictionary of additional HTTP request headers. [Optional]
cache:
The cache instance to use. Defaults to DEFAULT_CACHE.
Use None to disable caching. [Optional]
      shortner:
        The URL shortener instance to use. Defaults to None.
        See shorten_url.py for an example shortener. [Optional]
base_url:
The base URL to use to contact the Twitter API.
Defaults to https://api.twitter.com. [Optional]
use_gzip_compression:
        Set to True to enable gzip compression for any call
made to Twitter. Defaults to False. [Optional]
debugHTTP:
Set to True to enable debug output from urllib2 when performing
any HTTP requests. Defaults to False. [Optional]
'''
self.SetCache(cache)
self._urllib = urllib2
self._cache_timeout = Api.DEFAULT_CACHE_TIMEOUT
self._input_encoding = input_encoding
self._use_gzip = use_gzip_compression
self._debugHTTP = debugHTTP
self._oauth_consumer = None
self._shortlink_size = 19
self._InitializeRequestHeaders(request_headers)
self._InitializeUserAgent()
self._InitializeDefaultParameters()
if base_url is None:
self.base_url = 'https://api.twitter.com/1.1'
else:
self.base_url = base_url
if consumer_key is not None and (access_token_key is None or
access_token_secret is None):
      print >> sys.stderr, 'Twitter now requires an oAuth Access Token for API calls.'
      print >> sys.stderr, 'If you are using this library from a command line utility, please'
      print >> sys.stderr, 'run the included get_access_token.py tool to generate one.'
raise TwitterError('Twitter requires oAuth Access Token for all API access')
self.SetCredentials(consumer_key, consumer_secret, access_token_key, access_token_secret)
def SetCredentials(self,
consumer_key,
consumer_secret,
access_token_key=None,
access_token_secret=None):
'''Set the consumer_key and consumer_secret for this instance
Args:
consumer_key:
The consumer_key of the twitter account.
consumer_secret:
The consumer_secret for the twitter account.
access_token_key:
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret:
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
'''
self._consumer_key = consumer_key
self._consumer_secret = consumer_secret
self._access_token_key = access_token_key
self._access_token_secret = access_token_secret
self._oauth_consumer = None
if consumer_key is not None and consumer_secret is not None and \
access_token_key is not None and access_token_secret is not None:
self._signature_method_plaintext = oauth.SignatureMethod_PLAINTEXT()
self._signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
self._oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
self._oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
def ClearCredentials(self):
    '''Clear any credentials for this instance
'''
self._consumer_key = None
self._consumer_secret = None
self._access_token_key = None
self._access_token_secret = None
self._oauth_consumer = None
def GetSearch(self,
term=None,
geocode=None,
since_id=None,
max_id=None,
until=None,
count=15,
lang=None,
locale=None,
result_type="mixed",
include_entities=None):
'''Return twitter search results for a given term.
Args:
term:
Term to search by. Optional if you include geocode.
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID. [Optional]
until:
Returns tweets generated before the given date. Date should be
formatted as YYYY-MM-DD. [Optional]
geocode:
Geolocation information in the form (latitude, longitude, radius)
[Optional]
count:
Number of results to return. Default is 15 [Optional]
lang:
Language for results as ISO 639-1 code. Default is None (all languages)
[Optional]
locale:
Language of the search query. Currently only 'ja' is effective. This is
intended for language-specific consumers and the default should work in
the majority of cases.
result_type:
Type of result which should be returned. Default is "mixed". Other
valid options are "recent" and "popular". [Optional]
include_entities:
        If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message containing
the term
'''
# Build request parameters
parameters = {}
    if since_id:
      try:
        parameters['since_id'] = long(since_id)
      except (ValueError, TypeError):
        raise TwitterError("since_id must be an integer")
    if max_id:
      try:
        parameters['max_id'] = long(max_id)
      except (ValueError, TypeError):
        raise TwitterError("max_id must be an integer")
if until:
parameters['until'] = until
if lang:
parameters['lang'] = lang
if locale:
parameters['locale'] = locale
if term is None and geocode is None:
return []
if term is not None:
parameters['q'] = term
if geocode is not None:
parameters['geocode'] = ','.join(map(str, geocode))
if include_entities:
parameters['include_entities'] = 1
    try:
      parameters['count'] = int(count)
    except (ValueError, TypeError):
      raise TwitterError("count must be an integer")
if result_type in ["mixed", "popular", "recent"]:
parameters['result_type'] = result_type
# Make and send requests
url = '%s/search/tweets.json' % self.base_url
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
# Return built list of statuses
return [Status.NewFromJsonDict(x) for x in data['statuses']]
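  # Usage sketch (comments only; a live call needs network access and an
  # authenticated Api instance, and every value below is invented). Note how
  # geocode is passed as a (latitude, longitude, radius) tuple, which the
  # method joins into the 'lat,long,radius' string the API expects:
  #
  #   >>> results = api.GetSearch(term='python',
  #   ...                         geocode=(37.78, -122.40, '10mi'),
  #   ...                         result_type='recent', count=50)
  #   >>> print [s.text for s in results]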
def GetUsersSearch(self,
term=None,
page=1,
count=20,
include_entities=None):
'''Return twitter user search results for a given term.
Args:
term:
Term to search by.
page:
Page of results to return. Default is 1
[Optional]
count:
Number of results to return. Default is 20
[Optional]
include_entities:
        If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and hashtags.
[Optional]
Returns:
A sequence of twitter.User instances, one for each message containing
the term
'''
# Build request parameters
parameters = {}
if term is not None:
parameters['q'] = term
if include_entities:
parameters['include_entities'] = 1
    try:
      parameters['count'] = int(count)
    except (ValueError, TypeError):
      raise TwitterError("count must be an integer")
# Make and send requests
url = '%s/users/search.json' % self.base_url
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [User.NewFromJsonDict(x) for x in data]
def GetTrendsCurrent(self, exclude=None):
'''Get the current top trending topics (global)
Args:
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 10 entries. Each entry contains a trend.
'''
return self.GetTrendsWoeid(id=1, exclude=exclude)
def GetTrendsWoeid(self, id, exclude=None):
'''Return the top 10 trending topics for a specific WOEID, if trending
information is available for it.
Args:
      id:
        The Yahoo! Where On Earth ID for a location.
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 10 entries. Each entry contains a trend.
'''
url = '%s/trends/place.json' % (self.base_url)
parameters = {'id': id}
if exclude:
parameters['exclude'] = exclude
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
trends = []
timestamp = data[0]['as_of']
for trend in data[0]['trends']:
trends.append(Trend.NewFromJsonDict(trend, timestamp = timestamp))
return trends
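  # Usage sketch (comments only): WOEID 1 is the global placeholder that
  # GetTrendsCurrent passes; other WOEIDs scope the trends to a location.
  # Assumes an authenticated Api instance named 'api', and network access.
  #
  #   >>> for trend in api.GetTrendsWoeid(id=1, exclude='hashtags'):
  #   ...   print trend.name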
def GetHomeTimeline(self,
count=None,
since_id=None,
max_id=None,
trim_user=False,
exclude_replies=False,
contributor_details=False,
include_entities=True):
'''
Fetch a collection of the most recent Tweets and retweets posted by the
authenticating user and the users they follow.
The home timeline is central to how most users interact with the Twitter
service.
The twitter.Api instance must be authenticated.
Args:
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. Defaults to 20. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
When True, each tweet returned in a timeline will include a user
        object including only the status author's numerical ID. Omit this
parameter to receive the complete user object. [Optional]
exclude_replies:
This parameter will prevent replies from appearing in the
returned timeline. Using exclude_replies with the count
        parameter will mean you will receive up to count tweets -
this is because the count parameter retrieves that many
tweets before filtering out retweets and replies.
[Optional]
contributor_details:
This parameter enhances the contributors element of the
status response to include the screen_name of the contributor.
By default only the user_id of the contributor is included.
[Optional]
include_entities:
        The entities node will be omitted when set to False.
        This node offers a variety of metadata about the tweet in a
        discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message
'''
url = '%s/statuses/home_timeline.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("API must be authenticated.")
parameters = {}
if count is not None:
try:
if int(count) > 200:
raise TwitterError("'count' may not be greater than 200")
except ValueError:
raise TwitterError("'count' must be an integer")
parameters['count'] = count
if since_id:
try:
parameters['since_id'] = long(since_id)
except ValueError:
raise TwitterError("'since_id' must be an integer")
if max_id:
try:
parameters['max_id'] = long(max_id)
except ValueError:
raise TwitterError("'max_id' must be an integer")
if trim_user:
parameters['trim_user'] = 1
if exclude_replies:
parameters['exclude_replies'] = 1
if contributor_details:
parameters['contributor_details'] = 1
if not include_entities:
parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
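  # Usage sketch (comments only): a simple incremental-polling pattern built
  # on since_id, so each call fetches only tweets newer than the newest one
  # already seen. Assumes an authenticated Api instance named 'api'.
  #
  #   >>> newest = None
  #   >>> statuses = api.GetHomeTimeline(count=200, since_id=newest)
  #   >>> if statuses:
  #   ...   newest = max(s.id for s in statuses)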
def GetUserTimeline(self,
user_id=None,
screen_name=None,
since_id=None,
max_id=None,
count=None,
include_rts=None,
trim_user=None,
exclude_replies=None):
'''Fetch the sequence of public Status messages for a single user.
The twitter.Api instance must be authenticated if the user is private.
Args:
user_id:
Specifies the ID of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name. [Optional]
screen_name:
Specifies the screen name of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid screen
name is also a user ID. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID. [Optional]
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. [Optional]
include_rts:
If True, the timeline will contain native retweets (if they
exist) in addition to the standard stream of tweets. [Optional]
trim_user:
        If True, statuses will contain only the numerical user ID.
Otherwise a full user object will be returned for each status.
[Optional]
exclude_replies:
If True, this will prevent replies from appearing in the returned
timeline. Using exclude_replies with the count parameter will mean you
        will receive up to count tweets - this is because the count parameter
retrieves that many tweets before filtering out retweets and replies.
This parameter is only supported for JSON and XML responses. [Optional]
Returns:
A sequence of Status instances, one for each message up to count
'''
parameters = {}
url = '%s/statuses/user_timeline.json' % (self.base_url)
if user_id:
parameters['user_id'] = user_id
elif screen_name:
parameters['screen_name'] = screen_name
    if since_id:
      try:
        parameters['since_id'] = long(since_id)
      except (ValueError, TypeError):
        raise TwitterError("since_id must be an integer")
    if max_id:
      try:
        parameters['max_id'] = long(max_id)
      except (ValueError, TypeError):
        raise TwitterError("max_id must be an integer")
    if count:
      try:
        parameters['count'] = int(count)
      except (ValueError, TypeError):
        raise TwitterError("count must be an integer")
if include_rts:
parameters['include_rts'] = 1
if trim_user:
parameters['trim_user'] = 1
if exclude_replies:
parameters['exclude_replies'] = 1
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def GetStatus(self,
id,
trim_user=False,
include_my_retweet=True,
include_entities=True):
'''Returns a single status message, specified by the id parameter.
The twitter.Api instance must be authenticated.
Args:
id:
The numeric ID of the status you are trying to retrieve.
trim_user:
When set to True, each tweet returned in a timeline will include
a user object including only the status authors numerical ID.
Omit this parameter to receive the complete user object.
[Optional]
include_my_retweet:
When set to True, any Tweets returned that have been retweeted by
the authenticating user will include an additional
current_user_retweet node, containing the ID of the source status
for the retweet. [Optional]
include_entities:
        If False, the entities node will be omitted.
        This node offers a variety of metadata about the tweet in a
        discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A twitter.Status instance representing that status message
'''
url = '%s/statuses/show.json' % (self.base_url)
if not self._oauth_consumer:
raise TwitterError("API must be authenticated.")
parameters = {}
try:
parameters['id'] = long(id)
except ValueError:
raise TwitterError("'id' must be an integer.")
if trim_user:
parameters['trim_user'] = 1
if include_my_retweet:
parameters['include_my_retweet'] = 1
if not include_entities:
parameters['include_entities'] = 'none'
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def DestroyStatus(self, id, trim_user=False):
'''Destroys the status specified by the required ID parameter.
The twitter.Api instance must be authenticated and the
authenticating user must be the author of the specified status.
Args:
id:
The numerical ID of the status you're trying to destroy.
Returns:
A twitter.Status instance representing the destroyed status message
'''
if not self._oauth_consumer:
raise TwitterError("API must be authenticated.")
    try:
      post_data = {'id': long(id)}
    except (ValueError, TypeError):
      raise TwitterError("id must be an integer")
url = '%s/statuses/destroy/%s.json' % (self.base_url, id)
if trim_user:
post_data['trim_user'] = 1
json = self._FetchUrl(url, post_data=post_data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
  @classmethod
  def _calculate_status_length(cls, status, linksize=19):
    # Twitter wraps every link in t.co, so each URL counts as 'linksize'
    # characters no matter how long it really is. Substitute a dummy token
    # of exactly that length for every URL-looking word, then measure.
    dummy_link_replacement = 'https://-%d-chars%s/' % (linksize, '-' * (linksize - 18))
    shortened = ' '.join([dummy_link_replacement
                          if x.startswith('http://') or x.startswith('https://')
                          else x
                          for x in status.split(' ')])
    return len(shortened)
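  # Worked example (comments only): with the default linksize of 19, every
  # whitespace-separated token that starts with http:// or https:// counts
  # as 19 characters, mirroring t.co link wrapping:
  #
  #   >>> Api._calculate_status_length('check this http://example.com/a/very/long/path out')
  #   34
  #
  # i.e. 5 + 1 + 4 + 1 + 19 + 1 + 3 -- the URL's real length is irrelevant.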
def PostUpdate(self, status, in_reply_to_status_id=None, latitude=None, longitude=None, place_id=None, display_coordinates=False, trim_user=False):
'''Post a twitter status message from the authenticated user.
The twitter.Api instance must be authenticated.
https://dev.twitter.com/docs/api/1.1/post/statuses/update
Args:
status:
The message text to be posted.
Must be less than or equal to 140 characters.
in_reply_to_status_id:
The ID of an existing status that the status to be posted is
in reply to. This implicitly sets the in_reply_to_user_id
attribute of the resulting status to the user ID of the
message being replied to. Invalid/missing status IDs will be
ignored. [Optional]
latitude:
Latitude coordinate of the tweet in degrees. Will only work
in conjunction with longitude argument. Both longitude and
latitude will be ignored by twitter if the user has a false
geo_enabled setting. [Optional]
longitude:
Longitude coordinate of the tweet in degrees. Will only work
in conjunction with latitude argument. Both longitude and
latitude will be ignored by twitter if the user has a false
geo_enabled setting. [Optional]
place_id:
A place in the world. These IDs can be retrieved from
GET geo/reverse_geocode. [Optional]
display_coordinates:
Whether or not to put a pin on the exact coordinates a tweet
has been sent from. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A twitter.Status instance representing the message posted.
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
url = '%s/statuses/update.json' % self.base_url
if isinstance(status, unicode) or self._input_encoding is None:
u_status = status
else:
u_status = unicode(status, self._input_encoding)
#if self._calculate_status_length(u_status, self._shortlink_size) > CHARACTER_LIMIT:
# raise TwitterError("Text must be less than or equal to %d characters. "
# "Consider using PostUpdates." % CHARACTER_LIMIT)
data = {'status': status}
if in_reply_to_status_id:
data['in_reply_to_status_id'] = in_reply_to_status_id
if latitude is not None and longitude is not None:
data['lat'] = str(latitude)
data['long'] = str(longitude)
if place_id is not None:
data['place_id'] = str(place_id)
if display_coordinates:
data['display_coordinates'] = 'true'
if trim_user:
data['trim_user'] = 'true'
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def PostUpdates(self, status, continuation=None, **kwargs):
'''Post one or more twitter status messages from the authenticated user.
Unlike api.PostUpdate, this method will post multiple status updates
if the message is longer than 140 characters.
The twitter.Api instance must be authenticated.
Args:
status:
The message text to be posted.
May be longer than 140 characters.
continuation:
The character string, if any, to be appended to all but the
last message. Note that Twitter strips trailing '...' strings
from messages. Consider using the unicode \u2026 character
(horizontal ellipsis) instead. [Defaults to None]
**kwargs:
See api.PostUpdate for a list of accepted parameters.
Returns:
      A list of twitter.Status instances representing the messages posted.
'''
results = list()
if continuation is None:
continuation = ''
line_length = CHARACTER_LIMIT - len(continuation)
lines = textwrap.wrap(status, line_length)
for line in lines[0:-1]:
results.append(self.PostUpdate(line + continuation, **kwargs))
results.append(self.PostUpdate(lines[-1], **kwargs))
return results
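  # Usage sketch (comments only): splitting one long message into consecutive
  # tweets, appending a horizontal ellipsis to every tweet but the last, as
  # the docstring above recommends. Assumes an authenticated Api named 'api'
  # and that the text exceeds CHARACTER_LIMIT.
  #
  #   >>> text = 'word ' * 60
  #   >>> statuses = api.PostUpdates(text, continuation=u'\u2026')
  #   >>> len(statuses) > 1
  #   True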
def PostRetweet(self, original_id, trim_user=False):
'''Retweet a tweet with the Retweet API.
The twitter.Api instance must be authenticated.
Args:
original_id:
The numerical id of the tweet that will be retweeted
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A twitter.Status instance representing the original tweet with retweet details embedded.
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
try:
if int(original_id) <= 0:
raise TwitterError("'original_id' must be a positive number")
except ValueError:
raise TwitterError("'original_id' must be an integer")
url = '%s/statuses/retweet/%s.json' % (self.base_url, original_id)
data = {'id': original_id}
if trim_user:
data['trim_user'] = 'true'
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def GetUserRetweets(self, count=None, since_id=None, max_id=None, trim_user=False):
'''Fetch the sequence of retweets made by the authenticated user.
The twitter.Api instance must be authenticated.
Args:
count:
The number of status messages to retrieve. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A sequence of twitter.Status instances, one for each message up to count
'''
return self.GetUserTimeline(since_id=since_id, count=count, max_id=max_id, trim_user=trim_user, exclude_replies=True, include_rts=True)
def GetReplies(self, since_id=None, count=None, max_id=None, trim_user=False):
'''Get a sequence of status messages representing the 20 most
recent replies (status updates prefixed with @twitterID) to the
authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A sequence of twitter.Status instances, one for each reply to the user.
'''
return self.GetUserTimeline(since_id=since_id, count=count, max_id=max_id, trim_user=trim_user, exclude_replies=False, include_rts=False)
def GetRetweets(self, statusid, count=None, trim_user=False):
'''Returns up to 100 of the first retweets of the tweet identified
by statusid
Args:
statusid:
The ID of the tweet for which retweets should be searched for
count:
The number of status messages to retrieve. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A list of twitter.Status instances, which are retweets of statusid
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instsance must be authenticated.")
url = '%s/statuses/retweets/%s.json' % (self.base_url, statusid)
parameters = {}
if trim_user:
parameters['trim_user'] = 'true'
if count:
      try:
        parameters['count'] = int(count)
      except (ValueError, TypeError):
        raise TwitterError("count must be an integer")
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(s) for s in data]
def GetRetweetsOfMe(self,
count=None,
since_id=None,
max_id=None,
trim_user=False,
include_entities=True,
include_user_entities=True):
'''Returns up to 100 of the most recent tweets of the user that have been
retweeted by others.
Args:
count:
The number of retweets to retrieve, up to 100. If omitted, 20 is
assumed.
since_id:
Returns results with an ID greater than (newer than) this ID.
max_id:
Returns results with an ID less than or equal to this ID.
trim_user:
When True, the user object for each tweet will only be an ID.
include_entities:
When True, the tweet entities will be included.
include_user_entities:
When True, the user entities will be included.
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
url = '%s/statuses/retweets_of_me.json' % self.base_url
parameters = {}
if count is not None:
try:
if int(count) > 100:
raise TwitterError("'count' may not be greater than 100")
except ValueError:
raise TwitterError("'count' must be an integer")
if count:
parameters['count'] = count
if since_id:
parameters['since_id'] = since_id
if max_id:
parameters['max_id'] = max_id
if trim_user:
parameters['trim_user'] = trim_user
if not include_entities:
parameters['include_entities'] = include_entities
if not include_user_entities:
parameters['include_user_entities'] = include_user_entities
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(s) for s in data]
def GetFriends(self, user_id=None, screen_name=None, cursor=-1, skip_status=False, include_user_entities=False):
'''Fetch the sequence of twitter.User instances, one for each friend.
The twitter.Api instance must be authenticated.
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns [Optional(ish)]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included.
Returns:
A sequence of twitter.User instances, one for each friend
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/friends/list.json' % self.base_url
result = []
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if skip_status:
parameters['skip_status'] = True
if include_user_entities:
parameters['include_user_entities'] = True
while True:
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
result += [User.NewFromJsonDict(x) for x in data['users']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
else:
cursor = data['next_cursor']
else:
break
return result
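  # Usage sketch (comments only): the cursor loop above is handled internally,
  # so one call drains every page of /friends/list. The screen name is just an
  # example; assumes an authenticated Api instance named 'api'.
  #
  #   >>> friends = api.GetFriends(screen_name='dewitt', skip_status=True)
  #   >>> print [u.screen_name for u in friends]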
def GetFriendIDs(self, user_id=None, screen_name=None, cursor=-1, stringify_ids=False, count=None):
    '''Returns a list of twitter user ids for every person
the specified user is following.
Args:
user_id:
The id of the user to retrieve the id list for
[Optional]
screen_name:
The screen_name of the user to retrieve the id list for
[Optional]
cursor:
Specifies the Twitter API Cursor location to start at.
Note: there are pagination limits.
[Optional]
stringify_ids:
        If True, twitter will return the ids as strings instead of integers.
[Optional]
count:
The number of status messages to retrieve. [Optional]
Returns:
A list of integers, one for each user id.
'''
url = '%s/friends/ids.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if stringify_ids:
parameters['stringify_ids'] = True
if count is not None:
parameters['count'] = count
result = []
while True:
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
result += [x for x in data['ids']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
else:
cursor = data['next_cursor']
else:
break
return result
def GetFollowerIDs(self, user_id=None, screen_name=None, cursor=-1, stringify_ids=False, count=None, total_count=None):
    '''Returns a list of twitter user ids for every person
that is following the specified user.
Args:
user_id:
The id of the user to retrieve the id list for
[Optional]
screen_name:
The screen_name of the user to retrieve the id list for
[Optional]
cursor:
Specifies the Twitter API Cursor location to start at.
Note: there are pagination limits.
[Optional]
stringify_ids:
        If True, twitter will return the ids as strings instead of integers.
[Optional]
count:
        The number of user ids to retrieve per API request. Please be aware that
this might get you rate-limited if set to a small number. By default Twitter
will retrieve 5000 UIDs per call.
[Optional]
total_count:
The total amount of UIDs to retrieve. Good if the account has many followers
and you don't want to get rate limited. The data returned might contain more
UIDs if total_count is not a multiple of count (5000 by default).
[Optional]
Returns:
A list of integers, one for each user id.
'''
url = '%s/followers/ids.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if stringify_ids:
parameters['stringify_ids'] = True
if count is not None:
parameters['count'] = count
result = []
while True:
      # Guard against 'count' being None (Twitter's default page size is
      # 5000) and against 'total_count' being None on multi-page results.
      if total_count is not None and (count is None or total_count < count):
        parameters['count'] = total_count
      parameters['cursor'] = cursor
      json = self._FetchUrl(url, parameters=parameters)
      data = self._ParseAndCheckTwitter(json)
      result += [x for x in data['ids']]
      if 'next_cursor' in data:
        if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
          break
        else:
          cursor = data['next_cursor']
          if total_count is not None:
            total_count -= len(data['ids'])
            if total_count < 1:
              break
else:
break
return result
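  # Usage sketch (comments only): for an account with many followers, cap the
  # total number of ids fetched so one call does not exhaust the rate limit.
  # As the docstring notes, the result may overshoot total_count when it is
  # not a multiple of count. The screen name is just an example; assumes an
  # authenticated Api instance named 'api'.
  #
  #   >>> ids = api.GetFollowerIDs(screen_name='twitter', count=5000,
  #   ...                          total_count=15000)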
def GetFollowers(self, user_id=None, screen_name=None, cursor=-1, skip_status=False, include_user_entities=False):
'''Fetch the sequence of twitter.User instances, one for each follower
The twitter.Api instance must be authenticated.
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns [Optional(ish)]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included.
Returns:
A sequence of twitter.User instances, one for each follower
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/followers/list.json' % self.base_url
result = []
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if skip_status:
parameters['skip_status'] = True
if include_user_entities:
parameters['include_user_entities'] = True
while True:
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
result += [User.NewFromJsonDict(x) for x in data['users']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
else:
cursor = data['next_cursor']
else:
break
return result
def UsersLookup(self, user_id=None, screen_name=None, users=None, include_entities=True):
'''Fetch extended information for the specified users.
Users may be specified either as lists of either user_ids,
screen_names, or twitter.User objects. The list of users that
are queried is the union of all specified parameters.
The twitter.Api instance must be authenticated.
Args:
user_id:
A list of user_ids to retrieve extended information.
[Optional]
screen_name:
A list of screen_names to retrieve extended information.
[Optional]
users:
A list of twitter.User objects to retrieve extended information.
[Optional]
include_entities:
The entities node that may appear within embedded statuses will be
        omitted when set to False.
[Optional]
Returns:
A list of twitter.User objects for the requested users
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
if not user_id and not screen_name and not users:
raise TwitterError("Specify at least one of user_id, screen_name, or users.")
url = '%s/users/lookup.json' % self.base_url
parameters = {}
uids = list()
if user_id:
uids.extend(user_id)
if users:
uids.extend([u.id for u in users])
if len(uids):
parameters['user_id'] = ','.join(["%s" % u for u in uids])
if screen_name:
parameters['screen_name'] = ','.join(screen_name)
if not include_entities:
parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters=parameters)
try:
data = self._ParseAndCheckTwitter(json)
except TwitterError, e:
_, e, _ = sys.exc_info()
t = e.args[0]
if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
data = []
else:
raise
return [User.NewFromJsonDict(u) for u in data]
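  # Usage sketch (comments only): user_id, users and screen_name may be mixed,
  # and everything is folded into one comma-separated request. The REST API
  # historically capped this endpoint at 100 users per call, a limit not
  # enforced here. Assumes an authenticated Api instance named 'api'; the ids
  # and names are invented.
  #
  #   >>> users = api.UsersLookup(user_id=[718443],
  #   ...                         screen_name=['dewitt', 'ev'])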
def GetUser(self, user_id=None, screen_name=None, include_entities=True):
'''Returns a single user.
The twitter.Api instance must be authenticated.
Args:
user_id:
The id of the user to retrieve.
[Optional]
screen_name:
The screen name of the user for whom to return results for. Either a
user_id or screen_name is required for this method.
[Optional]
include_entities:
if set to False, the 'entities' node will not be included.
[Optional]
Returns:
A twitter.User instance representing that user
'''
url = '%s/users/show.json' % (self.base_url)
parameters = {}
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
if user_id:
parameters['user_id'] = user_id
elif screen_name:
parameters['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
if not include_entities:
parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def GetDirectMessages(self, since_id=None, max_id=None, count=None, include_entities=True, skip_status=False):
'''Returns a list of the direct messages sent to the authenticating user.
The twitter.Api instance must be authenticated.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
count:
Specifies the number of direct messages to try and retrieve, up to a
maximum of 200. The value of count is best thought of as a limit to the
number of Tweets to return because suspended or deleted content is
removed after the count has been applied. [Optional]
include_entities:
The entities node will not be included when set to False.
[Optional]
skip_status:
When set to True statuses will not be included in the returned user
objects. [Optional]
Returns:
A sequence of twitter.DirectMessage instances
'''
url = '%s/direct_messages.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if since_id:
parameters['since_id'] = since_id
if max_id:
parameters['max_id'] = max_id
if count:
      try:
        parameters['count'] = int(count)
      except (ValueError, TypeError):
        raise TwitterError("count must be an integer")
if not include_entities:
parameters['include_entities'] = 'false'
if skip_status:
parameters['skip_status'] = 1
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [DirectMessage.NewFromJsonDict(x) for x in data]
def GetSentDirectMessages(self, since_id=None, max_id=None, count=None, page=None, include_entities=True):
'''Returns a list of the direct messages sent by the authenticating user.
The twitter.Api instance must be authenticated.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
        Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
count:
Specifies the number of direct messages to try and retrieve, up to a
maximum of 200. The value of count is best thought of as a limit to the
number of Tweets to return because suspended or deleted content is
removed after the count has been applied. [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
include_entities:
The entities node will not be included when set to False.
[Optional]
Returns:
A sequence of twitter.DirectMessage instances
'''
url = '%s/direct_messages/sent.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if since_id:
parameters['since_id'] = since_id
if page:
parameters['page'] = page
if max_id:
parameters['max_id'] = max_id
if count:
      try:
        parameters['count'] = int(count)
      except (ValueError, TypeError):
        raise TwitterError("count must be an integer")
if not include_entities:
parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [DirectMessage.NewFromJsonDict(x) for x in data]
def PostDirectMessage(self, text, user_id=None, screen_name=None):
'''Post a twitter direct message from the authenticated user
The twitter.Api instance must be authenticated. user_id or screen_name
must be specified.
Args:
text: The message text to be posted. Must be less than 140 characters.
user_id:
The ID of the user who should receive the direct message.
[Optional]
screen_name:
The screen name of the user who should receive the direct message.
[Optional]
Returns:
A twitter.DirectMessage instance representing the message posted
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
url = '%s/direct_messages/new.json' % self.base_url
data = {'text': text}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return DirectMessage.NewFromJsonDict(data)
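  # Usage sketch (comments only): note the argument order -- the message text
  # comes first, then the recipient. Assumes an authenticated Api instance
  # named 'api' and a recipient the authenticated user is allowed to message.
  #
  #   >>> dm = api.PostDirectMessage('hello from python-twitter',
  #   ...                            screen_name='dewitt')
  #   >>> print dm.text
  #   hello from python-twitter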
def DestroyDirectMessage(self, id, include_entities=True):
'''Destroys the direct message specified in the required ID parameter.
The twitter.Api instance must be authenticated, and the
authenticating user must be the recipient of the specified direct
message.
Args:
id: The id of the direct message to be destroyed
Returns:
A twitter.DirectMessage instance representing the message destroyed
'''
url = '%s/direct_messages/destroy.json' % self.base_url
data = {'id': id}
if not include_entities:
data['include_entities'] = 'false'
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return DirectMessage.NewFromJsonDict(data)
def CreateFriendship(self, user_id=None, screen_name=None, follow=True):
'''Befriends the user specified by the user_id or screen_name.
The twitter.Api instance must be authenticated.
Args:
user_id:
A user_id to follow [Optional]
screen_name:
A screen_name to follow [Optional]
follow:
Set to False to disable notifications for the target user
Returns:
A twitter.User instance representing the befriended user.
'''
url = '%s/friendships/create.json' % (self.base_url)
data = {}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
if follow:
data['follow'] = 'true'
else:
data['follow'] = 'false'
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def DestroyFriendship(self, user_id=None, screen_name=None):
'''Discontinues friendship with a user_id or screen_name.
The twitter.Api instance must be authenticated.
Args:
user_id:
A user_id to unfollow [Optional]
screen_name:
A screen_name to unfollow [Optional]
Returns:
A twitter.User instance representing the discontinued friend.
'''
url = '%s/friendships/destroy.json' % self.base_url
data = {}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def CreateFavorite(self, status=None, id=None, include_entities=True):
'''Favorites the specified status object or id as the authenticating user.
Returns the favorite status when successful.
The twitter.Api instance must be authenticated.
Args:
id:
The id of the twitter status to mark as a favorite.
[Optional]
status:
The twitter.Status object to mark as a favorite.
[Optional]
include_entities:
The entities node will be omitted when set to False.
Returns:
A twitter.Status instance representing the newly-marked favorite.
'''
url = '%s/favorites/create.json' % self.base_url
data = {}
if id:
data['id'] = id
elif status:
data['id'] = status.id
else:
raise TwitterError("Specify id or status")
if not include_entities:
data['include_entities'] = 'false'
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def DestroyFavorite(self, status=None, id=None, include_entities=True):
    '''Un-favorites the specified status object or id as the authenticating user.
Returns the un-favorited status when successful.
The twitter.Api instance must be authenticated.
Args:
id:
The id of the twitter status to unmark as a favorite.
[Optional]
status:
The twitter.Status object to unmark as a favorite.
[Optional]
include_entities:
The entities node will be omitted when set to False.
Returns:
A twitter.Status instance representing the newly-unmarked favorite.
'''
url = '%s/favorites/destroy.json' % self.base_url
data = {}
if id:
data['id'] = id
elif status:
data['id'] = status.id
else:
raise TwitterError("Specify id or status")
if not include_entities:
data['include_entities'] = 'false'
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def GetFavorites(self,
user_id=None,
screen_name=None,
count=None,
since_id=None,
max_id=None,
include_entities=True):
'''Return a list of Status objects representing favorited tweets.
By default, returns the (up to) 20 most recent tweets for the
authenticated user.
    Args:
      user_id:
        The twitter id of the user whose favorites you are fetching.
        If not specified, defaults to the authenticated user. [Optional]
      screen_name:
        The twitter name of the user whose favorites you are fetching.
        If not specified, defaults to the authenticated user. [Optional]
      count:
        Specifies the number of tweets to try and retrieve, up to a
        maximum of 200. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. [Optional]
      max_id:
        Returns results with an ID less than (that is, older than) or
        equal to the specified ID. [Optional]
      include_entities:
        The entities node will be omitted when set to False. [Optional]
    Returns:
      A sequence of twitter.Status instances, one for each favorited tweet.
'''
parameters = {}
url = '%s/favorites/list.json' % self.base_url
if user_id:
parameters['user_id'] = user_id
elif screen_name:
      parameters['screen_name'] = screen_name
if since_id:
      try:
        parameters['since_id'] = long(since_id)
      except (TypeError, ValueError):
        raise TwitterError("since_id must be an integer")
if max_id:
      try:
        parameters['max_id'] = long(max_id)
      except (TypeError, ValueError):
        raise TwitterError("max_id must be an integer")
if count:
      try:
        parameters['count'] = int(count)
      except (TypeError, ValueError):
        raise TwitterError("count must be an integer")
    if not include_entities:
      parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def GetMentions(self,
count=None,
since_id=None,
max_id=None,
trim_user=False,
contributor_details=False,
include_entities=True):
'''Returns the 20 most recent mentions (status containing @screen_name)
for the authenticating user.
Args:
count:
Specifies the number of tweets to try and retrieve, up to a maximum of
200. The value of count is best thought of as a limit to the number of
tweets to return because suspended or deleted content is removed after
the count has been applied. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than
(that is, older than) the specified ID. [Optional]
trim_user:
When set to True, each tweet returned in a timeline will include a user
        object including only the status author's numerical ID. Omit this
parameter to receive the complete user object.
contributor_details:
If set to True, this parameter enhances the contributors element of the
status response to include the screen_name of the contributor. By
default only the user_id of the contributor is included.
include_entities:
        The entities node will be omitted when set to False.
Returns:
A sequence of twitter.Status instances, one for each mention of the user.
'''
url = '%s/statuses/mentions_timeline.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if count:
      try:
        parameters['count'] = int(count)
      except (TypeError, ValueError):
        raise TwitterError("count must be an integer")
if since_id:
      try:
        parameters['since_id'] = long(since_id)
      except (TypeError, ValueError):
        raise TwitterError("since_id must be an integer")
if max_id:
      try:
        parameters['max_id'] = long(max_id)
      except (TypeError, ValueError):
        raise TwitterError("max_id must be an integer")
if trim_user:
parameters['trim_user'] = 1
if contributor_details:
parameters['contributor_details'] = 'true'
if not include_entities:
parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def CreateList(self, name, mode=None, description=None):
    '''Creates a new list with the given name for the authenticated user.
The twitter.Api instance must be authenticated.
Args:
name:
New name for the list
mode:
'public' or 'private'.
Defaults to 'public'. [Optional]
description:
Description of the list. [Optional]
Returns:
A twitter.List instance representing the new list
'''
url = '%s/lists/create.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {'name': name}
if mode is not None:
parameters['mode'] = mode
if description is not None:
parameters['description'] = description
json = self._FetchUrl(url, post_data=parameters)
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
def DestroyList(self,
owner_screen_name=False,
owner_id=False,
list_id=None,
slug=None):
'''
Destroys the list identified by list_id or owner_screen_name/owner_id and
slug.
The twitter.Api instance must be authenticated.
Args:
owner_screen_name:
The screen_name of the user who owns the list being requested by a slug.
owner_id:
The user ID of the user who owns the list being requested by a slug.
list_id:
The numerical id of the list.
slug:
You can identify a list by its slug instead of its numerical id. If you
decide to do so, note that you'll also have to specify the list owner
using the owner_id or owner_screen_name parameters.
Returns:
A twitter.List instance representing the removed list.
'''
url = '%s/lists/destroy.json' % self.base_url
data = {}
if list_id:
      try:
        data['list_id'] = long(list_id)
      except (TypeError, ValueError):
        raise TwitterError("list_id must be an integer")
elif slug:
data['slug'] = slug
if owner_id:
        try:
          data['owner_id'] = long(owner_id)
        except (TypeError, ValueError):
          raise TwitterError("owner_id must be an integer")
elif owner_screen_name:
data['owner_screen_name'] = owner_screen_name
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
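  # Illustrative only: the two ways DestroyList resolves a list, per the
  # docstring above (api object and values are hypothetical):
  #
  #   api.DestroyList(list_id=123456)
  #   api.DestroyList(slug='my-list', owner_screen_name='example_user')
  #
  # A slug without owner_id or owner_screen_name raises TwitterError.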
def CreateSubscription(self,
owner_screen_name=False,
owner_id=False,
list_id=None,
slug=None):
'''Creates a subscription to a list by the authenticated user
The twitter.Api instance must be authenticated.
Args:
owner_screen_name:
The screen_name of the user who owns the list being requested by a slug.
owner_id:
The user ID of the user who owns the list being requested by a slug.
list_id:
The numerical id of the list.
slug:
You can identify a list by its slug instead of its numerical id. If you
decide to do so, note that you'll also have to specify the list owner
using the owner_id or owner_screen_name parameters.
Returns:
A twitter.List instance representing the list subscribed to
'''
url = '%s/lists/subscribers/create.json' % (self.base_url)
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
data = {}
if list_id:
      try:
        data['list_id'] = long(list_id)
      except (TypeError, ValueError):
        raise TwitterError("list_id must be an integer")
elif slug:
data['slug'] = slug
if owner_id:
        try:
          data['owner_id'] = long(owner_id)
        except (TypeError, ValueError):
          raise TwitterError("owner_id must be an integer")
elif owner_screen_name:
data['owner_screen_name'] = owner_screen_name
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
def DestroySubscription(self,
owner_screen_name=False,
owner_id=False,
list_id=None,
slug=None):
'''Destroys the subscription to a list for the authenticated user
The twitter.Api instance must be authenticated.
Args:
owner_screen_name:
The screen_name of the user who owns the list being requested by a slug.
owner_id:
The user ID of the user who owns the list being requested by a slug.
list_id:
The numerical id of the list.
slug:
You can identify a list by its slug instead of its numerical id. If you
decide to do so, note that you'll also have to specify the list owner
using the owner_id or owner_screen_name parameters.
Returns:
A twitter.List instance representing the removed list.
'''
url = '%s/lists/subscribers/destroy.json' % (self.base_url)
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
data = {}
if list_id:
      try:
        data['list_id'] = long(list_id)
      except (TypeError, ValueError):
        raise TwitterError("list_id must be an integer")
elif slug:
data['slug'] = slug
if owner_id:
        try:
          data['owner_id'] = long(owner_id)
        except (TypeError, ValueError):
          raise TwitterError("owner_id must be an integer")
elif owner_screen_name:
data['owner_screen_name'] = owner_screen_name
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
def GetSubscriptions(self, user_id=None, screen_name=None, count=20, cursor=-1):
'''
Obtain a collection of the lists the specified user is subscribed to, 20
lists per page by default. Does not include the user's own lists.
The twitter.Api instance must be authenticated.
Args:
      user_id:
        The ID of the user for whom to return results. [Optional]
      screen_name:
        The screen name of the user for whom to return results.
        [Optional]
      count:
        The number of results to return per page. Defaults to 20.
No more than 1000 results will ever be returned in a single page.
cursor:
"page" value that Twitter will use to start building the
list sequence from. -1 to start at the beginning.
Twitter will return in the result the values for next_cursor
and previous_cursor. [Optional]
Returns:
A sequence of twitter.List instances, one for each list
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/lists/subscriptions.json' % (self.base_url)
parameters = {}
    try:
      parameters['cursor'] = int(cursor)
    except (TypeError, ValueError):
      raise TwitterError("cursor must be an integer")
    try:
      parameters['count'] = int(count)
    except (TypeError, ValueError):
      raise TwitterError("count must be an integer")
if user_id is not None:
      try:
        parameters['user_id'] = long(user_id)
      except (TypeError, ValueError):
        raise TwitterError('user_id must be an integer')
elif screen_name is not None:
parameters['screen_name'] = screen_name
else:
raise TwitterError('Specify user_id or screen_name')
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [List.NewFromJsonDict(x) for x in data['lists']]
def GetLists(self, user_id=None, screen_name=None, count=None, cursor=-1):
'''Fetch the sequence of lists for a user.
The twitter.Api instance must be authenticated.
Args:
      user_id:
        The ID of the user for whom to return results. [Optional]
      screen_name:
        The screen name of the user for whom to return results.
        [Optional]
      count:
        The number of results to return per page. Defaults to 20. No more than
1000 results will ever be returned in a single page.
[Optional]
cursor:
"page" value that Twitter will use to start building the
list sequence from. -1 to start at the beginning.
Twitter will return in the result the values for next_cursor
and previous_cursor. [Optional]
Returns:
A sequence of twitter.List instances, one for each list
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/lists/ownerships.json' % self.base_url
result = []
parameters = {}
if user_id is not None:
      try:
        parameters['user_id'] = long(user_id)
      except (TypeError, ValueError):
        raise TwitterError('user_id must be an integer')
elif screen_name is not None:
parameters['screen_name'] = screen_name
else:
raise TwitterError('Specify user_id or screen_name')
if count is not None:
parameters['count'] = count
while True:
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
result += [List.NewFromJsonDict(x) for x in data['lists']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
else:
cursor = data['next_cursor']
else:
break
return result
def VerifyCredentials(self):
'''Returns a twitter.User instance if the authenticating user is valid.
Returns:
A twitter.User instance representing that user if the
credentials are valid, None otherwise.
'''
if not self._oauth_consumer:
raise TwitterError("Api instance must first be given user credentials.")
url = '%s/account/verify_credentials.json' % self.base_url
try:
json = self._FetchUrl(url, no_cache=True)
except urllib2.HTTPError, http_error:
if http_error.code == httplib.UNAUTHORIZED:
return None
else:
raise http_error
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def SetCache(self, cache):
'''Override the default cache. Set to None to prevent caching.
Args:
cache:
An instance that supports the same API as the twitter._FileCache
'''
if cache == DEFAULT_CACHE:
self._cache = _FileCache()
else:
self._cache = cache
def SetUrllib(self, urllib):
'''Override the default urllib implementation.
Args:
urllib:
An instance that supports the same API as the urllib2 module
'''
self._urllib = urllib
def SetCacheTimeout(self, cache_timeout):
'''Override the default cache timeout.
Args:
cache_timeout:
Time, in seconds, that responses should be reused.
'''
self._cache_timeout = cache_timeout
def SetUserAgent(self, user_agent):
'''Override the default user agent
Args:
user_agent:
        A string that will be sent to the server as the User-Agent header
'''
self._request_headers['User-Agent'] = user_agent
def SetXTwitterHeaders(self, client, url, version):
'''Set the X-Twitter HTTP headers that will be sent to the server.
Args:
client:
The client name as a string. Will be sent to the server as
the 'X-Twitter-Client' header.
url:
The URL of the meta.xml as a string. Will be sent to the server
as the 'X-Twitter-Client-URL' header.
version:
The client version as a string. Will be sent to the server
as the 'X-Twitter-Client-Version' header.
'''
self._request_headers['X-Twitter-Client'] = client
self._request_headers['X-Twitter-Client-URL'] = url
self._request_headers['X-Twitter-Client-Version'] = version
def SetSource(self, source):
'''Suggest the "from source" value to be displayed on the Twitter web site.
The value of the 'source' parameter must be first recognized by
the Twitter server. New source values are authorized on a case by
case basis by the Twitter development team.
Args:
source:
The source name as a string. Will be sent to the server as
the 'source' parameter.
'''
self._default_params['source'] = source
def GetRateLimitStatus(self, resources=None):
'''Fetch the rate limit status for the currently authorized user.
Args:
resources:
        A comma-separated list of resource families you want to know the current
rate limit disposition of.
[Optional]
    Returns:
      A dictionary containing the rate limit information for the
      requested resource families, as returned by the
      /application/rate_limit_status endpoint.
'''
parameters = {}
if resources is not None:
parameters['resources'] = resources
url = '%s/application/rate_limit_status.json' % self.base_url
json = self._FetchUrl(url, parameters=parameters, no_cache=True)
data = self._ParseAndCheckTwitter(json)
return data
def MaximumHitFrequency(self):
'''Determines the minimum number of seconds that a program must wait
before hitting the server again without exceeding the rate_limit
imposed for the currently authenticated user.
Returns:
The minimum second interval that a program must use so as to not
exceed the rate_limit imposed for the user.
'''
rate_status = self.GetRateLimitStatus()
reset_time = rate_status.get('reset_time', None)
limit = rate_status.get('remaining_hits', None)
if reset_time:
# put the reset time into a datetime object
      reset = datetime.datetime(*rfc822.parsedate(reset_time)[:6])
# find the difference in time between now and the reset time + 1 hour
delta = reset + datetime.timedelta(hours=1) - datetime.datetime.utcnow()
if not limit:
return int(delta.seconds)
# determine the minimum number of seconds allowed as a regular interval
max_frequency = int(delta.seconds / limit) + 1
# return the number of seconds
return max_frequency
return 60
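  # A minimal polling sketch built on MaximumHitFrequency (the api
  # object and process() are hypothetical): sleeping for the suggested
  # interval between calls keeps the client under the rate limit.
  #
  #   while polling:
  #     process(api.GetMentions())
  #     time.sleep(api.MaximumHitFrequency())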
def _BuildUrl(self, url, path_elements=None, extra_params=None):
# Break url into constituent parts
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
# Add any additional path elements to the path
if path_elements:
# Filter out the path elements that have a value of None
p = [i for i in path_elements if i]
if not path.endswith('/'):
path += '/'
path += '/'.join(p)
# Add any additional query parameters to the query string
if extra_params and len(extra_params) > 0:
extra_query = self._EncodeParameters(extra_params)
# Add it to the existing query
if query:
query += '&' + extra_query
else:
query = extra_query
# Return the rebuilt URL
return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
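  # Example of what _BuildUrl produces (values are illustrative):
  #
  #   self._BuildUrl('https://api.twitter.com/1.1',
  #                  path_elements=['statuses', 'show.json'],
  #                  extra_params={'id': 123})
  #   # -> 'https://api.twitter.com/1.1/statuses/show.json?id=123'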
def _InitializeRequestHeaders(self, request_headers):
if request_headers:
self._request_headers = request_headers
else:
self._request_headers = {}
def _InitializeUserAgent(self):
user_agent = 'Python-urllib/%s (python-twitter/%s)' % \
(self._urllib.__version__, __version__)
self.SetUserAgent(user_agent)
def _InitializeDefaultParameters(self):
self._default_params = {}
def _DecompressGzippedResponse(self, response):
raw_data = response.read()
if response.headers.get('content-encoding', None) == 'gzip':
url_data = gzip.GzipFile(fileobj=StringIO.StringIO(raw_data)).read()
else:
url_data = raw_data
return url_data
def _Encode(self, s):
if self._input_encoding:
return unicode(s, self._input_encoding).encode('utf-8')
else:
return unicode(s).encode('utf-8')
def _EncodeParameters(self, parameters):
'''Return a string in key=value&key=value form
Values of None are not included in the output string.
Args:
parameters:
        A dict of (key, value) pairs, where each value is encoded as
        specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if parameters is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in parameters.items() if v is not None]))
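  # For example (illustrative): None values are dropped before encoding.
  #
  #   self._EncodeParameters({'count': 20, 'page': None})
  #   # -> 'count=20'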
def _EncodePostData(self, post_data):
'''Return a string in key=value&key=value form
Values are assumed to be encoded in the format specified by self._encoding,
and are subsequently URL encoded.
Args:
post_data:
        A dict of (key, value) pairs, where each value is encoded as
        specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if post_data is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in post_data.items()]))
def _ParseAndCheckTwitter(self, json):
"""Try and parse the JSON returned from Twitter and return
an empty dictionary if there is any error. This is a purely
defensive check because during some Twitter network outages
it will return an HTML failwhale page."""
try:
data = simplejson.loads(json)
self._CheckForTwitterError(data)
except ValueError:
if "<title>Twitter / Over capacity</title>" in json:
raise TwitterError("Capacity Error")
if "<title>Twitter / Error</title>" in json:
raise TwitterError("Technical Error")
raise TwitterError("json decoding")
return data
def _CheckForTwitterError(self, data):
"""Raises a TwitterError if twitter returns an error message.
Args:
data:
A python dict created from the Twitter json response
Raises:
TwitterError wrapping the twitter error message if one exists.
"""
# Twitter errors are relatively unlikely, so it is faster
# to check first, rather than try and catch the exception
if 'error' in data:
raise TwitterError(data['error'])
if 'errors' in data:
raise TwitterError(data['errors'])
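  # Illustrative payloads (shapes assumed from the checks above):
  #
  #   self._CheckForTwitterError({'error': 'Rate limit exceeded'})
  #   # raises TwitterError('Rate limit exceeded')
  #   self._CheckForTwitterError({'errors': [{'code': 88}]})
  #   # raises TwitterError([{'code': 88}])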
def _FetchUrl(self,
url,
post_data=None,
parameters=None,
no_cache=None,
use_gzip_compression=None):
'''Fetch a URL, optionally caching for a specified time.
Args:
url:
The URL to retrieve
post_data:
A dict of (str, unicode) key/value pairs.
If set, POST will be used.
parameters:
        A dict whose key/value pairs should be encoded and added
to the query string. [Optional]
no_cache:
If true, overrides the cache on the current request
use_gzip_compression:
If True, tells the server to gzip-compress the response.
It does not apply to POST requests.
Defaults to None, which will get the value to use from
the instance variable self._use_gzip [Optional]
Returns:
A string containing the body of the response.
'''
# Build the extra parameters dict
extra_params = {}
if self._default_params:
extra_params.update(self._default_params)
if parameters:
extra_params.update(parameters)
if post_data:
http_method = "POST"
else:
http_method = "GET"
if self._debugHTTP:
_debug = 1
else:
_debug = 0
http_handler = self._urllib.HTTPHandler(debuglevel=_debug)
https_handler = self._urllib.HTTPSHandler(debuglevel=_debug)
http_proxy = os.environ.get('http_proxy')
https_proxy = os.environ.get('https_proxy')
    if http_proxy is None or https_proxy is None:
      proxy_status = False
    else:
      proxy_status = True
opener = self._urllib.OpenerDirector()
opener.add_handler(http_handler)
opener.add_handler(https_handler)
    if proxy_status is True:
      proxy_handler = self._urllib.ProxyHandler({'http': str(http_proxy), 'https': str(https_proxy)})
opener.add_handler(proxy_handler)
if use_gzip_compression is None:
use_gzip = self._use_gzip
else:
use_gzip = use_gzip_compression
# Set up compression
if use_gzip and not post_data:
opener.addheaders.append(('Accept-Encoding', 'gzip'))
if self._oauth_consumer is not None:
if post_data and http_method == "POST":
parameters = post_data.copy()
req = oauth.Request.from_consumer_and_token(self._oauth_consumer,
token=self._oauth_token,
http_method=http_method,
http_url=url, parameters=parameters)
req.sign_request(self._signature_method_hmac_sha1, self._oauth_consumer, self._oauth_token)
headers = req.to_header()
if http_method == "POST":
encoded_post_data = req.to_postdata()
else:
encoded_post_data = None
url = req.to_url()
else:
url = self._BuildUrl(url, extra_params=extra_params)
encoded_post_data = self._EncodePostData(post_data)
# Open and return the URL immediately if we're not going to cache
if encoded_post_data or no_cache or not self._cache or not self._cache_timeout:
response = opener.open(url, encoded_post_data)
url_data = self._DecompressGzippedResponse(response)
opener.close()
else:
# Unique keys are a combination of the url and the oAuth Consumer Key
if self._consumer_key:
key = self._consumer_key + ':' + url
else:
key = url
# See if it has been cached before
last_cached = self._cache.GetCachedTime(key)
# If the cached version is outdated then fetch another and store it
if not last_cached or time.time() >= last_cached + self._cache_timeout:
try:
response = opener.open(url, encoded_post_data)
url_data = self._DecompressGzippedResponse(response)
self._cache.Set(key, url_data)
except urllib2.HTTPError, e:
print e
opener.close()
else:
url_data = self._cache.Get(key)
# Always return the latest version
return url_data
class _FileCacheError(Exception):
'''Base exception class for FileCache related errors'''
class _FileCache(object):
DEPTH = 3
  def __init__(self, root_directory=None):
self._InitializeRootDirectory(root_directory)
  def Get(self, key):
path = self._GetPath(key)
if os.path.exists(path):
return open(path).read()
else:
return None
  def Set(self, key, data):
path = self._GetPath(key)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.isdir(directory):
raise _FileCacheError('%s exists but is not a directory' % directory)
temp_fd, temp_path = tempfile.mkstemp()
temp_fp = os.fdopen(temp_fd, 'w')
temp_fp.write(data)
temp_fp.close()
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory))
if os.path.exists(path):
os.remove(path)
os.rename(temp_path, path)
  def Remove(self, key):
path = self._GetPath(key)
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory ))
if os.path.exists(path):
os.remove(path)
  def GetCachedTime(self, key):
path = self._GetPath(key)
if os.path.exists(path):
return os.path.getmtime(path)
else:
return None
def _GetUsername(self):
'''Attempt to find the username in a cross-platform fashion.'''
try:
return os.getenv('USER') or \
os.getenv('LOGNAME') or \
os.getenv('USERNAME') or \
os.getlogin() or \
'nobody'
except (AttributeError, IOError, OSError), e:
return 'nobody'
def _GetTmpCachePath(self):
username = self._GetUsername()
cache_directory = 'python.cache_' + username
return os.path.join(tempfile.gettempdir(), cache_directory)
def _InitializeRootDirectory(self, root_directory):
if not root_directory:
root_directory = self._GetTmpCachePath()
root_directory = os.path.abspath(root_directory)
if not os.path.exists(root_directory):
os.mkdir(root_directory)
if not os.path.isdir(root_directory):
raise _FileCacheError('%s exists but is not a directory' %
root_directory)
self._root_directory = root_directory
  def _GetPath(self, key):
try:
hashed_key = md5(key).hexdigest()
except TypeError:
hashed_key = md5.new(key).hexdigest()
return os.path.join(self._root_directory,
self._GetPrefix(hashed_key),
hashed_key)
  def _GetPrefix(self, hashed_key):
return os.path.sep.join(hashed_key[0:_FileCache.DEPTH])
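# A minimal _FileCache round trip, assuming a writable temp directory
# (key and value are illustrative):
#
#   cache = _FileCache()
#   cache.Set('example-key', 'example-value')
#   assert cache.Get('example-key') == 'example-value'
#   cache.GetCachedTime('example-key')  # mtime of the cache file
#   cache.Remove('example-key')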
|
gpl-3.0
| -8,450,099,248,566,052,000 | 152,697,649,916,274,050 | 31.265534 | 149 | 0.632824 | false |
ebattenberg/Lasagne
|
lasagne/layers/embedding.py
|
2
|
1857
|
import numpy as np
import theano.tensor as T
from .. import init
from .base import Layer
__all__ = [
"EmbeddingLayer"
]
class EmbeddingLayer(Layer):
"""
A layer for word embeddings. The input should be an integer type
Tensor variable.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape.
input_size: int
        The number of different embeddings. The last embedding will have index
input_size - 1.
output_size : int
The size of each embedding.
W : Theano shared variable, numpy array or callable
The embedding matrix.
Examples
--------
>>> from lasagne.layers import EmbeddingLayer, InputLayer, get_output
>>> import theano
>>> x = T.imatrix()
>>> l_in = InputLayer((3, ))
>>> W = np.arange(3*5).reshape((3, 5)).astype('float32')
>>> l1 = EmbeddingLayer(l_in, input_size=3, output_size=5, W=W)
>>> output = get_output(l1, x)
>>> f = theano.function([x], output)
>>> x_test = np.array([[0, 2], [1, 2]]).astype('int32')
>>> f(x_test)
array([[[ 0., 1., 2., 3., 4.],
[ 10., 11., 12., 13., 14.]],
<BLANKLINE>
[[ 5., 6., 7., 8., 9.],
[ 10., 11., 12., 13., 14.]]], dtype=float32)
"""
def __init__(self, incoming, input_size, output_size,
W=init.Normal(), **kwargs):
super(EmbeddingLayer, self).__init__(incoming, **kwargs)
self.input_size = input_size
self.output_size = output_size
self.W = self.add_param(W, (input_size, output_size), name="W")
def get_output_shape_for(self, input_shape):
return input_shape + (self.output_size, )
def get_output_for(self, input, **kwargs):
return self.W[input]
|
mit
| -5,821,124,162,386,458,000 | -4,966,185,492,874,632,000 | 28.015625 | 78 | 0.562197 | false |
ondra-novak/chromium.src
|
chrome/common/extensions/docs/server2/mock_file_system.py
|
7
|
4312
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
from file_system import FileSystem, FileNotFoundError
from future import Future
from test_file_system import _List, _StatTracker, TestFileSystem
from path_util import IsDirectory
class MockFileSystem(FileSystem):
'''Wraps FileSystems to add a selection of mock behaviour:
- asserting how often Stat/Read calls are being made to it.
- primitive changes/versioning via applying object "diffs", mapping paths to
new content (similar to how TestFileSystem works).
'''
def __init__(self, file_system):
self._file_system = file_system
# Updates are stored as TestFileSystems because it already implements a
    # bunch of logic to interpret paths into dictionaries.
self._updates = []
self._stat_tracker = _StatTracker()
self._read_count = 0
self._read_resolve_count = 0
self._stat_count = 0
@staticmethod
def Create(file_system, updates):
mock_file_system = MockFileSystem(file_system)
for update in updates:
mock_file_system.Update(update)
return mock_file_system
#
# FileSystem implementation.
#
def Read(self, paths, skip_not_found=False):
'''Reads |paths| from |_file_system|, then applies the most recent update
from |_updates|, if any.
'''
self._read_count += 1
def next(result):
self._read_resolve_count += 1
for path in result.iterkeys():
update = self._GetMostRecentUpdate(path)
if update is not None:
result[path] = update
return result
return self._file_system.Read(paths,
skip_not_found=skip_not_found).Then(next)
def Refresh(self):
return self._file_system.Refresh()
def _GetMostRecentUpdate(self, path):
'''Returns the latest update for the file at |path|, or None if |path|
has never been updated.
'''
for update in reversed(self._updates):
try:
return update.ReadSingle(path).Get()
except FileNotFoundError:
pass
return None
def Stat(self, path):
self._stat_count += 1
# This only supports numeric stat values since we need to add to it. In
# reality the logic here could just be to randomly mutate the stat values
# every time there's an Update but that's less meaningful for testing.
def stradd(a, b):
return str(int(a) + b)
stat = self._file_system.Stat(path)
stat.version = stradd(stat.version, self._stat_tracker.GetVersion(path))
if stat.child_versions:
for child_path, child_version in stat.child_versions.iteritems():
stat.child_versions[child_path] = stradd(
stat.child_versions[child_path],
self._stat_tracker.GetVersion(posixpath.join(path, child_path)))
return stat
def GetIdentity(self):
return self._file_system.GetIdentity()
def __str__(self):
return repr(self)
def __repr__(self):
return 'MockFileSystem(read_count=%s, stat_count=%s, updates=%s)' % (
self._read_count, self._stat_count, len(self._updates))
#
# Testing methods.
#
def GetStatCount(self):
return self._stat_count
def CheckAndReset(self, stat_count=0, read_count=0, read_resolve_count=0):
'''Returns a tuple (success, error). Use in tests like:
self.assertTrue(*object_store.CheckAndReset(...))
'''
errors = []
for desc, expected, actual in (
('read_count', read_count, self._read_count),
('read_resolve_count', read_resolve_count, self._read_resolve_count),
('stat_count', stat_count, self._stat_count)):
if actual != expected:
errors.append('%s: expected %s got %s' % (desc, expected, actual))
try:
return (len(errors) == 0, ', '.join(errors))
finally:
self.Reset()
def Reset(self):
self._read_count = 0
self._read_resolve_count = 0
self._stat_count = 0
def Update(self, update):
self._updates.append(TestFileSystem(update))
for path in _List(update).iterkeys():
# Any files (not directories) which changed are now at the version
# derived from |_updates|.
if not IsDirectory(path):
self._stat_tracker.SetVersion(path, len(self._updates))
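# A sketch of typical test usage, assuming TestFileSystem accepts the
# same path-to-content dict shape that Update() takes above:
#
#   fs = MockFileSystem(TestFileSystem({'a.txt': 'old'}))
#   fs.Update({'a.txt': 'new'})
#   fs.Read(('a.txt',)).Get()          # {'a.txt': 'new'}
#   fs.CheckAndReset(read_count=1, read_resolve_count=1)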
|
bsd-3-clause
| 5,900,303,143,631,175,000 | -5,889,335,922,823,160,000 | 31.666667 | 78 | 0.659555 | false |
annarev/tensorflow
|
tensorflow/python/kernel_tests/bitcast_op_test.py
|
12
|
3137
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.bitcast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class BitcastTest(test.TestCase):
def _testBitcast(self, x, datatype, shape):
with test_util.use_gpu():
tf_ans = array_ops.bitcast(x, datatype)
out = self.evaluate(tf_ans)
buff_after = memoryview(out).tobytes()
buff_before = memoryview(x).tobytes()
self.assertEqual(buff_before, buff_after)
self.assertEqual(tf_ans.get_shape(), shape)
self.assertEqual(tf_ans.dtype, datatype)
def testSmaller(self):
x = np.random.rand(3, 2)
datatype = dtypes.int8
shape = [3, 2, 8]
self._testBitcast(x, datatype, shape)
def testLarger(self):
x = np.arange(16, dtype=np.int8).reshape([4, 4])
datatype = dtypes.int32
shape = [4]
self._testBitcast(x, datatype, shape)
def testSameDtype(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, x.dtype, shape)
def testSameSize(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, dtypes.int64, shape)
def testErrors(self):
x = np.zeros([1, 1], np.int8)
datatype = dtypes.int32
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Cannot bitcast from 6 to 3"):
array_ops.bitcast(x, datatype, None)
def testEmpty(self):
x = np.ones([], np.int32)
datatype = dtypes.int8
shape = [4]
self._testBitcast(x, datatype, shape)
def testUnknownShape(self):
# Need to use placeholder for unknown shape
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
datatype = dtypes.int8
array_ops.bitcast(x, datatype, None)
@test_util.disable_tfrt("b/169901260")
def testQuantizedType(self):
shape = [3, 4]
x = np.zeros(shape, np.uint16)
datatype = dtypes.quint16
self._testBitcast(x, datatype, shape)
def testUnsignedType(self):
shape = [3, 4]
x = np.zeros(shape, np.int64)
datatype = dtypes.uint64
self._testBitcast(x, datatype, shape)
if __name__ == "__main__":
test.main()
|
apache-2.0
| 224,502,792,652,779,140 | 6,966,641,626,636,202,000 | 30.37 | 80 | 0.664967 | false |
naritta/numpy
|
numpy/distutils/tests/test_npy_pkg_config.py
|
70
|
3069
|
from __future__ import division, absolute_import, print_function
import os
from tempfile import mkstemp
from numpy.testing import *
from numpy.distutils.npy_pkg_config import read_config, parse_flags
simple = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[default]
cflags = -I/usr/include
libs = -L/usr/lib
"""
simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib',
'version': '0.1', 'name': 'foo'}
simple_variable = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[variables]
prefix = /foo/bar
libdir = ${prefix}/lib
includedir = ${prefix}/include
[default]
cflags = -I${includedir}
libs = -L${libdir}
"""
simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
'version': '0.1', 'name': 'foo'}
class TestLibraryInfo(TestCase):
def test_simple(self):
fd, filename = mkstemp('foo.ini')
try:
pkg = os.path.splitext(filename)[0]
try:
os.write(fd, simple.encode('ascii'))
finally:
os.close(fd)
out = read_config(pkg)
self.assertTrue(out.cflags() == simple_d['cflags'])
self.assertTrue(out.libs() == simple_d['libflags'])
self.assertTrue(out.name == simple_d['name'])
self.assertTrue(out.version == simple_d['version'])
finally:
os.remove(filename)
def test_simple_variable(self):
fd, filename = mkstemp('foo.ini')
try:
pkg = os.path.splitext(filename)[0]
try:
os.write(fd, simple_variable.encode('ascii'))
finally:
os.close(fd)
out = read_config(pkg)
self.assertTrue(out.cflags() == simple_variable_d['cflags'])
self.assertTrue(out.libs() == simple_variable_d['libflags'])
self.assertTrue(out.name == simple_variable_d['name'])
self.assertTrue(out.version == simple_variable_d['version'])
out.vars['prefix'] = '/Users/david'
self.assertTrue(out.cflags() == '-I/Users/david/include')
finally:
os.remove(filename)
class TestParseFlags(TestCase):
def test_simple_cflags(self):
d = parse_flags("-I/usr/include")
self.assertTrue(d['include_dirs'] == ['/usr/include'])
d = parse_flags("-I/usr/include -DFOO")
self.assertTrue(d['include_dirs'] == ['/usr/include'])
self.assertTrue(d['macros'] == ['FOO'])
d = parse_flags("-I /usr/include -DFOO")
self.assertTrue(d['include_dirs'] == ['/usr/include'])
self.assertTrue(d['macros'] == ['FOO'])
def test_simple_lflags(self):
d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
self.assertTrue(d['libraries'] == ['foo', 'bar'])
d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
self.assertTrue(d['libraries'] == ['foo', 'bar'])
|
bsd-3-clause
| -6,103,138,419,948,534,000 | 4,398,135,545,245,449,700 | 30.316327 | 82 | 0.568263 | false |
johnewart/django-ldap-wizard
|
django_ldap_wizard/views.py
|
1
|
2059
|
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext, Template, Context, loader
from django.utils.safestring import mark_safe
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User, Group
import django.contrib.auth
import ldap
from django.contrib.auth.decorators import login_required
from django.template import RequestContext, Context, loader
from django.db import models
def setup(request):
t = loader.get_template('django_ldap_wizard/setup.html')
successful_connect = False
if request.GET:
try:
uri = request.GET.get("ldap_url", "")
bind_dn = request.GET.get('bind_dn', "")
bind_pw = request.GET.get('bind_pw', "")
base_dn = request.GET.get('base_dn', "")
con = ldap.initialize(uri)
con.simple_bind_s( bind_dn, bind_pw )
message = "Successfully tested your connection settings."
successful_connect = True
except ldap.SERVER_DOWN:
message = "Unable to contact LDAP server -- perhaps you specified the wrong URI or the server is not accepting connections"
except ldap.INVALID_CREDENTIALS:
message = "Unable to authenticate using those credentials. Please double check them!"
except ldap.LDAPError, e:
if type(e.message) == dict and e.message.has_key('desc'):
message = e.message['desc']
else:
message = "Invalid input data, check your settings"
else:
uri = ""
bind_dn = ""
bind_pw = ""
base_dn = ""
message = ""
ctx = {
"uri": uri,
"bind_dn": bind_dn,
"bind_pw": bind_pw,
"base_dn": base_dn,
"message": message,
"success": successful_connect
}
c = RequestContext(request, ctx)
return HttpResponse(t.render(c))
|
bsd-3-clause
| -4,123,737,724,344,745,500 | 5,854,850,023,194,295,000 | 34.5 | 135 | 0.626032 | false |
PauliusLabanauskis/AlgorithmsDataStructures
|
algo_pathfinding/main.py
|
1
|
1083
|
from graph_input import read_graph
def choose_node(reachable, explored):
for node in reachable:
if node not in explored:
return node
def find_path(start_node, goal_node, graph):
reachable = [start_node]
explored = set()
previous = {start_node: None}
while len(reachable) > 0:
cur_node = choose_node(reachable, explored)
if cur_node == goal_node:
return build_path(goal_node, previous)
reachable.remove(cur_node)
explored.add(cur_node)
new_reachable = graph[cur_node] - explored
for adjacent in new_reachable:
if adjacent not in reachable:
previous[adjacent] = cur_node
reachable.append(adjacent)
def build_path(to_node, previous_nodes):
path = []
    while to_node is not None:
path.append(to_node)
to_node = previous_nodes[to_node]
return path
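# How the pieces fit together (illustrative): with previous =
# {'A': None, 'B': 'A', 'D': 'B'}, build_path('D', previous) walks the
# chain back to the start and returns ['D', 'B', 'A'] -- i.e. the path
# in goal-to-start order, as printed by main() below.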
def main():
graph = read_graph('sample_data/sample_1.txt')
path = find_path('A', 'T', graph)
print path
if __name__ == '__main__':
main()
|
unlicense
| 7,279,584,669,171,843,000 | 47,842,299,075,948,200 | 28.297297 | 51 | 0.590951 | false |
sgml/popcorn_maker
|
vendor-local/lib/python/whoosh/lang/porter2.py
|
117
|
8314
|
"""An implementation of the Porter2 stemming algorithm.
See http://snowball.tartarus.org/algorithms/english/stemmer.html
Adapted from pyporter2 by Michael Dirolf.
This algorithm is more correct but (at least in this implementation)
several times slower than the original porter algorithm as implemented
in stemming.porter.
"""
import re
r_exp = re.compile(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)")
ewss_exp1 = re.compile(r"^[aeiouy][^aeiouy]$")
ewss_exp2 = re.compile(r".*[^aeiouy][aeiouy][^aeiouywxY]$")
ccy_exp = re.compile(r"([aeiouy])y")
s1a_exp = re.compile(r"[aeiouy].")
s1b_exp = re.compile(r"[aeiouy]")
def get_r1(word):
# exceptional forms
if word.startswith('gener') or word.startswith('arsen'):
return 5
if word.startswith('commun'):
return 6
# normal form
match = r_exp.match(word)
if match:
return match.start(1)
return len(word)
def get_r2(word):
match = r_exp.match(word, get_r1(word))
if match:
return match.start(1)
return len(word)
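# Illustrative values (the standard Snowball example): for the word
# 'beautiful', R1 is 'iful' and R2 is 'ul'.
#
#   assert get_r1('beautiful') == 5 and get_r2('beautiful') == 7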
def ends_with_short_syllable(word):
if len(word) == 2:
if ewss_exp1.match(word):
return True
if ewss_exp2.match(word):
return True
return False
def is_short_word(word):
if ends_with_short_syllable(word):
if get_r1(word) == len(word):
return True
return False
def remove_initial_apostrophe(word):
if word.startswith("'"):
return word[1:]
return word
def capitalize_consonant_ys(word):
if word.startswith('y'):
word = 'Y' + word[1:]
    return ccy_exp.sub(r'\g<1>Y', word)
def step_0(word):
if word.endswith("'s'"):
return word[:-3]
if word.endswith("'s"):
return word[:-2]
if word.endswith("'"):
return word[:-1]
return word
def step_1a(word):
if word.endswith('sses'):
return word[:-4] + 'ss'
if word.endswith('ied') or word.endswith('ies'):
if len(word) > 4:
return word[:-3] + 'i'
else:
return word[:-3] + 'ie'
if word.endswith('us') or word.endswith('ss'):
return word
if word.endswith('s'):
preceding = word[:-1]
if s1a_exp.search(preceding):
return preceding
return word
return word
doubles = ('bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt')
def ends_with_double(word):
for double in doubles:
if word.endswith(double):
return True
return False
def step_1b_helper(word):
if word.endswith('at') or word.endswith('bl') or word.endswith('iz'):
return word + 'e'
if ends_with_double(word):
return word[:-1]
if is_short_word(word):
return word + 'e'
return word
s1b_suffixes = ('ed', 'edly', 'ing', 'ingly')
def step_1b(word, r1):
if word.endswith('eedly'):
if len(word) - 5 >= r1:
return word[:-3]
return word
if word.endswith('eed'):
if len(word) - 3 >= r1:
return word[:-1]
return word
for suffix in s1b_suffixes:
if word.endswith(suffix):
preceding = word[:-len(suffix)]
if s1b_exp.search(preceding):
return step_1b_helper(preceding)
return word
return word
def step_1c(word):
    # Parenthesized so the length guard applies to both suffix checks.
    if (word.endswith('y') or word.endswith('Y')) and len(word) > 1:
if word[-2] not in 'aeiouy':
if len(word) > 2:
return word[:-1] + 'i'
return word
def step_2_helper(word, r1, end, repl, prev):
if word.endswith(end):
if len(word) - len(end) >= r1:
if prev == []:
return word[:-len(end)] + repl
for p in prev:
if word[:-len(end)].endswith(p):
return word[:-len(end)] + repl
return word
return None
s2_triples = (('ization', 'ize', []),
('ational', 'ate', []),
('fulness', 'ful', []),
('ousness', 'ous', []),
('iveness', 'ive', []),
('tional', 'tion', []),
('biliti', 'ble', []),
('lessli', 'less', []),
('entli', 'ent', []),
('ation', 'ate', []),
('alism', 'al', []),
('aliti', 'al', []),
('ousli', 'ous', []),
('iviti', 'ive', []),
('fulli', 'ful', []),
('enci', 'ence', []),
('anci', 'ance', []),
('abli', 'able', []),
('izer', 'ize', []),
('ator', 'ate', []),
('alli', 'al', []),
('bli', 'ble', []),
('ogi', 'og', ['l']),
('li', '', ['c', 'd', 'e', 'g', 'h', 'k', 'm', 'n', 'r', 't']))
def step_2(word, r1):
for trip in s2_triples:
attempt = step_2_helper(word, r1, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
def step_3_helper(word, r1, r2, end, repl, r2_necessary):
if word.endswith(end):
if len(word) - len(end) >= r1:
if not r2_necessary:
return word[:-len(end)] + repl
else:
if len(word) - len(end) >= r2:
return word[:-len(end)] + repl
return word
return None
s3_triples = (('ational', 'ate', False),
('tional', 'tion', False),
('alize', 'al', False),
('icate', 'ic', False),
('iciti', 'ic', False),
('ative', '', True),
('ical', 'ic', False),
('ness', '', False),
('ful', '', False))
def step_3(word, r1, r2):
for trip in s3_triples:
attempt = step_3_helper(word, r1, r2, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
s4_delete_list = ('al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant', 'ement',
'ment', 'ent', 'ism', 'ate', 'iti', 'ous', 'ive', 'ize')
def step_4(word, r2):
for end in s4_delete_list:
if word.endswith(end):
if len(word) - len(end) >= r2:
return word[:-len(end)]
return word
if word.endswith('sion') or word.endswith('tion'):
if len(word) - 3 >= r2:
return word[:-3]
return word
def step_5(word, r1, r2):
if word.endswith('l'):
if len(word) - 1 >= r2 and word[-2] == 'l':
return word[:-1]
return word
if word.endswith('e'):
if len(word) - 1 >= r2:
return word[:-1]
if len(word) - 1 >= r1 and not ends_with_short_syllable(word[:-1]):
return word[:-1]
return word
def normalize_ys(word):
return word.replace('Y', 'y')
exceptional_forms = {'skis': 'ski',
'skies': 'sky',
'dying': 'die',
'lying': 'lie',
'tying': 'tie',
'idly': 'idl',
'gently': 'gentl',
'ugly': 'ugli',
'early': 'earli',
'only': 'onli',
'singly': 'singl',
'sky': 'sky',
'news': 'news',
'howe': 'howe',
'atlas': 'atlas',
'cosmos': 'cosmos',
'bias': 'bias',
'andes': 'andes'}
exceptional_early_exit_post_1a = frozenset(['inning', 'outing', 'canning', 'herring',
'earring', 'proceed', 'exceed', 'succeed'])
def stem(word):
if len(word) <= 2:
return word
word = remove_initial_apostrophe(word)
# handle some exceptional forms
if word in exceptional_forms:
return exceptional_forms[word]
word = capitalize_consonant_ys(word)
r1 = get_r1(word)
r2 = get_r2(word)
word = step_0(word)
word = step_1a(word)
# handle some more exceptional forms
if word in exceptional_early_exit_post_1a:
return word
word = step_1b(word, r1)
word = step_1c(word)
word = step_2(word, r1)
word = step_3(word, r1, r2)
word = step_4(word, r2)
word = step_5(word, r1, r2)
word = normalize_ys(word)
return word
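# A few hand-checked examples of the full pipeline:
#
#   assert stem('running') == 'run'      # step_1b strips 'ing', undoubles 'nn'
#   assert stem('caresses') == 'caress'  # step_1a: 'sses' -> 'ss'
#   assert stem('dying') == 'die'        # exceptional form, returned early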
|
bsd-3-clause
| 193,411,265,271,326,700 | 3,858,596,405,805,788,700 | 25.5623 | 87 | 0.479192 | false |
lemonad/methodiki
|
methodiki/methods/forms.py
|
1
|
1932
|
# -*- coding: utf-8 -*-
from django.forms import ModelForm, TextInput
from django.utils.translation import ugettext_lazy as _
from markitup.widgets import MarkItUpWidget
from taggit.forms import TagWidget
from common.forms import ModelFormRequestUser
from models import Method, MethodBonus
class MethodForm(ModelFormRequestUser):
""" Form for adding and editing methods """
class Meta:
model = Method
fields = ('title', 'description', 'tags', 'editor_comment')
widgets = {
'title': TextInput(attrs={'class': 'span-18 last input'}),
'description': MarkItUpWidget(auto_preview=False,
attrs={'class':
'span-18 last input'}),
'editor_comment': TextInput(attrs={'class':
'span-18 last input'}),
'tags': TagWidget(attrs={'class': 'span-18 last input'}),
}
def __init__(self, request, *args, **kwargs):
super(MethodForm, self).__init__(request, *args, **kwargs)
self.last_edited_by = request.user
def save(self, commit=True):
obj = super(MethodForm, self).save(commit=False)
obj.last_edited_by = self.user
if commit:
obj.save()
self.save_m2m() # Be careful with ModelForms and commit=False
return obj
class MethodBonusForm(ModelFormRequestUser):
""" Form for adding and editing method bonus' """
class Meta:
model = MethodBonus
fields = ('description',)
widgets = {
'description': MarkItUpWidget(auto_preview=True,
attrs={'class':
'span-18 last input'}),
}
def __init__(self, request, *args, **kwargs):
super(MethodBonusForm, self).__init__(request, *args, **kwargs)
|
mit
| -6,929,256,529,585,195,000 | -7,702,922,580,483,064,000 | 35.45283 | 74 | 0.552795 | false |
tersmitten/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration.py
|
12
|
8115
|
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mariadbconfiguration
version_added: "2.8"
short_description: Manage Configuration instance.
description:
- Create, update and delete instance of Configuration.
options:
resource_group:
description:
- The name of the resource group that contains the resource.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the server configuration.
required: True
value:
description:
- Value of the configuration.
state:
description:
- Assert the state of the MariaDB configuration. Use C(present) to update setting, or
C(absent) to reset to default value.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
- "Matti Ranta (@techknowlogick)"
'''
EXAMPLES = '''
- name: Update SQL Server setting
azure_rm_mariadbconfiguration:
resource_group: myResourceGroup
server_name: myServer
name: event_scheduler
value: "ON"
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myServer/confi
gurations/event_scheduler"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.rdbms.mysql import MariaDBManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMMariaDbConfiguration(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
value=dict(
type='str'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.server_name = None
self.name = None
self.value = None
self.results = dict(changed=False)
self.state = None
self.to_do = Actions.NoAction
super(AzureRMMariaDbConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
old_response = None
response = None
old_response = self.get_configuration()
if not old_response:
self.log("Configuration instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Configuration instance already exists")
if self.state == 'absent' and old_response['source'] == 'user-override':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if Configuration instance has to be deleted or may be updated")
if self.value != old_response.get('value'):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Configuration instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_configuration()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Configuration instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_configuration()
else:
self.log("Configuration instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
return self.results
def create_update_configuration(self):
self.log("Creating / Updating the Configuration instance {0}".format(self.name))
try:
response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
server_name=self.server_name,
configuration_name=self.name,
value=self.value,
source='user-override')
if isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Configuration instance.')
self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
return response.as_dict()
def delete_configuration(self):
self.log("Deleting the Configuration instance {0}".format(self.name))
try:
response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
server_name=self.server_name,
configuration_name=self.name,
source='system-default')
except CloudError as e:
self.log('Error attempting to delete the Configuration instance.')
self.fail("Error deleting the Configuration instance: {0}".format(str(e)))
return True
def get_configuration(self):
self.log("Checking if the Configuration instance {0} is present".format(self.name))
found = False
try:
response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group,
server_name=self.server_name,
configuration_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Configuration instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the Configuration instance.')
        if found:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMMariaDbConfiguration()
if __name__ == '__main__':
main()
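# Example task for this module (hypothetical values; parameter names taken
# from module_arg_spec above, and assuming the module is registered under
# the name azure_rm_mariadbconfiguration):
#
# - name: Enable the event scheduler on a MariaDB server
#   azure_rm_mariadbconfiguration:
#     resource_group: myResourceGroup
#     server_name: mariadbserver
#     name: event_scheduler
#     value: "ON"
#     state: present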
|
gpl-3.0
| 8,203,178,500,661,866,000 | -3,983,368,034,330,525,700 | 32.533058 | 151 | 0.545533 | false |
axilleas/ansible
|
lib/ansible/module_utils/cloudstack.py
|
118
|
13221
|
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time

try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
class AnsibleCloudStack:
def __init__(self, module):
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
self.result = {
'changed': False,
}
self.module = module
self._connect()
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.zone = None
self.vm = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
def _connect(self):
api_key = self.module.params.get('api_key')
api_secret = self.module.params.get('secret_key')
api_url = self.module.params.get('api_url')
api_http_method = self.module.params.get('api_http_method')
api_timeout = self.module.params.get('api_timeout')
if api_key and api_secret and api_url:
self.cs = CloudStack(
endpoint=api_url,
key=api_key,
secret=api_secret,
timeout=api_timeout,
method=api_http_method
)
else:
self.cs = CloudStack(**read_config())
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
# TODO: for backward compatibility only, remove if not used anymore
def _has_changed(self, want_dict, current_dict, only_keys=None):
return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
def has_changed(self, want_dict, current_dict, only_keys=None):
for key, value in want_dict.iteritems():
            # Optionally limit by a list of keys
            if only_keys and key not in only_keys:
                continue
            # Skip None values
            if value is None:
                continue
if key in current_dict:
# API returns string for int in some cases, just to make sure
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, str):
current_dict[key] = str(current_dict[key])
                # Only need to detect a single change, not every item
if value != current_dict[key]:
return True
return False
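    # Illustrative example of the coercion above (hypothetical values):
    # has_changed({'memory': 512}, {'memory': '512'}) returns False, because
    # the string returned by the API is cast to int before the comparison,
    # while has_changed({'memory': 512}, {'memory': '1024'}) returns True.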
def _get_by_key(self, key=None, my_dict={}):
if key:
if key in my_dict:
return my_dict[key]
self.module.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id'] ]:
self.project = p
return self._get_by_key(key, self.project)
self.module.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.module.fail_json(msg="IP address param 'ip_address' is required")
args = {}
args['ipaddress'] = ip_address
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
ip_addresses = self.cs.listPublicIpAddresses(**args)
if not ip_addresses:
self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm(self, key=None):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.module.fail_json(msg="Virtual machine param 'vm' is required")
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
vms = self.cs.listVirtualMachines(**args)
if vms:
for v in vms['virtualmachine']:
if vm in [ v['name'], v['displayname'], v['id'] ]:
self.vm = v
return self._get_by_key(key, self.vm)
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
zones = self.cs.listZones()
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone in [ z['name'], z['id'] ]:
self.zone = z
return self._get_by_key(key, self.zone)
self.module.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
            return self._get_by_key(key, self.os_type)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.cs.listOsTypes()
if os_types:
for o in os_types['ostype']:
if os_type in [ o['description'], o['id'] ]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.module.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.cs.listHypervisors()
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.module.fail_json(msg="Account must be specified with Domain")
args = {}
args['name'] = account
args['domainid'] = self.get_domain(key='id')
args['listall'] = True
accounts = self.cs.listAccounts(**args)
if accounts:
self.account = accounts['account'][0]
return self._get_by_key(key, self.account)
self.module.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
return None
args = {}
args['listall'] = True
domains = self.cs.listDomains(**args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
self.domain = d
return self._get_by_key(key, self.domain)
self.module.fail_json(msg="Domain '%s' not found" % domain)
def get_tags(self, resource=None):
existing_tags = self.cs.listTags(resourceid=resource['id'])
if existing_tags:
return existing_tags['tag']
return []
def _delete_tags(self, resource, resource_type, tags):
existing_tags = resource['tags']
tags_to_delete = []
for existing_tag in existing_tags:
if existing_tag['key'] in tags:
                if existing_tag['value'] != tags[existing_tag['key']]:
tags_to_delete.append(existing_tag)
else:
tags_to_delete.append(existing_tag)
if tags_to_delete:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags_to_delete
self.cs.deleteTags(**args)
def _create_tags(self, resource, resource_type, tags):
tags_to_create = []
        for tag_entry in tags:
tag = {
'key': tag_entry['key'],
'value': tag_entry['value'],
}
tags_to_create.append(tag)
if tags_to_create:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags_to_create
self.cs.createTags(**args)
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.module.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._delete_tags(resource, resource_type, tags)
self._create_tags(resource, resource_type, tags)
resource['tags'] = self.get_tags(resource)
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.cs.listCapabilities()
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
# TODO: for backward compatibility only, remove if not used anymore
def _poll_job(self, job=None, key=None):
return self.poll_job(job=job, key=key)
def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
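# Minimal usage sketch (hypothetical module and parameters; deployVirtualMachine
# is a standard asynchronous CloudStack API call whose response carries a
# 'jobid', as expected by poll_job above):
#
# cloudstack = AnsibleCloudStack(module)
# res = cloudstack.cs.deployVirtualMachine(serviceofferingid=offering_id,
#                                          templateid=template_id,
#                                          zoneid=cloudstack.get_zone(key='id'))
# vm = cloudstack.poll_job(job=res, key='virtualmachine')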
|
gpl-3.0
| -2,748,566,036,425,485,300 | 8,514,268,846,862,280,000 | 34.923913 | 110 | 0.571407 | false |
anthonyfok/frescobaldi
|
frescobaldi_app/gadgets/drag.py
|
1
|
4442
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2011 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Event-filtering objects and helper functions to drag things.
"""
from PyQt5.QtCore import QEvent, QFileInfo, QMimeData, QObject, Qt, QUrl
from PyQt5.QtGui import QDrag
from PyQt5.QtWidgets import QApplication, QFileIconProvider
class ComboDrag(QObject):
"""Enables dragging from a QComboBox.
Instantiate this with a QComboBox as parent to enable dragging the
current item.
By default, drags a filename got from the current index under the
Qt.EditRole. Change the role by changing the 'role' instance attribute.
"""
column = 0
role = Qt.EditRole
def __init__(self, combobox):
super(ComboDrag, self).__init__(combobox)
self._dragpos = None
combobox.installEventFilter(self)
def eventFilter(self, combobox, ev):
if ev.type() == QEvent.MouseButtonPress and ev.button() == Qt.LeftButton:
self._dragpos = ev.pos()
return not combobox.isEditable()
elif (ev.type() == QEvent.MouseMove and ev.buttons() & Qt.LeftButton
              and combobox.count() > 0):
return self.mouseMoved(combobox, ev.pos()) or False
elif (ev.type() == QEvent.MouseButtonRelease
and ev.button() == Qt.LeftButton and not combobox.isEditable()):
combobox.mousePressEvent(ev)
return False
def mouseMoved(self, combobox, pos):
if (self._dragpos is not None
and (pos - self._dragpos).manhattanLength()
>= QApplication.startDragDistance()):
self.startDrag(combobox)
return True
def startDrag(self, combobox):
index = combobox.model().index(combobox.currentIndex(), self.column)
filename = combobox.model().data(index, self.role)
icon = combobox.model().data(index, Qt.DecorationRole)
dragFile(combobox, filename, icon, Qt.CopyAction)
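# Minimal usage sketch (hypothetical widget; assumes the combobox model
# yields a file path for Qt.EditRole, the default 'role' above):
#
# combo = QComboBox()
# combo.addItem("score.ly")
# ComboDrag(combo)  # installs itself as an event filter on the combobox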
class Dragger(QObject):
"""Drags anything from any widget.
Use dragger.installEventFilter(widget) to have it drag.
"""
def __init__(self, parent=None):
super(Dragger, self).__init__(parent)
self._dragpos = None
if parent:
parent.installEventFilter(self)
def eventFilter(self, widget, ev):
if ev.type() == QEvent.MouseButtonPress and ev.button() == Qt.LeftButton:
self._dragpos = ev.pos()
return True
elif ev.type() == QEvent.MouseMove and ev.buttons() & Qt.LeftButton:
return self.mouseMoved(widget, ev.pos()) or False
return False
def mouseMoved(self, widget, pos):
if (self._dragpos is not None
and (pos - self._dragpos).manhattanLength()
>= QApplication.startDragDistance()):
self.startDrag(widget)
return True
def startDrag(self, widget):
"""Reimplement to start a drag."""
class FileDragger(Dragger):
def filename(self):
"""Should return the filename to drag."""
def startDrag(self, widget):
filename = self.filename()
if filename:
dragFile(widget, filename)
def dragFile(widget, filename, icon=None, dropactions=Qt.CopyAction):
"""Starts dragging the given local file from the widget."""
if icon is None or icon.isNull():
icon = QFileIconProvider().icon(QFileInfo(filename))
drag = QDrag(widget)
data = QMimeData()
data.setUrls([QUrl.fromLocalFile(filename)])
drag.setMimeData(data)
drag.setPixmap(icon.pixmap(32))
drag.exec_(dropactions)
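# Usage sketch (hypothetical widget and path):
#
# dragFile(some_widget, "/home/user/score.ly")  # starts a copy-drag of the file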
|
gpl-2.0
| 8,414,519,041,052,703,000 | 7,594,735,501,820,585,000 | 34.253968 | 81 | 0.652634 | false |
milankl/swm
|
calc/misc/Re_hist_calc_bs_old.py
|
1
|
4307
|
## HISTOGRAM COMPUTATIONS FOR REYNOLDS AND ROSSBY NUMBERS
from __future__ import print_function
# path
import os
path = '/home/mkloewer/python/swm/'
#path = os.path.dirname(os.getcwd()) + '/' # one level above
os.chdir(path) # change working directory
import time as tictoc
import numpy as np
from scipy import sparse
# OPTIONS
runfolder = [14]
print('Calculating Reynolds histograms from run ' + str(runfolder))
ReH = []
Re_mean = []
Re_median = []
## read data - calculate each run separately
for r,i in zip(runfolder,range(len(runfolder))):
runpath = path+'stoch/data/run%04i' % r
skip = 5*365
u = np.load(runpath+'/u_sub.npy')[skip:,...]
v = np.load(runpath+'/v_sub.npy')[skip:,...]
eta = np.load(runpath+'/h_sub.npy')[skip:,...]
e = np.load(runpath+'/e_sub.npy')[skip:,...]
t = np.load(runpath+'/t_sub.npy')[skip:,...]
print('run %i read.' % r)
## read param
global param
param = np.load(runpath+'/param.npy').all()
# import functions
exec(open(path+'swm_param.py').read())
exec(open(path+'swm_operators.py').read())
param['output'] = 0
set_grad_mat()
set_interp_mat()
set_coriolis()
tlen = len(t)
    ## create output folder
try:
os.mkdir(runpath+'/analysis')
    except OSError:   # folder already exists
pass
## reshape u,v
u = u.reshape((tlen,param['Nu'])).T
v = v.reshape((tlen,param['Nv'])).T
h = eta.reshape((tlen,param['NT'])).T + param['H']
e = e.reshape((tlen,param['NT'])).T
print('Reshape done.')
## COMPUTE REYNOLDS, ROSSBY
u_T = IuT.dot(u)
v_T = IvT.dot(v)
print('u,v interpolation done.')
#advective term
adv_u = u_T*Gux.dot(u) + v_T*IqT.dot(Guy.dot(u))
adv_v = u_T*IqT.dot(Gvx.dot(v)) + v_T*Gvy.dot(v)
del u_T,v_T
adv_term = np.sqrt(adv_u**2 + adv_v**2)
del adv_u, adv_v
print('Advection term done.')
#diffusive term
S = (Gux.dot(u)-Gvy.dot(v),G2vx.dot(v) + G2uy.dot(u))
del u,v
hS = (h*S[0],ITq.dot(h)*S[1])
del S
print('Stress tensor S done.')
diff_u = (GTx.dot(hS[0]) + Gqy.dot(hS[1])) / ITu.dot(h)
diff_v = (Gqx.dot(hS[1]) - GTy.dot(hS[0])) / ITv.dot(h)
print('Harmonic part done.')
# biharmonic stress tensor R = (R11, R12, R12, -R11), store only R11, R12
R = (Gux.dot(diff_u) - Gvy.dot(diff_v), G2vx.dot(diff_v) + G2uy.dot(diff_u))
del diff_u, diff_v
nuhR = (param['nu_B']*h*R[0],param['nu_B']*ITq.dot(h)*R[1])
del R
print('Stress tensor R done.')
bidiff_u = (GTx.dot(nuhR[0]) + Gqy.dot(nuhR[1])) / ITu.dot(h)
bidiff_v = (Gqx.dot(nuhR[1]) - GTy.dot(nuhR[0])) / ITv.dot(h)
del nuhR
print('Biharmonic part done.')
# backscatter
nu_back = -param['c_back']*param['max_dxdy']*np.sqrt(2*e.clip(0,e.max()))
del e
nu_back_hS0 = nu_back*hS[0]
nu_back_hS1 = ITq.dot(nu_back)*hS[1]
print('nu_back calculated.')
del nu_back
back_diff_u = (GTx.dot(nu_back_hS0) + Gqy.dot(nu_back_hS1)) / ITu.dot(h)
back_diff_v = (Gqx.dot(nu_back_hS1) - GTy.dot(nu_back_hS0)) / ITv.dot(h)
del nu_back_hS0,nu_back_hS1
diff_term = np.sqrt(IuT.dot((bidiff_u + back_diff_u)**2) + IvT.dot((bidiff_v + back_diff_v)**2))
#diff_term = np.sqrt(IuT.dot(bidiff_u**2) + IvT.dot(bidiff_v**2))
print('Diff term done.')
del bidiff_u,bidiff_v,back_diff_u,back_diff_v
# actual number
Re = (adv_term / diff_term).flatten()
print('Re computed.')
del adv_term, diff_term
Re_mean.append(Re.mean())
Re_median.append(np.median(Re))
Re = np.log10(Re)
# histogram
Re_min = -3. # in log scale
Re_max = 5.
N = 300
ReH_temp,Re_edges = np.histogram(Re,np.linspace(Re_min,Re_max,N))
print('Re histogram done.')
del Re
# store each run in a list
ReH.append(ReH_temp)
Re_mid = Re_edges[:-1] + np.diff(Re_edges)/2.
ReH = np.array(ReH).sum(axis=0)
Re_mean = np.array(Re_mean).mean()
Re_median = np.median(np.array(Re_median)) #actually median of medians though...
## STORING in last
dic = dict()
all_var2export = ['ReH','Re_mid','Re_edges','Re_mean','Re_median']
for var in all_var2export:
    dic[var] = eval(var)
np.save(runpath+'/analysis/Re_hist.npy',dic)
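# To read the stored histogram back, the same pattern as for param.npy
# above applies:
# dic = np.load(runpath+'/analysis/Re_hist.npy').all()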
|
gpl-3.0
| 9,018,381,181,717,123,000 | -3,655,133,419,579,297,300 | 27.523179 | 100 | 0.574878 | false |
rodrigolucianocosta/ControleEstoque
|
rOne/Storage101/django-localflavor/django-localflavor-1.3/tests/test_pl.py
|
4
|
22841
|
from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.pl.forms import (PLCountySelect, PLNationalIDCardNumberField, PLNIPField, PLPESELField,
PLPostalCodeField, PLProvinceSelect, PLREGONField)
class PLLocalFlavorTests(SimpleTestCase):
def test_PLProvinceSelect(self):
f = PLProvinceSelect()
out = '''<select name="voivodeships">
<option value="lower_silesia">Lower Silesian</option>
<option value="kuyavia-pomerania">Kuyavian-Pomeranian</option>
<option value="lublin">Lublin</option>
<option value="lubusz">Lubusz</option>
<option value="lodz">Lodz</option>
<option value="lesser_poland">Lesser Poland</option>
<option value="masovia">Masovian</option>
<option value="opole">Opole</option>
<option value="subcarpatia">Subcarpathian</option>
<option value="podlasie">Podlasie</option>
<option value="pomerania" selected="selected">Pomeranian</option>
<option value="silesia">Silesian</option>
<option value="swietokrzyskie">Swietokrzyskie</option>
<option value="warmia-masuria">Warmian-Masurian</option>
<option value="greater_poland">Greater Poland</option>
<option value="west_pomerania">West Pomeranian</option>
</select>'''
self.assertHTMLEqual(f.render('voivodeships', 'pomerania'), out)
    def test_PLCountySelect(self):
f = PLCountySelect()
out = '''<select name="administrativeunit">
<option value="wroclaw">Wroc\u0142aw</option>
<option value="jeleniagora">Jelenia G\xf3ra</option>
<option value="legnica">Legnica</option>
<option value="boleslawiecki">boles\u0142awiecki</option>
<option value="dzierzoniowski">dzier\u017coniowski</option>
<option value="glogowski">g\u0142ogowski</option>
<option value="gorowski">g\xf3rowski</option>
<option value="jaworski">jaworski</option>
<option value="jeleniogorski">jeleniog\xf3rski</option>
<option value="kamiennogorski">kamiennog\xf3rski</option>
<option value="klodzki">k\u0142odzki</option>
<option value="legnicki">legnicki</option>
<option value="lubanski">luba\u0144ski</option>
<option value="lubinski">lubi\u0144ski</option>
<option value="lwowecki">lw\xf3wecki</option>
<option value="milicki">milicki</option>
<option value="olesnicki">ole\u015bnicki</option>
<option value="olawski">o\u0142awski</option>
<option value="polkowicki">polkowicki</option>
<option value="strzelinski">strzeli\u0144ski</option>
<option value="sredzki">\u015bredzki</option>
<option value="swidnicki">\u015bwidnicki</option>
<option value="trzebnicki">trzebnicki</option>
<option value="walbrzyski">wa\u0142brzyski</option>
<option value="wolowski">wo\u0142owski</option>
<option value="wroclawski">wroc\u0142awski</option>
<option value="zabkowicki">z\u0105bkowicki</option>
<option value="zgorzelecki">zgorzelecki</option>
<option value="zlotoryjski">z\u0142otoryjski</option>
<option value="bydgoszcz">Bydgoszcz</option>
<option value="torun">Toru\u0144</option>
<option value="wloclawek">W\u0142oc\u0142awek</option>
<option value="grudziadz">Grudzi\u0105dz</option>
<option value="aleksandrowski">aleksandrowski</option>
<option value="brodnicki">brodnicki</option>
<option value="bydgoski">bydgoski</option>
<option value="chelminski">che\u0142mi\u0144ski</option>
<option value="golubsko-dobrzynski">golubsko-dobrzy\u0144ski</option>
<option value="grudziadzki">grudzi\u0105dzki</option>
<option value="inowroclawski">inowroc\u0142awski</option>
<option value="lipnowski">lipnowski</option>
<option value="mogilenski">mogile\u0144ski</option>
<option value="nakielski">nakielski</option>
<option value="radziejowski">radziejowski</option>
<option value="rypinski">rypi\u0144ski</option>
<option value="sepolenski">s\u0119pole\u0144ski</option>
<option value="swiecki">\u015bwiecki</option>
<option value="torunski">toru\u0144ski</option>
<option value="tucholski">tucholski</option>
<option value="wabrzeski">w\u0105brzeski</option>
<option value="wloclawski">w\u0142oc\u0142awski</option>
<option value="zninski">\u017cni\u0144ski</option>
<option value="lublin">Lublin</option>
<option value="biala-podlaska">Bia\u0142a Podlaska</option>
<option value="chelm">Che\u0142m</option>
<option value="zamosc">Zamo\u015b\u0107</option>
<option value="bialski">bialski</option>
<option value="bilgorajski">bi\u0142gorajski</option>
<option value="chelmski">che\u0142mski</option>
<option value="hrubieszowski">hrubieszowski</option>
<option value="janowski">janowski</option>
<option value="krasnostawski">krasnostawski</option>
<option value="krasnicki">kra\u015bnicki</option>
<option value="lubartowski">lubartowski</option>
<option value="lubelski">lubelski</option>
<option value="leczynski">\u0142\u0119czy\u0144ski</option>
<option value="lukowski">\u0142ukowski</option>
<option value="opolski">opolski</option>
<option value="parczewski">parczewski</option>
<option value="pulawski">pu\u0142awski</option>
<option value="radzynski">radzy\u0144ski</option>
<option value="rycki">rycki</option>
<option value="swidnicki">\u015bwidnicki</option>
<option value="tomaszowski">tomaszowski</option>
<option value="wlodawski">w\u0142odawski</option>
<option value="zamojski">zamojski</option>
<option value="gorzow-wielkopolski">Gorz\xf3w Wielkopolski</option>
<option value="zielona-gora">Zielona G\xf3ra</option>
<option value="gorzowski">gorzowski</option>
<option value="krosnienski">kro\u015bnie\u0144ski</option>
<option value="miedzyrzecki">mi\u0119dzyrzecki</option>
<option value="nowosolski">nowosolski</option>
<option value="slubicki">s\u0142ubicki</option>
<option value="strzelecko-drezdenecki">strzelecko-drezdenecki</option>
<option value="sulecinski">sule\u0144ci\u0144ski</option>
<option value="swiebodzinski">\u015bwiebodzi\u0144ski</option>
<option value="wschowski">wschowski</option>
<option value="zielonogorski">zielonog\xf3rski</option>
<option value="zaganski">\u017caga\u0144ski</option>
<option value="zarski">\u017carski</option>
<option value="lodz">\u0141\xf3d\u017a</option>
<option value="piotrkow-trybunalski">Piotrk\xf3w Trybunalski</option>
<option value="skierniewice">Skierniewice</option>
<option value="belchatowski">be\u0142chatowski</option>
<option value="brzezinski">brzezi\u0144ski</option>
<option value="kutnowski">kutnowski</option>
<option value="laski">\u0142aski</option>
<option value="leczycki">\u0142\u0119czycki</option>
<option value="lowicki">\u0142owicki</option>
<option value="lodzki wschodni">\u0142\xf3dzki wschodni</option>
<option value="opoczynski">opoczy\u0144ski</option>
<option value="pabianicki">pabianicki</option>
<option value="pajeczanski">paj\u0119cza\u0144ski</option>
<option value="piotrkowski">piotrkowski</option>
<option value="poddebicki">podd\u0119bicki</option>
<option value="radomszczanski">radomszcza\u0144ski</option>
<option value="rawski">rawski</option>
<option value="sieradzki">sieradzki</option>
<option value="skierniewicki">skierniewicki</option>
<option value="tomaszowski">tomaszowski</option>
<option value="wielunski">wielu\u0144ski</option>
<option value="wieruszowski">wieruszowski</option>
<option value="zdunskowolski">zdu\u0144skowolski</option>
<option value="zgierski">zgierski</option>
<option value="krakow">Krak\xf3w</option>
<option value="tarnow">Tarn\xf3w</option>
<option value="nowy-sacz">Nowy S\u0105cz</option>
<option value="bochenski">boche\u0144ski</option>
<option value="brzeski">brzeski</option>
<option value="chrzanowski">chrzanowski</option>
<option value="dabrowski">d\u0105browski</option>
<option value="gorlicki">gorlicki</option>
<option value="krakowski">krakowski</option>
<option value="limanowski">limanowski</option>
<option value="miechowski">miechowski</option>
<option value="myslenicki">my\u015blenicki</option>
<option value="nowosadecki">nowos\u0105decki</option>
<option value="nowotarski">nowotarski</option>
<option value="olkuski">olkuski</option>
<option value="oswiecimski">o\u015bwi\u0119cimski</option>
<option value="proszowicki">proszowicki</option>
<option value="suski">suski</option>
<option value="tarnowski">tarnowski</option>
<option value="tatrzanski">tatrza\u0144ski</option>
<option value="wadowicki">wadowicki</option>
<option value="wielicki">wielicki</option>
<option value="warszawa">Warszawa</option>
<option value="ostroleka">Ostro\u0142\u0119ka</option>
<option value="plock">P\u0142ock</option>
<option value="radom">Radom</option>
<option value="siedlce">Siedlce</option>
<option value="bialobrzeski">bia\u0142obrzeski</option>
<option value="ciechanowski">ciechanowski</option>
<option value="garwolinski">garwoli\u0144ski</option>
<option value="gostyninski">gostyni\u0144ski</option>
<option value="grodziski">grodziski</option>
<option value="grojecki">gr\xf3jecki</option>
<option value="kozienicki">kozenicki</option>
<option value="legionowski">legionowski</option>
<option value="lipski">lipski</option>
<option value="losicki">\u0142osicki</option>
<option value="makowski">makowski</option>
<option value="minski">mi\u0144ski</option>
<option value="mlawski">m\u0142awski</option>
<option value="nowodworski">nowodworski</option>
<option value="ostrolecki">ostro\u0142\u0119cki</option>
<option value="ostrowski">ostrowski</option>
<option value="otwocki">otwocki</option>
<option value="piaseczynski">piaseczy\u0144ski</option>
<option value="plocki">p\u0142ocki</option>
<option value="plonski">p\u0142o\u0144ski</option>
<option value="pruszkowski">pruszkowski</option>
<option value="przasnyski">przasnyski</option>
<option value="przysuski">przysuski</option>
<option value="pultuski">pu\u0142tuski</option>
<option value="radomski">radomski</option>
<option value="siedlecki">siedlecki</option>
<option value="sierpecki">sierpecki</option>
<option value="sochaczewski">sochaczewski</option>
<option value="sokolowski">soko\u0142owski</option>
<option value="szydlowiecki">szyd\u0142owiecki</option>
<option value="warszawski-zachodni">warszawski zachodni</option>
<option value="wegrowski">w\u0119growski</option>
<option value="wolominski">wo\u0142omi\u0144ski</option>
<option value="wyszkowski">wyszkowski</option>
<option value="zwolenski">zwole\u0144ski</option>
<option value="zurominski">\u017curomi\u0144ski</option>
<option value="zyrardowski">\u017cyrardowski</option>
<option value="opole">Opole</option>
<option value="brzeski">brzeski</option>
<option value="glubczycki">g\u0142ubczyski</option>
<option value="kedzierzynsko-kozielski">k\u0119dzierzy\u0144sko-kozielski</option>
<option value="kluczborski">kluczborski</option>
<option value="krapkowicki">krapkowicki</option>
<option value="namyslowski">namys\u0142owski</option>
<option value="nyski">nyski</option>
<option value="oleski">oleski</option>
<option value="opolski">opolski</option>
<option value="prudnicki">prudnicki</option>
<option value="strzelecki">strzelecki</option>
<option value="rzeszow">Rzesz\xf3w</option>
<option value="krosno">Krosno</option>
<option value="przemysl">Przemy\u015bl</option>
<option value="tarnobrzeg">Tarnobrzeg</option>
<option value="bieszczadzki">bieszczadzki</option>
<option value="brzozowski">brzozowski</option>
<option value="debicki">d\u0119bicki</option>
<option value="jaroslawski">jaros\u0142awski</option>
<option value="jasielski">jasielski</option>
<option value="kolbuszowski">kolbuszowski</option>
<option value="krosnienski">kro\u015bnie\u0144ski</option>
<option value="leski">leski</option>
<option value="lezajski">le\u017cajski</option>
<option value="lubaczowski">lubaczowski</option>
<option value="lancucki">\u0142a\u0144cucki</option>
<option value="mielecki">mielecki</option>
<option value="nizanski">ni\u017ca\u0144ski</option>
<option value="przemyski">przemyski</option>
<option value="przeworski">przeworski</option>
<option value="ropczycko-sedziszowski">ropczycko-s\u0119dziszowski</option>
<option value="rzeszowski">rzeszowski</option>
<option value="sanocki">sanocki</option>
<option value="stalowowolski">stalowowolski</option>
<option value="strzyzowski">strzy\u017cowski</option>
<option value="tarnobrzeski">tarnobrzeski</option>
<option value="bialystok">Bia\u0142ystok</option>
<option value="lomza">\u0141om\u017ca</option>
<option value="suwalki">Suwa\u0142ki</option>
<option value="augustowski">augustowski</option>
<option value="bialostocki">bia\u0142ostocki</option>
<option value="bielski">bielski</option>
<option value="grajewski">grajewski</option>
<option value="hajnowski">hajnowski</option>
<option value="kolnenski">kolne\u0144ski</option>
<option value="\u0142omzynski">\u0142om\u017cy\u0144ski</option>
<option value="moniecki">moniecki</option>
<option value="sejnenski">sejne\u0144ski</option>
<option value="siemiatycki">siematycki</option>
<option value="sokolski">sok\xf3lski</option>
<option value="suwalski">suwalski</option>
<option value="wysokomazowiecki">wysokomazowiecki</option>
<option value="zambrowski">zambrowski</option>
<option value="gdansk">Gda\u0144sk</option>
<option value="gdynia">Gdynia</option>
<option value="slupsk">S\u0142upsk</option>
<option value="sopot">Sopot</option>
<option value="bytowski">bytowski</option>
<option value="chojnicki">chojnicki</option>
<option value="czluchowski">cz\u0142uchowski</option>
<option value="kartuski">kartuski</option>
<option value="koscierski">ko\u015bcierski</option>
<option value="kwidzynski">kwidzy\u0144ski</option>
<option value="leborski">l\u0119borski</option>
<option value="malborski">malborski</option>
<option value="nowodworski">nowodworski</option>
<option value="gdanski">gda\u0144ski</option>
<option value="pucki">pucki</option>
<option value="slupski">s\u0142upski</option>
<option value="starogardzki">starogardzki</option>
<option value="sztumski">sztumski</option>
<option value="tczewski">tczewski</option>
<option value="wejherowski">wejcherowski</option>
<option value="katowice" selected="selected">Katowice</option>
<option value="bielsko-biala">Bielsko-Bia\u0142a</option>
<option value="bytom">Bytom</option>
<option value="chorzow">Chorz\xf3w</option>
<option value="czestochowa">Cz\u0119stochowa</option>
<option value="dabrowa-gornicza">D\u0105browa G\xf3rnicza</option>
<option value="gliwice">Gliwice</option>
<option value="jastrzebie-zdroj">Jastrz\u0119bie Zdr\xf3j</option>
<option value="jaworzno">Jaworzno</option>
<option value="myslowice">Mys\u0142owice</option>
<option value="piekary-slaskie">Piekary \u015al\u0105skie</option>
<option value="ruda-slaska">Ruda \u015al\u0105ska</option>
<option value="rybnik">Rybnik</option>
<option value="siemianowice-slaskie">Siemianowice \u015al\u0105skie</option>
<option value="sosnowiec">Sosnowiec</option>
<option value="swietochlowice">\u015awi\u0119toch\u0142owice</option>
<option value="tychy">Tychy</option>
<option value="zabrze">Zabrze</option>
<option value="zory">\u017bory</option>
<option value="bedzinski">b\u0119dzi\u0144ski</option>
<option value="bielski">bielski</option>
<option value="bierunsko-ledzinski">bieru\u0144sko-l\u0119dzi\u0144ski</option>
<option value="cieszynski">cieszy\u0144ski</option>
<option value="czestochowski">cz\u0119stochowski</option>
<option value="gliwicki">gliwicki</option>
<option value="klobucki">k\u0142obucki</option>
<option value="lubliniecki">lubliniecki</option>
<option value="mikolowski">miko\u0142owski</option>
<option value="myszkowski">myszkowski</option>
<option value="pszczynski">pszczy\u0144ski</option>
<option value="raciborski">raciborski</option>
<option value="rybnicki">rybnicki</option>
<option value="tarnogorski">tarnog\xf3rski</option>
<option value="wodzislawski">wodzis\u0142awski</option>
<option value="zawiercianski">zawiercia\u0144ski</option>
<option value="zywiecki">\u017cywiecki</option>
<option value="kielce">Kielce</option>
<option value="buski">buski</option>
<option value="jedrzejowski">j\u0119drzejowski</option>
<option value="kazimierski">kazimierski</option>
<option value="kielecki">kielecki</option>
<option value="konecki">konecki</option>
<option value="opatowski">opatowski</option>
<option value="ostrowiecki">ostrowiecki</option>
<option value="pinczowski">pi\u0144czowski</option>
<option value="sandomierski">sandomierski</option>
<option value="skarzyski">skar\u017cyski</option>
<option value="starachowicki">starachowicki</option>
<option value="staszowski">staszowski</option>
<option value="wloszczowski">w\u0142oszczowski</option>
<option value="olsztyn">Olsztyn</option>
<option value="elblag">Elbl\u0105g</option>
<option value="bartoszycki">bartoszycki</option>
<option value="braniewski">braniewski</option>
<option value="dzialdowski">dzia\u0142dowski</option>
<option value="elblaski">elbl\u0105ski</option>
<option value="elcki">e\u0142cki</option>
<option value="gizycki">gi\u017cycki</option>
<option value="goldapski">go\u0142dapski</option>
<option value="ilawski">i\u0142awski</option>
<option value="ketrzynski">k\u0119trzy\u0144ski</option>
<option value="lidzbarski">lidzbarski</option>
<option value="mragowski">mr\u0105gowski</option>
<option value="nidzicki">nidzicki</option>
<option value="nowomiejski">nowomiejski</option>
<option value="olecki">olecki</option>
<option value="olsztynski">olszty\u0144ski</option>
<option value="ostrodzki">ostr\xf3dzki</option>
<option value="piski">piski</option>
<option value="szczycienski">szczycie\u0144ski</option>
<option value="wegorzewski">w\u0119gorzewski</option>
<option value="poznan">Pozna\u0144</option>
<option value="kalisz">Kalisz</option>
<option value="konin">Konin</option>
<option value="leszno">Leszno</option>
<option value="chodzieski">chodziejski</option>
<option value="czarnkowsko-trzcianecki">czarnkowsko-trzcianecki</option>
<option value="gnieznienski">gnie\u017anie\u0144ski</option>
<option value="gostynski">gosty\u0144ski</option>
<option value="grodziski">grodziski</option>
<option value="jarocinski">jaroci\u0144ski</option>
<option value="kaliski">kaliski</option>
<option value="kepinski">k\u0119pi\u0144ski</option>
<option value="kolski">kolski</option>
<option value="koninski">koni\u0144ski</option>
<option value="koscianski">ko\u015bcia\u0144ski</option>
<option value="krotoszynski">krotoszy\u0144ski</option>
<option value="leszczynski">leszczy\u0144ski</option>
<option value="miedzychodzki">mi\u0119dzychodzki</option>
<option value="nowotomyski">nowotomyski</option>
<option value="obornicki">obornicki</option>
<option value="ostrowski">ostrowski</option>
<option value="ostrzeszowski">ostrzeszowski</option>
<option value="pilski">pilski</option>
<option value="pleszewski">pleszewski</option>
<option value="poznanski">pozna\u0144ski</option>
<option value="rawicki">rawicki</option>
<option value="slupecki">s\u0142upecki</option>
<option value="szamotulski">szamotulski</option>
<option value="sredzki">\u015bredzki</option>
<option value="sremski">\u015bremski</option>
<option value="turecki">turecki</option>
<option value="wagrowiecki">w\u0105growiecki</option>
<option value="wolsztynski">wolszty\u0144ski</option>
<option value="wrzesinski">wrzesi\u0144ski</option>
<option value="zlotowski">z\u0142otowski</option>
<option value="bialogardzki">bia\u0142ogardzki</option>
<option value="choszczenski">choszcze\u0144ski</option>
<option value="drawski">drawski</option>
<option value="goleniowski">goleniowski</option>
<option value="gryficki">gryficki</option>
<option value="gryfinski">gryfi\u0144ski</option>
<option value="kamienski">kamie\u0144ski</option>
<option value="kolobrzeski">ko\u0142obrzeski</option>
<option value="koszalinski">koszali\u0144ski</option>
<option value="lobeski">\u0142obeski</option>
<option value="mysliborski">my\u015bliborski</option>
<option value="policki">policki</option>
<option value="pyrzycki">pyrzycki</option>
<option value="slawienski">s\u0142awie\u0144ski</option>
<option value="stargardzki">stargardzki</option>
<option value="szczecinecki">szczecinecki</option>
<option value="swidwinski">\u015bwidwi\u0144ski</option>
<option value="walecki">wa\u0142ecki</option>
</select>'''
self.assertHTMLEqual(f.render('administrativeunit', 'katowice'), out)
def test_PLPostalCodeField(self):
error_format = ['Enter a postal code in the format XX-XXX.']
valid = {
'41-403': '41-403',
}
invalid = {
'43--434': error_format,
}
self.assertFieldOutput(PLPostalCodeField, valid, invalid)
def test_PLNIPField(self):
error_format = ['Enter a tax number field (NIP) in the format XXX-XXX-XX-XX, XXX-XX-XX-XXX or XXXXXXXXXX.']
error_checksum = ['Wrong checksum for the Tax Number (NIP).']
valid = {
'646-241-41-24': '6462414124',
'646-24-14-124': '6462414124',
'6462414124': '6462414124',
}
invalid = {
'43-343-234-323': error_format,
'64-62-414-124': error_format,
'646-241-41-23': error_checksum,
}
self.assertFieldOutput(PLNIPField, valid, invalid)
def test_PLPESELField(self):
error_checksum = ['Wrong checksum for the National Identification Number.']
error_format = ['National Identification Number consists of 11 digits.']
valid = {
'80071610614': '80071610614',
}
invalid = {
'80071610610': error_checksum,
'80': error_format,
'800716106AA': error_format,
}
self.assertFieldOutput(PLPESELField, valid, invalid)
def test_PLNationalIDCardNumberField(self):
error_checksum = ['Wrong checksum for the National ID Card Number.']
error_format = ['National ID Card Number consists of 3 letters and 6 digits.']
valid = {
'ABC123458': 'ABC123458',
'abc123458': 'ABC123458',
}
invalid = {
'ABC123457': error_checksum,
'abc123457': error_checksum,
'a12Aaaaaa': error_format,
'AA1234443': error_format,
}
self.assertFieldOutput(PLNationalIDCardNumberField, valid, invalid)
def test_PLREGONField(self):
error_checksum = ['Wrong checksum for the National Business Register Number (REGON).']
error_format = ['National Business Register Number (REGON) consists of 9 or 14 digits.']
valid = {
'12345678512347': '12345678512347',
'590096454': '590096454',
# A special case where the checksum == 10 and the control
# digit == '0'
'391023200': '391023200',
}
invalid = {
'123456784': error_checksum,
'12345678412342': error_checksum,
'590096453': error_checksum,
# A special case where the checksum == 10,
# but the control digit != '0'
'111111111': error_checksum,
'590096': error_format,
}
self.assertFieldOutput(PLREGONField, valid, invalid)
|
gpl-3.0
| -1,860,341,534,316,253,700 | 4,740,087,923,082,041,000 | 45.709611 | 115 | 0.750799 | false |
Poofjunior/dxf2gcode
|
postpro/tspoptimisation.py
|
1
|
16557
|
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2008-2015
# Christian Kohlöffel
# Vinzenz Schulz
# Jean-Paul Schouwstra
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
from __future__ import absolute_import
from __future__ import division
from random import random, shuffle
from math import floor, ceil
import globals.globals as g
from globals.six import text_type
import globals.constants as c
if c.PYQT5notPYQT4:
from PyQt5 import QtCore
else:
from PyQt4 import QtCore
import logging
logger = logging.getLogger("PostPro.TSP")
class TspOptimization(object):
"""
    Optimization using the Travelling Salesman Problem (TSP) algorithm
"""
def __init__(self, st_end_points, order):
self.shape_nrs = len(st_end_points)
self.iterations = int(self.shape_nrs) * 10
self.pop_nr = min(int(ceil(self.shape_nrs / 8.0) * 8.0),
g.config.vars.Route_Optimisation['max_population'])
self.mutate_rate = g.config.vars.Route_Optimisation['mutation_rate']
self.opt_route = []
self.order = order
self.st_end_points = st_end_points
# Generate the Distance Matrix
self.DistanceMatrix = DistanceMatrixClass()
self.DistanceMatrix.generate_matrix(st_end_points)
# Generation Population
self.Population = PopulationClass([self.shape_nrs, self.pop_nr],
self.DistanceMatrix.matrix,
self.mutate_rate)
# Initialise the Result Class
self.Fittness = FittnessClass(self.Population,
list(range(self.Population.size[1])),
self.order)
self.Fittness.calc_st_fittness(self.DistanceMatrix.matrix,
range(self.shape_nrs))
        # Always place the start point (the last shape) at the beginning
        # of the sequence
self.Fittness.set_startpoint()
# Function to correct the order of the elements
self.Fittness.correct_constrain_order()
# logger.debug('Calculation of start fitness TSP: %s' %self)
# logger.debug('Size Distance matrix: %s', len(self.DistanceMatrix.matrix))
        # Create the first results
self.Fittness.calc_cur_fittness(self.DistanceMatrix.matrix)
self.Fittness.select_best_fittness()
self.opt_route = self.Population.pop[self.Fittness.best_route]
        # Create the 2-opt optimization class (currently disabled)
# self.optmove=ClassOptMove(dmatrix=self.DistanceMatrix.matrix, nei_nr=int(round(self.shape_nrs/10)))
def calc_next_iteration(self):
"""
calc_next_iteration()
"""
        # Run the genetic algorithm
self.Population.genetic_algorithm(self.Fittness, self.mutate_rate)
        # Optimize each of the tours with the 2-opt method (currently disabled)
# for pop_nr in range(len(self.Population.pop)):
# #print ("Vorher: %0.2f" %self.calc_tour_length(tours[tour_nr]))
# self.Population.pop[pop_nr]=self.optmove.do2optmove(self.Population.pop[pop_nr])
# #print ("Nachher: %0.2f" %self.calc_tour_length(tours[tour_nr]))
        # Always place the start point (the last shape) at the beginning
        # of the sequence
self.Fittness.set_startpoint()
        # Correction function to fix the order of the elements
self.Fittness.correct_constrain_order()
        # Calculate the fitness of each route
self.Fittness.calc_cur_fittness(self.DistanceMatrix.matrix)
        # Penalty function in case the route does not match the desired order
        # Choose the best route
self.Fittness.select_best_fittness()
self.opt_route = self.Population.pop[self.Fittness.best_route]
# logger.debug('Calculation next iteration of TSP: %s' %self)
def __str__(self):
#res = self.Population.pop
return "Iteration nrs: %i" % (self.iterations * 10) +\
"\nShape nrs: %i" % self.shape_nrs +\
"\nPopulation: %i" % self.pop_nr +\
"\nMutate rate: %0.2f" % self.mutate_rate +\
"\norder: %s" % self.order +\
"\nStart length: %0.1f" % self.Fittness.best_fittness[0] +\
"\nOpt. length: %0.1f" % self.Fittness.best_fittness[-1] +\
"\nOpt. route: %s" % self.opt_route
class PopulationClass:
def __init__(self, size, dmatrix, mutate_rate):
self.size = size
self.mutate_rate = mutate_rate
self.pop = []
self.rot = []
# logger.debug('The Population size is: %s' %self.size)
for pop_nr in range(self.size[1]):
# logger.debug("======= TSP initializing population nr %i =======" % pop_nr)
if g.config.vars.Route_Optimisation['begin_art'] == 'ordered':
self.pop.append(list(range(size[0])))
elif g.config.vars.Route_Optimisation['begin_art'] == 'random':
self.pop.append(self.random_begin(size[0]))
elif g.config.vars.Route_Optimisation['begin_art'] == 'heuristic':
self.pop.append(self.heuristic_begin(dmatrix[:]))
else:
logger.error(self.tr('Wrong begin art of TSP chosen'))
for rot_nr in range(size[0]):
self.rot.append(0)
def __str__(self):
string = "\nPopulation size: %i X %i \nMutate rate: %0.2f \nRotation Matrix:\n%s \nPop Matrix:"\
% (self.size[0], self.size[1], self.mutate_rate, self.rot)
for line in self.pop:
string += '\n' + str(line)
return string
def tr(self, string_to_translate):
"""
Translate a string using the QCoreApplication translation framework
@param: string_to_translate: a unicode string
@return: the translated unicode string if it was possible to translate
"""
return text_type(QtCore.QCoreApplication.translate("PopulationClass",
string_to_translate))
def random_begin(self, size):
"""
random_begin for TSP
"""
tour = list(range(size))
shuffle(tour)
return tour
def heuristic_begin(self, dmatrix):
"""
heuristic_begin for TSP
"""
tour = []
possibilities = list(range(len(dmatrix[0])))
start_nr = int(floor(random()*len(dmatrix[0])))
        # Add the number to the tour and remove it from the possibilities
tour.append(start_nr)
possibilities.pop(possibilities.index(tour[-1]))
counter = 0
while len(possibilities):
counter += 1
tour.append(self.heuristic_find_next(tour[-1], possibilities, dmatrix))
possibilities.pop(possibilities.index(tour[-1]))
# if counter % 10 == 0:
# logger.debug("TSP heuristic searching nr %i" % counter)
return tour
def heuristic_find_next(self, start, possibilities, dmatrix):
"""
heuristic_find_next() for TSP
"""
        # Pick the nearest remaining point by distance
min_dist = 1e99
darray = dmatrix[start]
for pnr in possibilities:
if darray[pnr] < min_dist:
min_point = pnr
min_dist = darray[pnr]
return min_point
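    # Illustrative example (hypothetical distances): with
    # dmatrix[0] == [0.0, 5.0, 2.0], heuristic_find_next(0, [1, 2], dmatrix)
    # returns 2, the nearest unvisited point.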
def genetic_algorithm(self, Result, mutate_rate):
"""
genetic_algorithm for TSP
"""
self.mutate_rate = mutate_rate
        # Create a new population matrix
new_pop = []
for p_nr in range(self.size[1]):
new_pop.append([])
# Tournament Selection 1 between Parents (2 Parents remaining)
ts_r1 = list(range(self.size[1]))
shuffle(ts_r1)
winners_r1 = []
tmp_fittness = []
for nr in range(self.size[1] // 2):
if Result.cur_fittness[ts_r1[nr * 2]] < Result.cur_fittness[ts_r1[(nr * 2) + 1]]:
winners_r1.append(self.pop[ts_r1[nr * 2]])
tmp_fittness.append(Result.cur_fittness[ts_r1[nr * 2]])
else:
winners_r1.append(self.pop[ts_r1[(nr * 2) + 1]])
tmp_fittness.append(Result.cur_fittness[ts_r1[(nr * 2) + 1]])
# print(tmp_fittness)
# Tournament Selection 2 only one Parent remaining
ts_r2 = list(range(self.size[1] // 2))
shuffle(ts_r2)
for nr in range(self.size[1] // 4):
if tmp_fittness[ts_r2[nr * 2]] < tmp_fittness[ts_r2[(nr * 2) + 1]]:
winner = winners_r1[ts_r2[nr * 2]]
else:
winner = winners_r1[ts_r2[(nr * 2) + 1]]
            # Write the winners into the new population matrix
# print(winner)
for pnr in range(2):
new_pop[pnr * self.size[1] // 2 + nr] = winner[:]
# Crossover Gens from 2 Parents
crossover = list(range(self.size[1] // 2))
shuffle(crossover)
for nr in range(self.size[1] // 4):
# child = parent2
# Parents are the winners of the first round (Genetic Selection?)
parent1 = winners_r1[crossover[nr * 2]][:]
child = winners_r1[crossover[(nr * 2) + 1]][:]
            # The gene segment of parent1 that gets exchanged into the child
indx = [int(floor(random()*self.size[0])), int(floor(random()*self.size[0]))]
indx.sort()
while indx[0] == indx[1]:
indx = [int(floor(random()*self.size[0])), int(floor(random()*self.size[0]))]
indx.sort()
gens = parent1[indx[0]:indx[1] + 1]
# Remove the exchanged genes
for gen in gens:
child.pop(child.index(gen))
# Insert the new genes at a random position
ins_indx = int(floor(random()*self.size[0]))
new_children = child[0:ins_indx] + gens + child[ins_indx:len(child)]
# Write the new children in the new population matrix
for pnr in range(2):
new_pop[int((pnr + 0.5) * self.size[1] / 2 + nr)] = new_children[:]
# Mutate the 2nd half of the population matrix
mutate = list(range(self.size[1] // 2))
shuffle(mutate)
num_mutations = int(round(mutate_rate * self.size[1] / 2))
for nr in range(num_mutations):
            # Pick the index range of the segment to mutate
indx = [int(floor(random()*self.size[0])), int(floor(random()*self.size[0]))]
indx.sort()
while indx[0] == indx[1]:
indx = [int(floor(random()*self.size[0])), int(floor(random()*self.size[0]))]
indx.sort()
            # Line to be mutated
mutline = new_pop[self.size[1] // 2 + mutate[nr]]
            if random() < 0.75:  # Reverse a gene segment
cut = mutline[indx[0]:indx[1] + 1]
cut.reverse()
mutline = mutline[0:indx[0]] + cut + mutline[indx[1] + 1:len(mutline)]
            else:  # Swap two genes
orgline = mutline[:]
mutline[indx[0]] = orgline[indx[1]]
mutline[indx[1]] = orgline[indx[0]]
new_pop[self.size[1] // 2 + mutate[nr]] = mutline
# Assign the new population matrix
self.pop = new_pop
class DistanceMatrixClass:
"""
DistanceMatrixClass
"""
def __init__(self):
self.matrix = []
self.size = [0, 0]
def __str__(self):
string = ("Distance Matrix; size: %i X %i" % (self.size[0], self.size[1]))
for line_x in self.matrix:
string += "\n"
for x_vals in line_x:
string += "%8.2f" % x_vals
return string
def generate_matrix(self, st_end_points):
self.matrix = [[st_end_y[1].distance(st_end_x[0]) for st_end_x in st_end_points]
for st_end_y in st_end_points]
self.size = [len(st_end_points), len(st_end_points)]
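# Illustrative reading of the matrix (assumes point objects with a
# distance() method, as in st_end_points): for two shapes A and B,
# matrix[0][1] is the distance from the end point of A to the start point
# of B, i.e. the rapid travel move needed to cut B directly after A.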
class FittnessClass:
def __init__(self, population, cur_fittness, order):
self.population = population
self.cur_fittness = cur_fittness
self.order = order
self.best_fittness = []
self.best_route = []
def __str__(self):
return "\nBest Fittness: %s \nBest Route: %s \nBest Pop: %s"\
% (self.best_fittness[-1], self.best_route, self.population.pop[self.best_route])
def calc_st_fittness(self, matrix, st_pop):
dis = matrix[st_pop[-1]][st_pop[0]]
for nr in range(1, len(st_pop)):
dis += matrix[st_pop[nr - 1]][st_pop[nr]]
self.best_fittness.append(dis)
def calc_cur_fittness(self, matrix):
# logger.debug("Calculating current fittness len(self.population.pop): %s"
# % len(self.population.pop))
# logger.debug("Length of self.cur_fittness: %s" %(len(self.cur_fittness)))
for pop_nr in range(len(self.population.pop)):
pop = self.population.pop[pop_nr]
# logger.debug("pop_nr: %s" %pop_nr)
dis = matrix[pop[-1]][pop[0]]
for nr in range(1, len(pop)):
dis += matrix[pop[nr - 1]][pop[nr]]
self.cur_fittness[pop_nr] = dis
    # Second way to enforce the order (correction function = active)
def correct_constrain_order(self):
"""FIXME: in order to change the correction to have all ordered shapes
in begin this might be the best place to change it. Maybe we can also have
an additional option in the config file?"""
for pop in self.population.pop:
# Search the current order
order_index = self.get_pop_index_list(pop)
            # Sort the current order of the indices
order_index.sort()
# Indices according to correct order
for ind_nr in range(len(order_index)):
pop[order_index[ind_nr]] = self.order[ind_nr]
def set_startpoint(self):
n_pts = len(self.population.pop[-1])
for pop in self.population.pop:
st_pt_nr = pop.index(n_pts - 1)
# Contour with the starting point at the beginning
pop[:] = pop[st_pt_nr:n_pts] + pop[0:st_pt_nr]
def get_pop_index_list(self, pop):
return [pop.index(order) for order in self.order]
def select_best_fittness(self):
self.best_fittness.append(min(self.cur_fittness))
self.best_route = self.cur_fittness.index(self.best_fittness[-1])
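# Minimal usage sketch (hypothetical input; st_end_points is a list of
# (start_point, end_point) pairs whose points provide a distance() method,
# and order lists the shape indices whose relative order must be kept):
#
# tsp = TspOptimization(st_end_points, order)
# for _ in range(tsp.iterations):
#     tsp.calc_next_iteration()
# optimized_route = tsp.opt_route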
|
gpl-3.0
| -2,967,414,026,964,483,000 | 6,686,774,979,118,138,000 | 39.375 | 109 | 0.558066 | false |
mujiansu/arangodb
|
3rdParty/V8-4.3.61/third_party/python_26/Tools/versioncheck/pyversioncheck.py
|
98
|
4051
|
"""pyversioncheck - Module to help with checking versions"""
import types
import rfc822
import urllib
import sys
# Verbose options
VERBOSE_SILENT=0 # Single-line reports per package
VERBOSE_NORMAL=1 # Single-line reports per package, more info if outdated
VERBOSE_EACHFILE=2 # Report on each URL checked
VERBOSE_CHECKALL=3 # Check each URL for each package
# Test directory
## urllib bug: _TESTDIR="ftp://ftp.cwi.nl/pub/jack/python/versiontestdir/"
_TESTDIR="http://www.cwi.nl/~jack/versiontestdir/"
def versioncheck(package, url, version, verbose=0):
ok, newversion, fp = checkonly(package, url, version, verbose)
if verbose > VERBOSE_NORMAL:
return ok
if ok < 0:
print '%s: No correctly formatted current version file found'%(package)
elif ok == 1:
print '%s: up-to-date (version %s)'%(package, version)
else:
print '%s: version %s installed, version %s found:' % \
(package, version, newversion)
if verbose > VERBOSE_SILENT:
while 1:
line = fp.readline()
if not line: break
sys.stdout.write('\t'+line)
return ok
def checkonly(package, url, version, verbose=0):
if verbose >= VERBOSE_EACHFILE:
print '%s:'%package
if type(url) == types.StringType:
ok, newversion, fp = _check1version(package, url, version, verbose)
else:
for u in url:
ok, newversion, fp = _check1version(package, u, version, verbose)
if ok >= 0 and verbose < VERBOSE_CHECKALL:
break
return ok, newversion, fp
def _check1version(package, url, version, verbose=0):
if verbose >= VERBOSE_EACHFILE:
print ' Checking %s'%url
try:
fp = urllib.urlopen(url)
except IOError, arg:
if verbose >= VERBOSE_EACHFILE:
print ' Cannot open:', arg
return -1, None, None
msg = rfc822.Message(fp, seekable=0)
newversion = msg.getheader('current-version')
if not newversion:
if verbose >= VERBOSE_EACHFILE:
print ' No "Current-Version:" header in URL or URL not found'
return -1, None, None
version = version.lower().strip()
newversion = newversion.lower().strip()
if version == newversion:
if verbose >= VERBOSE_EACHFILE:
print ' Version identical (%s)'%newversion
return 1, version, fp
else:
if verbose >= VERBOSE_EACHFILE:
print ' Versions different (installed: %s, new: %s)'% \
(version, newversion)
return 0, newversion, fp
def _test():
print '--- TEST VERBOSE=1'
print '--- Testing existing and identical version file'
versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=1)
print '--- Testing existing package with new version'
versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=1)
print '--- Testing package with non-existing version file'
versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=1)
print '--- Test package with 2 locations, first non-existing second ok'
versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
versioncheck('VersionTestPackage', versfiles, '1.0', verbose=1)
print '--- TEST VERBOSE=2'
print '--- Testing existing and identical version file'
versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=2)
print '--- Testing existing package with new version'
versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=2)
print '--- Testing package with non-existing version file'
versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=2)
print '--- Test package with 2 locations, first non-existing second ok'
versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
versioncheck('VersionTestPackage', versfiles, '1.0', verbose=2)
if __name__ == '__main__':
_test()
|
apache-2.0
| -6,328,898,275,831,039,000 | 5,791,179,041,588,783,000 | 40.336735 | 84 | 0.63688 | false |
iHateWEBos/shooter_kernel_34
|
scripts/gcc-wrapper.py
|
501
|
3410
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"mmu.c:602",
"return_address.c:62",
])
# Capture the name of the object file, so we can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
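# A compiler line the regex is meant to match looks like (illustrative):
#   drivers/foo/bar.c:123:45: warning: unused variable 'x'
# in which case m.group(2) is "bar.c:123", the key compared against
# allowed_warnings.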
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
|
gpl-2.0
| -4,554,756,847,362,768,400 | -993,194,094,129,752,300 | 33.795918 | 97 | 0.665689 | false |
yigitguler/django
|
tests/forms_tests/tests/test_util.py
|
12
|
4366
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
from django.core.exceptions import ValidationError
from django.forms.utils import flatatt, ErrorDict, ErrorList
from django.test import TestCase
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import ugettext_lazy
from django.utils.encoding import python_2_unicode_compatible
class FormsUtilTestCase(TestCase):
# Tests for forms/utils.py module.
def test_flatatt(self):
###########
# flatatt #
###########
self.assertEqual(flatatt({'id': "header"}), ' id="header"')
self.assertEqual(flatatt({'class': "news", 'title': "Read this"}), ' class="news" title="Read this"')
self.assertEqual(flatatt({'class': "news", 'title': "Read this", 'required': "required"}), ' class="news" required="required" title="Read this"')
self.assertEqual(flatatt({'class': "news", 'title': "Read this", 'required': True}), ' class="news" title="Read this" required')
self.assertEqual(flatatt({'class': "news", 'title': "Read this", 'required': False}), ' class="news" title="Read this"')
self.assertEqual(flatatt({}), '')
def test_validation_error(self):
###################
# ValidationError #
###################
# Can take a string.
self.assertHTMLEqual(str(ErrorList(ValidationError("There was an error.").messages)),
'<ul class="errorlist"><li>There was an error.</li></ul>')
# Can take a unicode string.
self.assertHTMLEqual(six.text_type(ErrorList(ValidationError("Not \u03C0.").messages)),
'<ul class="errorlist"><li>Not π.</li></ul>')
# Can take a lazy string.
self.assertHTMLEqual(str(ErrorList(ValidationError(ugettext_lazy("Error.")).messages)),
'<ul class="errorlist"><li>Error.</li></ul>')
# Can take a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["Error one.", "Error two."]).messages)),
'<ul class="errorlist"><li>Error one.</li><li>Error two.</li></ul>')
# Can take a mixture in a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["First error.", "Not \u03C0.", ugettext_lazy("Error.")]).messages)),
'<ul class="errorlist"><li>First error.</li><li>Not π.</li><li>Error.</li></ul>')
@python_2_unicode_compatible
class VeryBadError:
def __str__(self):
return "A very bad error."
# Can take a non-string.
self.assertHTMLEqual(str(ErrorList(ValidationError(VeryBadError()).messages)),
'<ul class="errorlist"><li>A very bad error.</li></ul>')
# Escapes non-safe input but not input marked safe.
example = 'Example of link: <a href="http://www.example.com/">example</a>'
self.assertHTMLEqual(str(ErrorList([example])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorList([mark_safe(example)])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': example})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': mark_safe(example)})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
def test_error_dict_copy(self):
e = ErrorDict()
e['__all__'] = ErrorList([
ValidationError(
message='message %(i)s',
params={'i': 1},
),
ValidationError(
message='message %(i)s',
params={'i': 2},
),
])
e_copy = copy.copy(e)
self.assertEqual(e, e_copy)
self.assertEqual(e.as_data(), e_copy.as_data())
e_deepcopy = copy.deepcopy(e)
self.assertEqual(e, e_deepcopy)
        self.assertEqual(e.as_data(), e_deepcopy.as_data())
|
bsd-3-clause
| -717,087,206,445,093,500 | -6,400,980,580,141,043,000 | 45.425532 | 153 | 0.57516 | false |
seibert/numba
|
numba/roc/stubs.py
|
4
|
2822
|
from numba.core import types, typing, ir
_stub_error = NotImplementedError("This is a stub.")
def get_global_id(*args, **kargs):
"""
OpenCL get_global_id()
"""
raise _stub_error
def get_local_id(*args, **kargs):
"""
OpenCL get_local_id()
"""
raise _stub_error
def get_global_size(*args, **kargs):
"""
OpenCL get_global_size()
"""
raise _stub_error
def get_local_size(*args, **kargs):
"""
OpenCL get_local_size()
"""
raise _stub_error
def get_group_id(*args, **kargs):
"""
OpenCL get_group_id()
"""
raise _stub_error
def get_num_groups(*args, **kargs):
"""
OpenCL get_num_groups()
"""
raise _stub_error
def get_work_dim(*args, **kargs):
"""
OpenCL get_work_dim()
"""
raise _stub_error
def barrier(*args, **kargs):
"""
OpenCL barrier()
Example:
# workgroup barrier + local memory fence
hsa.barrier(hsa.CLK_LOCAL_MEM_FENCE)
# workgroup barrier + global memory fence
hsa.barrier(hsa.CLK_GLOBAL_MEM_FENCE)
# workgroup barrier + global memory fence
hsa.barrier()
"""
raise _stub_error
def mem_fence(*args, **kargs):
"""
OpenCL mem_fence()
Example:
# local memory fence
hsa.mem_fence(hsa.CLK_LOCAL_MEM_FENCE)
# global memory fence
hsa.mem_fence(hsa.CLK_GLOBAL_MEM_FENCE)
"""
raise _stub_error
def wavebarrier():
"""
HSAIL wavebarrier
"""
raise _stub_error
def activelanepermute_wavewidth(src, laneid, identity, useidentity):
"""
HSAIL activelanepermute_wavewidth_*
"""
raise _stub_error
def ds_permute(src_lane, dest_lane):
"""
AMDGCN Data Share intrinsic forwards permute (push semantics)
"""
raise _stub_error
def ds_bpermute(src_lane, dest_lane):
"""
AMDGCN Data Share intrinsic backwards permute (pull semantics)
"""
raise _stub_error
class Stub(object):
"""A stub object to represent special objects which is meaningless
outside the context of HSA-python.
"""
_description_ = '<ptx special value>'
__slots__ = () # don't allocate __dict__
def __new__(cls):
raise NotImplementedError("%s is not instantiable" % cls)
def __repr__(self):
return self._description_
class shared(Stub):
"""shared namespace
"""
_description_ = '<shared>'
def array(shape, dtype):
"""shared.array(shape, dtype)
Allocate a shared memory array.
"""
#-------------------------------------------------------------------------------
# atomic
class atomic(Stub):
"""atomic namespace
"""
_description_ = '<atomic>'
class add(Stub):
"""add(ary, idx, val)
Perform atomic ary[idx] += val
"""
|
bsd-2-clause
| -6,107,580,910,434,858,000 | -646,006,258,854,073,900 | 17.565789 | 80 | 0.565202 | false |
wldcordeiro/servo
|
tests/wpt/web-platform-tests/tools/pytest/_pytest/runner.py
|
173
|
17105
|
""" basic collect and runtest protocol implementations """
import bdb
import sys
from time import time
import py
import pytest
from _pytest._code.code import TerminalRepr, ExceptionInfo
def pytest_namespace():
return {
'fail' : fail,
'skip' : skip,
'importorskip' : importorskip,
'exit' : exit,
}
#
# pytest plugin hooks
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group.addoption('--durations',
action="store", type=int, default=None, metavar="N",
help="show N slowest setup/test durations (N=0 for all)."),
def pytest_terminal_summary(terminalreporter):
durations = terminalreporter.config.option.durations
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, 'duration'):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration)
dlist.reverse()
if not durations:
tr.write_sep("=", "slowest test durations")
else:
tr.write_sep("=", "slowest %s test durations" % durations)
dlist = dlist[:durations]
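    # Each line written below looks like (illustrative):
    #   0.35s call     test_mod.py::test_slow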
for rep in dlist:
nodeid = rep.nodeid.replace("::()::", "::")
tr.write_line("%02.2fs %-8s %s" %
(rep.duration, rep.when, nodeid))
def pytest_sessionstart(session):
session._setupstate = SetupState()
def pytest_sessionfinish(session):
session._setupstate.teardown_all()
class NodeInfo:
def __init__(self, location):
self.location = location
def pytest_runtest_protocol(item, nextitem):
item.ihook.pytest_runtest_logstart(
nodeid=item.nodeid, location=item.location,
)
runtestprotocol(item, nextitem=nextitem)
return True
def runtestprotocol(item, log=True, nextitem=None):
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request:
item._initrequest()
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log,
nextitem=nextitem))
# after all teardown hooks have been called
# want funcargs and request info to go away
if hasrequest:
item._request = False
item.funcargs = None
return reports
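# A typical (illustrative) outcome: runtestprotocol() returns one report per
# phase, e.g. [<setup passed>, <call failed>, <teardown passed>]; the "call"
# report is only produced when setup passed.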
def pytest_runtest_setup(item):
item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
try:
item.runtest()
except Exception:
# Store trace info to allow postmortem debugging
type, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
del tb # Get rid of it in this namespace
raise
def pytest_runtest_teardown(item, nextitem):
item.session._setupstate.teardown_exact(item, nextitem)
def pytest_report_teststatus(report):
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call, report):
return call.excinfo and not (
hasattr(report, "wasxfail") or
call.excinfo.errisinstance(skip.Exception) or
call.excinfo.errisinstance(bdb.BdbQuit))
def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when
ihook = getattr(item.ihook, hookname)
return CallInfo(lambda: ihook(item=item, **kwds), when=when)
class CallInfo:
""" Result/Exception info a function invocation. """
#: None or ExceptionInfo object.
excinfo = None
def __init__(self, func, when):
#: context of invocation: one of "setup", "call",
#: "teardown", "memocollect"
self.when = when
self.start = time()
try:
self.result = func()
except KeyboardInterrupt:
self.stop = time()
raise
except:
self.excinfo = ExceptionInfo()
self.stop = time()
def __repr__(self):
if self.excinfo:
status = "exception: %s" % str(self.excinfo.value)
else:
status = "result: %r" % (self.result,)
return "<CallInfo when=%r %s>" % (self.when, status)
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d['version_info'][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d['id'], d['sysplatform'], ver, d['executable'])
return s
class BaseReport(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
if hasattr(self, 'node'):
out.line(getslaveinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, 'toterminal'):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.split("::")[0]
def pytest_runtest_makereport(item, call):
when = call.when
duration = call.stop-call.start
keywords = dict([(x,1) for x in item.keywords])
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(pytest.skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(excinfo,
style=item.config.option.tbstyle)
for rwhen, key, content in item._report_sections:
sections.append(("Captured %s %s" %(key, rwhen), content))
return TestReport(item.nodeid, item.location,
keywords, outcome, longrepr, when,
sections, duration)
class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
def __init__(self, nodeid, location, keywords, outcome,
longrepr, when, sections=(), duration=0, **extra):
#: normalized collection node id
self.nodeid = nodeid
#: a (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location
#: a name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
        #: list of (secname, data) extra information which needs to
        #: be marshallable
self.sections = list(sections)
#: time it took to run just the test
self.duration = duration
self.__dict__.update(extra)
def __repr__(self):
return "<TestReport %r when=%r outcome=%r>" % (
self.nodeid, self.when, self.outcome)
class TeardownErrorReport(BaseReport):
outcome = "failed"
when = "teardown"
def __init__(self, longrepr, **extra):
self.longrepr = longrepr
self.sections = []
self.__dict__.update(extra)
def pytest_make_collect_report(collector):
call = CallInfo(collector._memocollect, "memocollect")
longrepr = None
if not call.excinfo:
outcome = "passed"
else:
from _pytest import nose
skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
if call.excinfo.errisinstance(skip_exceptions):
outcome = "skipped"
r = collector._repr_failure_py(call.excinfo, "line").reprcrash
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
rep = CollectReport(collector.nodeid, outcome, longrepr,
getattr(call, 'result', None))
rep.call = call # see collect_one_node
return rep
class CollectReport(BaseReport):
def __init__(self, nodeid, outcome, longrepr, result,
sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr
self.result = result or []
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self):
return "<CollectReport %r lenresult=%s outcome=%r>" % (
self.nodeid, len(self.result), self.outcome)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg):
self.longrepr = msg
def toterminal(self, out):
out.line(self.longrepr, red=True)
class SetupState(object):
""" shared state for setting up/tearing down test items or collectors. """
def __init__(self):
self.stack = []
self._finalizers = {}
def addfinalizer(self, finalizer, colitem):
""" attach a finalizer to the given colitem.
if colitem is None, this will add a finalizer that
is called at the end of teardown_all().
"""
assert colitem and not isinstance(colitem, tuple)
assert py.builtin.callable(finalizer)
#assert colitem in self.stack # some unit tests don't setup stack :/
self._finalizers.setdefault(colitem, []).append(finalizer)
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem):
finalizers = self._finalizers.pop(colitem, None)
exc = None
while finalizers:
fin = finalizers.pop()
try:
fin()
except Exception:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = sys.exc_info()
if exc:
py.builtin._reraise(*exc)
def _teardown_with_finalization(self, colitem):
self._callfinalizers(colitem)
if hasattr(colitem, "teardown"):
colitem.teardown()
for colitem in self._finalizers:
assert colitem is None or colitem in self.stack \
or isinstance(colitem, tuple)
def teardown_all(self):
while self.stack:
self._pop_and_teardown()
for key in list(self._finalizers):
self._teardown_with_finalization(key)
assert not self._finalizers
def teardown_exact(self, item, nextitem):
needed_collectors = nextitem and nextitem.listchain() or []
self._teardown_towards(needed_collectors)
def _teardown_towards(self, needed_collectors):
while self.stack:
if self.stack == needed_collectors[:len(self.stack)]:
break
self._pop_and_teardown()
def prepare(self, colitem):
""" setup objects along the collector chain to the test-method
and teardown previously setup objects."""
needed_collectors = colitem.listchain()
self._teardown_towards(needed_collectors)
# check if the last collection node has raised an error
for col in self.stack:
if hasattr(col, '_prepare_exc'):
py.builtin._reraise(*col._prepare_exc)
for col in needed_collectors[len(self.stack):]:
self.stack.append(col)
try:
col.setup()
except Exception:
col._prepare_exc = sys.exc_info()
raise
def collect_one_node(collector):
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
rep = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
return rep
# =============================================================
# Test OutcomeExceptions and helpers for creating them.
class OutcomeException(Exception):
""" OutcomeException and its subclass instances indicate and
contain info about test and collection outcomes.
"""
def __init__(self, msg=None, pytrace=True):
Exception.__init__(self, msg)
self.msg = msg
self.pytrace = pytrace
def __repr__(self):
if self.msg:
val = self.msg
if isinstance(val, bytes):
val = py._builtin._totext(val, errors='replace')
return val
return "<%s instance>" %(self.__class__.__name__,)
__str__ = __repr__
class Skipped(OutcomeException):
# XXX hackish: on 3k we fake to live in the builtins
# in order to have Skipped exception printing shorter/nicer
__module__ = 'builtins'
class Failed(OutcomeException):
""" raised from an explicit call to pytest.fail() """
__module__ = 'builtins'
class Exit(KeyboardInterrupt):
""" raised for immediate program exits (no tracebacks/summaries)"""
def __init__(self, msg="unknown reason"):
self.msg = msg
KeyboardInterrupt.__init__(self, msg)
# exposed helper methods
def exit(msg):
""" exit testing process as if KeyboardInterrupt was triggered. """
__tracebackhide__ = True
raise Exit(msg)
exit.Exception = Exit
def skip(msg=""):
""" skip an executing test with the given message. Note: it's usually
better to use the pytest.mark.skipif marker to declare a test to be
skipped under certain conditions like mismatching platforms or
dependencies. See the pytest_skipping plugin for details.
"""
__tracebackhide__ = True
raise Skipped(msg=msg)
skip.Exception = Skipped
def fail(msg="", pytrace=True):
""" explicitly fail an currently-executing test with the given Message.
:arg pytrace: if false the msg represents the full failure information
and no python traceback will be reported.
"""
__tracebackhide__ = True
raise Failed(msg=msg, pytrace=pytrace)
fail.Exception = Failed
def importorskip(modname, minversion=None):
""" return imported module if it has at least "minversion" as its
    __version__ attribute. If no minversion is specified, a skip
    is only triggered if the module cannot be imported.
"""
__tracebackhide__ = True
compile(modname, '', 'eval') # to catch syntaxerrors
try:
__import__(modname)
except ImportError:
skip("could not import %r" %(modname,))
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, '__version__', None)
if minversion is not None:
try:
from pkg_resources import parse_version as pv
except ImportError:
skip("we have a required version for %r but can not import "
"no pkg_resources to parse version strings." %(modname,))
if verattr is None or pv(verattr) < pv(minversion):
skip("module %r has __version__ %r, required is: %r" %(
modname, verattr, minversion))
return mod
|
mpl-2.0
| -1,278,673,746,570,306,300 | -5,931,926,893,529,915,000 | 32.213592 | 79 | 0.603508 | false |
rjschwei/azure-sdk-for-python
|
azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly.py
|
3
|
2832
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .catalog_item import CatalogItem
class USqlAssembly(CatalogItem):
"""A Data Lake Analytics catalog U-SQL Assembly.
:param compute_account_name: the name of the Data Lake Analytics account.
:type compute_account_name: str
:param version: the version of the catalog item.
:type version: str
:param database_name: the name of the database.
:type database_name: str
:param name: the name of the assembly.
:type name: str
:param clr_name: the name of the CLR.
:type clr_name: str
:param is_visible: the switch indicating if this assembly is visible or
not.
:type is_visible: bool
:param is_user_defined: the switch indicating if this assembly is user
defined or not.
:type is_user_defined: bool
:param files: the list of files associated with the assembly
:type files: list of :class:`USqlAssemblyFileInfo
<azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyFileInfo>`
:param dependencies: the list of dependencies associated with the assembly
:type dependencies: list of :class:`USqlAssemblyDependencyInfo
<azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyDependencyInfo>`
"""
_attribute_map = {
'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'database_name': {'key': 'databaseName', 'type': 'str'},
'name': {'key': 'assemblyName', 'type': 'str'},
'clr_name': {'key': 'clrName', 'type': 'str'},
'is_visible': {'key': 'isVisible', 'type': 'bool'},
'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'},
'files': {'key': 'files', 'type': '[USqlAssemblyFileInfo]'},
'dependencies': {'key': 'dependencies', 'type': '[USqlAssemblyDependencyInfo]'},
}
def __init__(self, compute_account_name=None, version=None, database_name=None, name=None, clr_name=None, is_visible=None, is_user_defined=None, files=None, dependencies=None):
super(USqlAssembly, self).__init__(compute_account_name=compute_account_name, version=version)
self.database_name = database_name
self.name = name
self.clr_name = clr_name
self.is_visible = is_visible
self.is_user_defined = is_user_defined
self.files = files
self.dependencies = dependencies
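# A minimal construction sketch (all argument values are hypothetical):
#
#   assembly = USqlAssembly(compute_account_name='myadla',
#                           database_name='master',
#                           name='MyAssembly',
#                           clr_name='MyAssembly, Version=1.0.0.0',
#                           is_visible=True)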
|
mit
| 7,957,878,500,158,119,000 | -1,552,328,569,312,909,600 | 44.677419 | 180 | 0.635593 | false |
BlueCrystalLabs/bgfx
|
3rdparty/scintilla/test/lexTests.py
|
65
|
3416
|
# -*- coding: utf-8 -*-
# Requires Python 2.7 or later
import io, os, sys, unittest
if sys.platform == "win32":
import XiteWin as Xite
else:
import XiteQt as Xite
keywordsHTML = [
b"b body content head href html link meta "
b"name rel script strong title type xmlns",
b"function",
b"sub"
]
class TestLexers(unittest.TestCase):
def setUp(self):
self.xite = Xite.xiteFrame
self.ed = self.xite.ed
self.ed.ClearAll()
self.ed.EmptyUndoBuffer()
def AsStyled(self):
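		# Renders the buffer with inline style markers: each {n} introduces the
		# style number applied to the bytes that follow, so styled Python source
		# comes out like (illustrative) b"{5}if{0} {11}x{0}:".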
text = self.ed.Contents()
data = io.BytesIO()
prevStyle = -1
for o in range(self.ed.Length):
styleNow = self.ed.GetStyleAt(o)
if styleNow != prevStyle:
styleBuf = "{%0d}" % styleNow
data.write(styleBuf.encode('utf-8'))
prevStyle = styleNow
data.write(text[o:o+1])
return data.getvalue()
def LexExample(self, name, lexerName, keywords=None):
if keywords is None:
keywords = []
self.ed.SetCodePage(65001)
self.ed.LexerLanguage = lexerName
bits = self.ed.StyleBitsNeeded
mask = 2 << bits - 1
self.ed.StyleBits = bits
for i in range(len(keywords)):
self.ed.SetKeyWords(i, keywords[i])
nameExample = os.path.join("examples", name)
namePrevious = nameExample +".styled"
nameNew = nameExample +".new"
with open(nameExample, "rb") as f:
prog = f.read()
BOM = b"\xEF\xBB\xBF"
if prog.startswith(BOM):
prog = prog[len(BOM):]
lenDocument = len(prog)
self.ed.AddText(lenDocument, prog)
self.ed.Colourise(0, lenDocument)
self.assertEquals(self.ed.EndStyled, lenDocument)
try:
with open(namePrevious, "rb") as f:
prevStyled = f.read()
except FileNotFoundError:
prevStyled = ""
progStyled = self.AsStyled()
if progStyled != prevStyled:
with open(nameNew, "wb") as f:
f.write(progStyled)
print(progStyled)
print(prevStyled)
self.assertEquals(progStyled, prevStyled)
# The whole file doesn't parse like it did before so don't try line by line
# as that is likely to fail many times.
return
# Try partial lexes from the start of every line which should all be identical.
for line in range(self.ed.LineCount):
lineStart = self.ed.PositionFromLine(line)
self.ed.StartStyling(lineStart, mask)
self.assertEquals(self.ed.EndStyled, lineStart)
self.ed.Colourise(lineStart, lenDocument)
progStyled = self.AsStyled()
if progStyled != prevStyled:
with open(nameNew, "wb") as f:
f.write(progStyled)
self.assertEquals(progStyled, prevStyled)
# Give up after one failure
return
def testCXX(self):
self.LexExample("x.cxx", b"cpp", [b"int"])
def testPython(self):
self.LexExample("x.py", b"python",
[b"class def else for if import in print return while"])
def testHTML(self):
self.LexExample("x.html", b"hypertext", keywordsHTML)
def testASP(self):
self.LexExample("x.asp", b"hypertext", keywordsHTML)
def testPHP(self):
self.LexExample("x.php", b"hypertext", keywordsHTML)
def testVB(self):
self.LexExample("x.vb", b"vb", [b"as dim or string"])
def testLua(self):
self.LexExample("x.lua", b"lua", [b"function end"])
def testRuby(self):
self.LexExample("x.rb", b"ruby", [b"class def end"])
def testPerl(self):
self.LexExample("x.pl", b"perl", [b"printf sleep use while"])
def testD(self):
self.LexExample("x.d", b"d",
[b"keyword1", b"keyword2", b"", b"keyword4", b"keyword5",
b"keyword6", b"keyword7"])
if __name__ == '__main__':
Xite.main("lexTests")
|
bsd-2-clause
| -1,858,739,687,510,134,800 | -2,210,251,850,489,764,900 | 26.111111 | 81 | 0.680328 | false |
sh1nu11bi/RATDecoders
|
BlackShades.py
|
8
|
5801
|
#!/usr/bin/env python
'''
BlackShades RAT Decoder
Original Script by Brian Wallace (@botnet_hunter)
'''
__description__ = 'BlackShades RAT Config Extractor\nOriginal Script by Brian Wallace (@botnet_hunter)'
__author__ = 'Kevin Breen http://techanarchy.net'
__OrigionalCode__ = 'v1.0.0 by Brian Wallace (@botnet_hunter)'
__version__ = '0.1'
__date__ = '2014/05/23'
import os
import sys
import string
import re
from optparse import OptionParser
prng_seed = 0
def is_valid_config(config):
if config[:3] != "\x0c\x0c\x0c":
return False
if config.count("\x0C\x0C\x0C") < 15:
return False
return True
def get_next_rng_value():
global prng_seed
prng_seed = ((prng_seed * 1140671485 + 12820163) & 0xffffff)
return prng_seed / 65536
def decrypt_configuration(hex):
global prng_seed
ascii = hex.decode('hex')
tail = ascii[0x20:]
pre_check = []
for x in xrange(3):
pre_check.append(ord(tail[x]) ^ 0x0c)
for x in xrange(0xffffff):
prng_seed = x
if get_next_rng_value() != pre_check[0] or get_next_rng_value() != pre_check[1] or get_next_rng_value() != pre_check[2]:
continue
prng_seed = x
config = "".join((chr(ord(c) ^ int(get_next_rng_value())) for c in tail))
if is_valid_config(config):
return config.split("\x0c\x0c\x0c")
return None
def config_extract(raw_data):
config_pattern = re.findall('[0-9a-fA-F]{154,}', raw_data)
for s in config_pattern:
if (len(s) % 2) == 1:
s = s[:-1]
return s
def config_parser(config):
config_dict = {}
config_dict['Domain'] = config[1]
config_dict['Client Control Port'] = config[2]
config_dict['Client Transfer Port'] = config[3]
config_dict['Campaign ID'] = config[4]
config_dict['File Name'] = config[5]
config_dict['Install Path'] = config[6]
config_dict['Registry Key'] = config[7]
config_dict['ActiveX Key'] = config[8]
config_dict['Install Flag'] = config[9]
config_dict['Hide File'] = config[10]
config_dict['Melt File'] = config[11]
config_dict['Delay'] = config[12]
config_dict['USB Spread'] = config[13]
config_dict['Mutex'] = config[14]
config_dict['Log File'] = config[15]
config_dict['Folder Name'] = config[16]
config_dict['Smart DNS'] = config[17]
config_dict['Protect Process'] = config[18]
return config_dict
def run(data):
raw_config = config_extract(data)
config = decrypt_configuration(raw_config)
if config is not None and len(config) > 15:
sorted_config = config_parser(config)
return sorted_config
return None
#Recursive Function Goes Here
def runRecursive(folder, output):
counter1 = 0
counter2 = 0
print "[+] Writing Configs to File {0}".format(output)
with open(output, 'a+') as out:
#This line will need changing per Decoder
out.write("File Name, Campaign ID, Domain, Transfer Port, Control Port, File Name, Install Path, Registry Key, ActiveX Key, Install Flag, Hide File, Melt File, Delay, USB Spread, Mutex, Log File, Folder Name, Smart DNS, Protect Process\n")
for server in os.listdir(folder):
fileData = open(os.path.join(folder,server), 'rb').read()
configOut = run(fileData)
if configOut != None:
#This line will need changing per Decoder
out.write('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12},{13},{14},{15},{16},{17},{18}\n'.format(server, configOut["Campaign ID"],configOut["Domain"],configOut["Client Transfer Port"],configOut["Client Control Port"],configOut["File Name"],configOut["Install Path"],configOut["Registry Key"],configOut["ActiveX Key"],configOut["Install Flag"],configOut["Hide File"],configOut["Melt File"],configOut["Delay"],configOut["USB Spread"],configOut["Mutex"],configOut["Log File"],configOut["Folder Name"],configOut["Smart DNS"],configOut["Protect Process"]))
counter1 += 1
counter2 += 1
print "[+] Decoded {0} out of {1} Files".format(counter1, counter2)
return "Complete"
if __name__ == "__main__":
parser = OptionParser(usage='usage: %prog inFile outConfig\n' + __description__, version='%prog ' + __version__)
parser.add_option("-r", "--recursive", action='store_true', default=False, help="Recursive Mode")
(options, args) = parser.parse_args()
# If we dont have args quit with help page
if len(args) > 0:
pass
else:
parser.print_help()
sys.exit()
# if we want a recursive extract run this function
if options.recursive == True:
if len(args) == 2:
runRecursive(args[0], args[1])
sys.exit()
else:
print "[+] You need to specify Both Dir to read AND Output File"
parser.print_help()
sys.exit()
    # If not recursive, try to open the file
try:
print "[+] Reading file"
fileData = open(args[0], 'rb').read()
except:
print "[+] Couldn't Open File {0}".format(args[0])
sys.exit()
#Run the config extraction
print "[+] Searching for Config"
config = run(fileData)
#If we have a config figure out where to dump it out.
if config == None:
print "[+] Config not found"
sys.exit()
    # If you gave me two args, I'm going to assume the 2nd arg is where you want to save the file
if len(args) == 2:
print "[+] Writing Config to file {0}".format(args[1])
with open(args[1], 'a') as outFile:
for key, value in sorted(config.iteritems()):
clean_value = filter(lambda x: x in string.printable, value)
outFile.write("Key: {0}\t Value: {1}\n".format(key,clean_value))
# if no seconds arg then assume you want it printing to screen
else:
print "[+] Printing Config to screen"
for key, value in sorted(config.iteritems()):
clean_value = filter(lambda x: x in string.printable, value)
print " [-] Key: {0}\t Value: {1}".format(key,clean_value)
print "[+] End of Config"
|
gpl-3.0
| 6,958,602,372,156,583,000 | 133,949,770,991,930,100 | 34.371951 | 564 | 0.64713 | false |
tornadozou/tensorflow
|
tensorflow/contrib/signal/python/ops/spectral_ops.py
|
15
|
7918
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral operations (e.g. Short-time Fourier Transform)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.contrib.signal.python.ops import reconstruction_ops
from tensorflow.contrib.signal.python.ops import shape_ops
from tensorflow.contrib.signal.python.ops import window_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops
def stft(signals, frame_length, frame_step, fft_length=None,
window_fn=functools.partial(window_ops.hann_window, periodic=True),
pad_end=False, name=None):
"""Computes the [Short-time Fourier Transform][stft] of `signals`.
Implemented with GPU-compatible ops and supports gradients.
Args:
signals: A `[..., samples]` `float32` `Tensor` of real-valued signals.
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT to apply.
If not provided, uses the smallest power of 2 enclosing `frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
pad_end: Whether to pad the end of `signals` with zeros when the provided
frame length and step produces a frame that lies partially past its end.
name: An optional name for the operation.
Returns:
A `[..., frames, fft_unique_bins]` `Tensor` of `complex64` STFT values where
`fft_unique_bins` is `fft_length // 2 + 1` (the unique components of the
FFT).
Raises:
ValueError: If `signals` is not at least rank 1, `frame_length` is
not scalar, `frame_step` is not scalar, or `frame_length`
is greater than `fft_length`.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'stft', [signals, frame_length,
frame_step]):
signals = ops.convert_to_tensor(signals, name='signals')
signals.shape.with_rank_at_least(1)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
frame_length_static = tensor_util.constant_value(
frame_length)
fft_length_static = tensor_util.constant_value(fft_length)
if (frame_length_static is not None and fft_length_static is not None and
frame_length_static > fft_length_static):
raise ValueError('frame_length (%d) may not be larger than '
'fft_length (%d)' % (frame_length_static,
fft_length_static))
framed_signals = shape_ops.frame(
signals, frame_length, frame_step, pad_end=pad_end)
# Optionally window the framed signals.
if window_fn is not None:
window = window_fn(frame_length, dtype=framed_signals.dtype)
framed_signals *= window
# spectral_ops.rfft produces the (fft_length/2 + 1) unique components of the
# FFT of the real windowed signals in framed_signals.
return spectral_ops.rfft(framed_signals, [fft_length])
def inverse_stft(stfts,
frame_length,
frame_step,
fft_length=None,
window_fn=functools.partial(window_ops.hann_window,
periodic=True),
name=None):
"""Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.
Implemented with GPU-compatible ops and supports gradients.
Args:
stfts: A `complex64` `[..., frames, fft_unique_bins]` `Tensor` of STFT bins
representing a batch of `fft_length`-point STFTs where `fft_unique_bins`
is `fft_length // 2 + 1`
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT that produced
`stfts`. If not provided, uses the smallest power of 2 enclosing
`frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
name: An optional name for the operation.
Returns:
A `[..., samples]` `Tensor` of `float32` signals representing the inverse
STFT for each input STFT in `stfts`.
Raises:
ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar,
`frame_step` is not scalar, or `fft_length` is not scalar, or
`frame_length` is greater than `fft_length`.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'inverse_stft', [stfts]):
stfts = ops.convert_to_tensor(stfts, name='stfts')
stfts.shape.with_rank_at_least(2)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
fft_length.shape.assert_has_rank(0)
frame_length_static = tensor_util.constant_value(
frame_length)
fft_length_static = tensor_util.constant_value(fft_length)
if (frame_length_static is not None and fft_length_static is not None and
frame_length_static > fft_length_static):
raise ValueError('frame_length (%d) may not be larger than '
'fft_length (%d)' % (frame_length_static,
fft_length_static))
real_frames = spectral_ops.irfft(stfts, [fft_length])[..., :frame_length]
# Optionally window and overlap-add the inner 2 dimensions of real_frames
# into a single [samples] dimension.
if window_fn is not None:
window = window_fn(frame_length, dtype=stfts.dtype.real_dtype)
real_frames *= window
return reconstruction_ops.overlap_and_add(real_frames, frame_step)
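# A round-trip sketch (shapes and values hypothetical; faithful reconstruction
# additionally requires the window and step to satisfy an overlap-add
# condition such as COLA):
#
#   stfts = stft(signals, frame_length=256, frame_step=64)
#   recon = inverse_stft(stfts, frame_length=256, frame_step=64)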
def _enclosing_power_of_two(value):
"""Return 2**N for integer N such that 2**N >= value."""
value_static = tensor_util.constant_value(value)
if value_static is not None:
return constant_op.constant(
int(2**np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)
return math_ops.cast(
math_ops.pow(2.0, math_ops.ceil(
math_ops.log(math_ops.to_float(value)) / math_ops.log(2.0))),
value.dtype)
|
apache-2.0
| 5,741,711,291,082,943,000 | -2,402,076,127,886,642,700 | 42.988889 | 80 | 0.671382 | false |